Collection of Python libraries developed for testing purposes.

hoover.py 47KB

  1. # coding=utf-8
  2. import collections
  3. import csv
  4. import difflib
  5. import hashlib
  6. import inspect
  7. import itertools
  8. import json
  9. import operator
  10. import time
  11. from copy import deepcopy
  12. # ########################################################################### #
  13. # ## The Motor ## #
  14. # ########################################################################### #
  15. def regression_test(argsrc, tests, driver_settings, cleanup_hack=None,
  16. apply_hacks=None, on_next=None):
  17. """Perform regression test with argsets from `argsrc`.
  18. For each argset pulled from the source, one comparison is performed
  19. per driver pair in `tests`, which is a list of tuples of a
  20. comparison function and a pair of test driver classes: `(operator,
  21. oracle_class, result_class)`. (The classes are assumed to
  22. be sub-classes of `hoover.BaseTestDriver`.)
  23. `driver_settings` is a dictionary that holds environmental
  24. values for all the drivers, with keys of the form
  25. "DriverName.settingName". Each driver is then set up with this
  26. dict and gets a copy containing only the settings intended
  27. for itself (with the "DriverName." part stripped).
  28. If a comparison fails, a report is generated using `hoover.jsDiff()`
  29. and stored, along with the affected arguments, in a `hoover.Tracker`
  30. instance, which is finally used as the return value. This instance
  31. contains a method for basic stats as well as a method to format
  32. the final report and a helper method to export argument sets
  33. as CSV files.
  34. Supports hacks, which are data transformations performed by the
  35. `hoover.TinyCase` class and are intended to avoid known bugs
  36. and anomalies (`apply_hacks`) or to clean data structures of
  37. irrelevant data (`cleanup_hack`, performed only if the comparison
  38. function provided along with the driver pair is not `operator.eq`).
  39. A function can be provided as the `on_next` argument; it will be
  40. called after pulling each argument set, with the current argument set
  41. as the first argument and the previous one (or `None`) as the second.
  42. """
  43. # TODO: do not parse driver_settings thousands of times (use a view class?)
  44. on_next = on_next if on_next else lambda a, b: None
  45. apply_hacks = apply_hacks if apply_hacks else []
  46. tracker = Tracker()
  47. last_argset = None
  48. all_classes = set(reduce(lambda a, b: a+b,
  49. [triple[1:] for triple in tests]))
  50. counter = StatCounter()
  51. for argset in argsrc:
  52. on_start = time.time()
  53. on_next(argset, last_argset)
  54. counter.add('on_next', time.time() - on_start)
  55. # # load the data first, only once for each driver
  56. #
  57. data = {}
  58. for aclass in all_classes:
  59. try:
  60. aclass.check_values(argset)
  61. except NotImplementedError: # let them bail out
  62. counter.count_for(aclass, 'bailouts')
  63. pass
  64. else:
  65. data[aclass], duration, overhead = get_data_and_stats(
  66. aclass, argset, driver_settings)
  67. counter.count_for(aclass, 'calls')
  68. counter.add_for(aclass, 'duration', duration)
  69. counter.add_for(aclass, 'overhead', overhead)
  70. for match_op, oclass, rclass in tests:
  71. # skip test if one of classes bailed out on the argset
  72. if oclass not in data or rclass not in data:
  73. continue
  74. diff = None
  75. case = TinyCase({
  76. 'argset': argset,
  77. 'oracle': deepcopy(data[oclass]),
  78. 'result': deepcopy(data[rclass]),
  79. 'oname': oclass.__name__,
  80. 'rname': rclass.__name__
  81. })
  82. hacks_done = sum([case.hack(h) for h in apply_hacks])
  83. counter.add_for(oclass, 'ohacks', hacks_done)
  84. counter.add_for(rclass, 'rhacks', hacks_done)
  85. counter.add('hacks', hacks_done)
  86. counter.add('hacked_cases', (1 if hacks_done else 0))
  87. if not match_op(case['oracle'], case['result']):
  88. # try to clean up so that normally ignored items
  89. # do not clutter up the report
  90. if match_op is not operator.eq:
  91. case.hack(cleanup_hack)
  92. # but panic if that "removed" the error condition
  93. if match_op(case['oracle'], case['result']):
  94. raise RuntimeError("cleanup ate error")
  95. diff = jsDiff(dira=case['oracle'],
  96. dirb=case['result'],
  97. namea=case['oname'],
  98. nameb=case['rname'])
  99. tracker.update(diff, argset)
  100. counter.count('cases')
  101. tracker.argsets_done += 1
  102. last_argset = argset
  103. counter.count('argsets')
  104. tracker.driver_stats = counter.all_stats()
  105. return tracker
  106. def get_data_and_stats(driverClass, argset, driver_settings):
  107. """Run test with given driver"""
  108. start = time.time()
  109. d = driverClass()
  110. d.setup(driver_settings, only_own=True)
  111. d.run(argset)
  112. return (d.data, d.duration, time.time() - d.duration - start)
  113. def get_data(driverClass, argset, driver_settings):
  114. """Run test with given driver"""
  115. d = driverClass()
  116. d.setup(driver_settings, only_own=True)
  117. d.run(argset)
  118. return d.data
  119. # ########################################################################### #
  120. # ## The Pattern ## #
  121. # ########################################################################### #
  122. class _BaseRuleOp():
  123. def __init__(self, items, item_ok):
  124. self._items = items
  125. self._item_ok = item_ok
  126. def _eval(self, item):
  127. try: # it's a pattern! (recurse)
  128. return RuleOp.Match(item, self._item_ok)
  129. except ValueError: # no, it's something else...
  130. return self._item_ok(item)
  131. def __nonzero__(self):
  132. try:
  133. return self._match()
  134. except TypeError:
  135. raise ValueError("items must be an iterable: %r" % self._items)
  136. class RuleOp():
  137. class ALL(_BaseRuleOp):
  138. def _match(self):
  139. return all(self._eval(item) for item in self._items)
  140. class ANY(_BaseRuleOp):
  141. def _match(self):
  142. return any(self._eval(item) for item in self._items)
  143. @staticmethod
  144. def Match(pattern, item_ok):
  145. """Evaluate set of logically structured patterns using passed function.
  146. pattern has form of `(op, [item1, item2, ...])` where op can be any of
  147. pre-defined logical operators (`ALL`/`ANY`, I doubt you will ever need
  148. more) and item_ok is a function that will be used to evaluate each one
  149. in the list. In case an itemN is actually pattern as well, it will be
  150. recursed into, passing the item_ok on and on.
  151. Note that there is no data to evaluate "against", you can use closure
  152. if you need to do that.
  153. """
  154. try:
  155. op, items = pattern
  156. except TypeError:
  157. raise ValueError("pattern is not a tuple: %r" % pattern)
  158. try:
  159. assert issubclass(op, _BaseRuleOp)
  160. except TypeError:
  161. raise ValueError("invalid operator: %r" % op)
  162. except AssertionError:
  163. raise ValueError("invalid operator class: %s" % op.__name__)
  164. return bool(op(items, item_ok))
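# Illustrative sketch of `RuleOp.Match` with a closure over some data
# (the data and keys below are made up for the example):
#
#     data = {'colors': ['red', 'blue'], 'size': 4}
#     def item_ok(key):
#         return key in data
#     pattern = (RuleOp.ALL, ['size',
#                             (RuleOp.ANY, ['colors', 'shades'])])
#     RuleOp.Match(pattern, item_ok)  # True: 'size' plus at least one of the rest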
  165. # ########################################################################### #
  166. # ## The Path ## #
  167. # ########################################################################### #
  168. class DictPath():
  169. """Mixin that adds "path-like" behavior to the top dict of dicts.
  170. Use this class as a mixin for a deep dict-like structure and you can access
  171. the elements using a path. For example:
  172. class MyData(dict, DictPath):
  173. pass
  174. d = MyData({
  175. 'name': 'Joe',
  176. 'age': 34,
  177. 'ssn': {
  178. 'number': '012 345 678',
  179. 'expires': '10-01-16',
  180. },
  181. })
  182. print ("%s's ssn number %s will expire on %s"
  183. % (d.getpath('/name'),
  184. d.getpath('/ssn/number'),
  185. d.getpath('/ssn/expires')))
  186. # Joe's ssn number 012 345 678 will expire on 10-01-16
  187. """
  188. DIV = "/"
  189. class Path():
  190. def __init__(self, path, div):
  191. self.DIV = div
  192. self._path = path
  193. def _validate(self):
  194. try:
  195. assert self._path.startswith(self.DIV)
  196. except (AttributeError, AssertionError):
  197. raise ValueError("invalid path: %r" % self._path)
  198. def stripped(self):
  199. return self._path.lstrip(self.DIV)
  200. @classmethod
  201. def __s2path(cls, path):
  202. return cls.Path(path, cls.DIV)
  203. @classmethod
  204. def __err_path_not_found(cls, path):
  205. raise KeyError("path not found: %s" % path)
  206. @classmethod
  207. def __getitem(cls, dct, key):
  208. if cls.DIV in key:
  209. frag, rest = key.split(cls.DIV, 1)
  210. subdct = dct[frag]
  211. result = cls.__getitem(subdct, rest)
  212. else:
  213. result = dct[key]
  214. return result
  215. @classmethod
  216. def __setitem(cls, dct, key, value):
  217. if cls.DIV not in key:
  218. dct[key] = value
  219. else:
  220. frag, rest = key.split(cls.DIV, 1)
  221. subdct = dct[frag]
  222. cls.__setitem(subdct, rest, value)
  223. @classmethod
  224. def __delitem(cls, dct, key):
  225. if cls.DIV not in key:
  226. del dct[key]
  227. else:
  228. frag, rest = key.split(cls.DIV, 1)
  229. subdct = dct[frag]
  230. return cls.__delitem(subdct, rest)
  231. # # public methods
  232. #
  233. def getpath(self, path):
  234. try:
  235. return self.__getitem(self, self.__s2path(path).stripped())
  236. except (TypeError, KeyError):
  237. self.__err_path_not_found(path)
  238. def setpath(self, path, value):
  239. try:
  240. self.__setitem(self, self.__s2path(path).stripped(), value)
  241. except (TypeError, KeyError):
  242. self.__err_path_not_found(path)
  243. def delpath(self, path):
  244. try:
  245. self.__delitem(self, self.__s2path(path).stripped())
  246. except (TypeError, KeyError):
  247. self.__err_path_not_found(path)
  248. def ispath(self, path):
  249. try:
  250. self.getpath(path)
  251. return True
  252. except KeyError:
  253. return False
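# Illustrative sketch of the remaining path helpers (`MyData` is the
# hypothetical sub-class from the docstring above):
#
#     d = MyData({'ssn': {'number': '012 345 678'}})
#     d.ispath('/ssn/number')             # True
#     d.setpath('/ssn/expires', '10-01-16')
#     d.delpath('/ssn/number')
#     d.getpath('/ssn/number')            # raises KeyError ("path not found")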
  254. # ########################################################################### #
  255. # ## The Case ## #
  256. # ########################################################################### #
  257. class TinyCase(dict, DictPath):
  258. """Abstraction of the smallest unit of testing.
  259. This class is intended to hold relevant data after the actual test
  260. and apply transformations (hacks) as defined by rules.
  261. The data form (self) is:
  262. {
  263. 'argset': {}, # argset as fed into `BaseTestDriver.run`
  264. 'oracle': {}, # data as returned from oracle driver's `run()`
  265. 'result': {}, # data as returned from result driver's `run()`
  266. 'oname': "", # name of oracle driver's class
  267. 'rname': "" # name of result driver's class
  268. }
  269. The transformation is done using the `TinyCase.hack()` method, to which
  270. a list of rules is passed. Each rule is applied, and rules are expected
  271. to be in the following form:
  272. {
  273. 'drivers': [{}], # list of structures to match against self
  274. 'argsets': [{}], # -ditto-
  275. 'action_name': <Arg> # an action name with argument
  276. }
  277. For each of the patterns ('drivers', 'argsets') present, a match against
  278. self is done using the function `hoover.dataMatch`, which is basically a
  279. recursive test whether the pattern is a subset of the case. If no result
  280. is negative (i.e. both patterns missing results in a match), any known
  281. actions included in the rule are called. Along with the action name, a
  282. list or a dict providing the necessary parameters is expected; this is
  283. simply passed as the only parameter to the corresponding method.
  284. Actions use a specific way to address elements in the structures
  285. saved in the oracle and result keys, provided by `DictPath`, which makes
  286. it easy to define rules for arbitrarily complex dictionary structures.
  287. The format resembles a Unix path, where "directories" are dict
  288. keys and "root" is the `self` of the `TinyCase` instance:
  289. /oracle/temperature
  290. /result/stats/word_count
  291. Refer to each action's docstring for a description of its function
  292. as well as the expected format of its argument. The name of the action
  293. as used in the rule is the name of the method without the leading 'a_'.
  294. Warning: All actions will silently ignore any paths that are invalid
  295. or leading to non-existent data!
  296. (This does not apply to a path leading to `None`.)
  297. """
  298. def a_exchange(self, action):
  299. """Exchange value A for value B.
  300. Expects a dict, where key is a tuple of two values `(a, b)` and
  301. value is a list of paths. For each key, it goes through the
  302. paths and if the value equals `a` it is set to `b`.
  303. """
  304. for (oldv, newv), paths in action.iteritems():
  305. for path in paths:
  306. try:
  307. curv = self.getpath(path)
  308. except KeyError:
  309. continue
  310. else:
  311. if curv == oldv:
  312. self.setpath(path, newv)
  313. def a_format_str(self, action):
  314. """Convert value to a string using format string.
  315. Expects a dict, where key is a format string, and value is a list
  316. of paths. For each record, the paths are traversed, and the value is
  317. converted to a string using the format string and the `%` operator.
  318. This is especially useful for floats which you may want to trim
  319. before comparison, since direct comparison of floats is unreliable
  320. on some architectures.
  321. """
  322. for fmt, paths in action.iteritems():
  323. for path in paths:
  324. if self.ispath(path):
  325. new = fmt % self.getpath(path)
  326. self.setpath(path, new)
  327. def a_even_up(self, action):
  328. """Even up structure of both dictionaries.
  329. Expects a list of two-element tuples `('/dict/a', '/dict/b')`
  330. containing pairs of paths to simple dictionaries.
  331. Then the two dicts are altered to have the same structure: if a key
  332. in dict "a" is missing in dict "b", it is set to `None` in "b" and
  333. vice versa (only when the value under the lone key is itself `None`).
  334. """
  335. for patha, pathb in action:
  336. try:
  337. a = self.getpath(patha)
  338. b = self.getpath(pathb)
  339. except KeyError:
  340. continue
  341. else:
  342. for key in set(a.keys()) | set(b.keys()):
  343. if key in a and key in b:
  344. pass # nothing to do here
  345. elif key in a and a[key] is None:
  346. b[key] = None
  347. elif key in b and b[key] is None:
  348. a[key] = None
  349. else:
  350. pass # bailout: odd key but value is *not* None
  351. def a_remove(self, action):
  352. """Remove elements from structure.
  353. Expects a simple list of paths that are simply deleted from the
  354. structure.
  355. """
  356. for path in action:
  357. if self.ispath(path):
  358. self.delpath(path)
  359. def a_round(self, action):
  360. """Round a (presumably) float using tha `float()` built-in.
  361. Expects dict with precision (ndigits, after the dot) as a key and
  362. list of paths as value.
  363. """
  364. for ndigits, paths in action.iteritems():
  365. for path in paths:
  366. try:
  367. f = self.getpath(path)
  368. except KeyError:
  369. pass
  370. else:
  371. self.setpath(path, round(f, ndigits))
  372. known_actions = {'remove': a_remove,
  373. 'even_up': a_even_up,
  374. 'format_str': a_format_str,
  375. 'exchange': a_exchange,
  376. 'round': a_round}
  377. def hack(self, ruleset):
  378. """Apply action from each rule, if patterns match."""
  379. def driver_matches():
  380. if 'drivers' not in rule:
  381. return True
  382. else:
  383. return any(dataMatch(p, self)
  384. for p in rule['drivers'])
  385. def argset_matches():
  386. if 'argsets' not in rule:
  387. return True
  388. else:
  389. return any(dataMatch(p, self)
  390. for p in rule['argsets'])
  391. matched = False
  392. cls = self.__class__
  393. for rule in ruleset:
  394. if driver_matches() and argset_matches():
  395. matched = True
  396. for action_name in cls.known_actions:
  397. if action_name in rule:
  398. cls.known_actions[action_name](self, rule[action_name])
  399. return matched
  400. # ########################################################################### #
  401. # ## Drivers ## #
  402. # ########################################################################### #
  403. class DriverError(Exception):
  404. """Error encountered when obtaining driver data"""
  405. def __init__(self, message, driver):
  406. self.message = message
  407. self.driver = driver
  408. def __str__(self):
  409. result = ("\n\n"
  410. " type: %s\n"
  411. " message: %s\n"
  412. " driver: %s\n"
  413. " args: %s\n"
  414. " settings: %s\n"
  415. % (self.message.__class__.__name__,
  416. self.message,
  417. self.driver.__class__.__name__,
  418. self.driver._args,
  419. self.driver._settings))
  420. return result
  421. class DriverDataError(Exception):
  422. """Error encountered when decoding or normalizing driver data"""
  423. def __init__(self, exception, driver):
  424. self.exception = exception
  425. self.driver = driver
  426. def __str__(self):
  427. result = ("%s: %s\n"
  428. " class: %s\n"
  429. " args: %s\n"
  430. " data: %s\n"
  431. % (self.exception.__class__.__name__, self.exception,
  432. self.driver.__class__.__name__,
  433. json.dumps(self.driver._args, sort_keys=True, indent=4),
  434. json.dumps(self.driver.data, sort_keys=True, indent=4)))
  435. return result
  436. class BaseTestDriver(object):
  437. """Base class for test drivers used by `hoover.regression_test` and others.
  438. This class is used to create a test driver, which is an abstraction
  439. and encapsulation of the system being tested. Or, the driver in fact
  440. can be just a "mock" driver that provides data for comparison with
  441. a "real" driver.
  442. The minimum you need to create a working driver is to implement a working
  443. `self._get_data` method that sets `self.data`. Any exception from this
  444. method will be re-raised as DriverError with additional information.
  445. Also, you can set self.duration (in fractional seconds, as returned by
  446. the standard time module) in the _get_data method; if you don't, it is
  447. measured for you as the time the method call took. This is useful if you
  448. need to fetch the data from some other driver or a gateway and you
  449. have a better mechanism to determine how long the action would take "in
  450. real life".
  451. For example, if we are testing a Java library using a Py4J gateway,
  452. we need to do some more conversions outside our testing code just to
  453. be able to use the data in our Python test. We don't want to include
  454. this in the "duration", since we are measuring the Java library, not the
  455. Py4J GW (or our ability to perform the conversions optimally). So we
  456. do our measurement within the Java machine and pass the result to the
  457. Python driver.
  458. Optionally, you can:
  459. * make an __init__ and, after calling the base __init__, set
  460. * `self._mandatory_args`, a list of keys that need to be present
  461. in the `args` argument to `run()`
  462. * and `self._mandatory_settings`, a list of keys that need to be
  463. present in the `settings` argument to `setup()`
  464. * implement the methods
  465. * `_decode_data` and `_normalize_data`, which are intended to decode
  466. the data from whatever raw format it is received in, and to prepare it
  467. for comparison in the test,
  468. * and `_check_data`, to allow for early detection of failure,
  469. from which any exception is re-raised as a DriverDataError with
  470. some additional info
  471. * set "bailouts", a list of functions which, when passed the "args"
  472. argument, return True to indicate that the driver is not able to
  473. process these values (see below for an explanation). If any of
  474. these functions returns True, NotImplementedError is raised.
  475. The expected workflow when using the driver is:
  476. # 1. sub-class hoover.BaseTestDriver
  477. # 2. prepare settings and args
  478. MyDriver.check_values(args) # optional, to force bailouts ASAP
  479. d = MyDriver()
  480. d.setup(settings)
  481. d.run(args)
  482. assert d.data, "no data" # evaluate the result...
  483. assert d.duration < 1 # duration of _get_data in seconds
  484. Note on bailouts: The typical strategy for which the driver is intended
  485. is that each possible combination of `args` is exhausted, and results from
  486. multiple drivers are compared to evaluate whether the driver, i.e. the
  487. system in question, is O.K.
  488. The bailouts mechanism is useful in cases where, for a certain system,
  489. a valid combination of arguments would bring the same result as another,
  490. so there is basically no value in testing both of them.
  491. An example might be a system that does not support a binary flag and
  492. behaves as if it were "on": you can simply make the test driver
  493. accept the option but "bail out" any time it is "off", thereby
  494. skipping the time-and-resource-consuming test.
  495. """
  496. bailouts = []
  497. ##
  498. # internal methods
  499. #
  500. def __init__(self):
  501. self.data = {}
  502. self.duration = None
  503. self._args = {}
  504. self._mandatory_args = []
  505. self._mandatory_settings = []
  506. self._settings = {}
  507. self._setup_ok = False
  508. def __check_mandatory(self):
  509. """validate before run()"""
  510. for key in self._mandatory_args:
  511. assert key in self._args, "missing arg: '%s'" % key
  512. for key in self._mandatory_settings:
  513. assert key in self._settings, "missing setting: '%s'" % key
  514. def __cleanup_data(self):
  515. """remove hidden data; e.g. what was only there for _check_data"""
  516. for key in self.data.keys():
  517. if key.startswith("_"):
  518. del self.data[key]
  519. ##
  520. # virtual methods
  521. #
  522. def _check_data(self):
  523. """Early check for failure"""
  524. pass
  525. def _decode_data(self):
  526. """Decode from raw data as brought by _get_data"""
  527. pass
  528. def _normalize_data(self):
  529. """Preare data for comparison (e.g. sort, split, trim...)"""
  530. pass
  531. ##
  532. # public methods
  533. #
  534. @classmethod
  535. def check_values(cls, args=None):
  536. """check args in advance before running or setting up anything"""
  537. for fn in cls.bailouts:
  538. if fn(args):
  539. raise NotImplementedError(inspect.getsource(fn))
  540. def setup(self, settings, only_own=False):
  541. """Load settings. only_own means that only settings that belong to us
  542. are loaded ("DriverClass.settingName", the first discriminating part
  543. is removed)"""
  544. if only_own:
  545. for ckey in settings.keys():
  546. driver_class_name, setting_name = ckey.split(".", 1)
  547. if self.__class__.__name__ == driver_class_name:
  548. self._settings[setting_name] = settings[ckey]
  549. else:
  550. self._settings = settings
  551. self._setup_ok = True
  552. def run(self, args):
  553. """validate, run and store data"""
  554. self._args = args
  555. assert self._setup_ok, "run() before setup()?"
  556. self.__class__.check_values(self._args)
  557. self.__check_mandatory()
  558. start = time.time()
  559. try:
  560. self._get_data() # run the test, i.e. obtain raw data
  561. except StandardError as e:
  562. raise DriverError(e, self)
  563. self.duration = (time.time() - start if self.duration is None
  564. else self.duration)
  565. try:
  566. self._decode_data() # decode raw data
  567. self._normalize_data() # normalize decoded data
  568. self._check_data() # perform arbitrary checking
  569. except StandardError as e:
  570. raise DriverDataError(e, self)
  571. self.__cleanup_data() # cleanup (remove data['_*'])
  572. class MockDriverTrue(BaseTestDriver):
  573. """A simple mock driver, always returning True"""
  574. def _get_data(self):
  575. self.data = True
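# Illustrative sketch of a more complete driver; everything below is
# hypothetical (`query_system` stands for whatever call fetches raw data):
#
#     class MyDriver(BaseTestDriver):
#         bailouts = [lambda args: args.get('mode') == 'legacy']
#         def __init__(self):
#             super(MyDriver, self).__init__()
#             self._mandatory_args = ['query']
#             self._mandatory_settings = ['host']
#         def _get_data(self):
#             self.data = query_system(self._settings['host'],
#                                      self._args['query'])
#         def _normalize_data(self):
#             self.data['items'] = sorted(self.data['items'])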
  576. # ########################################################################### #
  577. # ## Helpers ## #
  578. # ########################################################################### #
  579. class StatCounter(object):
  580. """A simple counter with formulas support."""
  581. def __init__(self):
  582. self.generic_stats = {}
  583. self.driver_stats = {}
  584. self.formulas = {}
  585. self._born = time.time()
  586. def _register(self, dname):
  587. self.driver_stats[dname] = {
  588. 'calls': 0,
  589. 'rhacks': 0,
  590. 'ohacks': 0,
  591. 'duration': 0,
  592. 'overhead': 0
  593. }
  594. ##
  595. # Formulas
  596. #
  597. # cumulative duration/overhead; just round to ms
  598. self.add_formula(dname + '_overhead',
  599. lambda g, d: int(1000 * d[dname]['overhead']))
  600. self.add_formula(dname + '_duration',
  601. lambda g, d: int(1000 * d[dname]['duration']))
  602. # average (per driver call) overhead/duration
  603. self.add_formula(
  604. dname + '_overhead_per_call',
  605. lambda g, d: int(1000 * d[dname]['overhead'] / d[dname]['calls'])
  606. )
  607. self.add_formula(
  608. dname + '_duration_per_call',
  609. lambda g, d: int(1000 * d[dname]['duration'] / d[dname]['calls'])
  610. )
  611. def gtotal_drivertime(g, d):
  612. driver_time = (sum(s['overhead'] for s in d.values())
  613. + sum(s['duration'] for s in d.values()))
  614. return int(1000 * driver_time)
  615. def gtotal_loop_overhead(g, d):
  616. driver_time = gtotal_drivertime(g, d)
  617. onnext_time = int(1000 * g['on_next'])
  618. age = int(1000 * (time.time() - self._born))
  619. return age - driver_time - onnext_time
  620. # grand totals in times: driver time, loop overhead
  621. self.add_formula('gtotal_drivertime', gtotal_drivertime)
  622. self.add_formula('gtotal_loop_overhead', gtotal_loop_overhead)
  623. self.add_formula('gtotal_loop_onnext',
  624. lambda g, d: int(1000 * g['on_next']))
  625. # percentage of cases where at least one hack was applied
  626. self.add_formula(
  627. 'cases_hacked',
  628. lambda g, d: round(100 * float(g['hacked_cases']) / g['cases'], 2)
  629. )
  630. def _computed_stats(self):
  631. computed = dict.fromkeys(self.formulas.keys())
  632. for fname, fml in self.formulas.iteritems():
  633. try:
  634. v = fml(self.generic_stats, self.driver_stats)
  635. except ZeroDivisionError:
  636. v = None
  637. computed[fname] = v
  638. return computed
  639. def add_formula(self, vname, formula):
  640. """Add a function to work with generic_stats, driver_stats."""
  641. self.formulas[vname] = formula
  642. def add(self, vname, value):
  643. """Add a value to generic stat counter."""
  644. if vname in self.generic_stats:
  645. self.generic_stats[vname] += value
  646. else:
  647. self.generic_stats[vname] = value
  648. def add_for(self, dclass, vname, value):
  649. """Add a value to driver stat counter."""
  650. dname = dclass.__name__
  651. if dname not in self.driver_stats:
  652. self._register(dname)
  653. if vname in self.driver_stats[dname]:
  654. self.driver_stats[dname][vname] += value
  655. else:
  656. self.driver_stats[dname][vname] = value
  657. def count(self, vname):
  658. """Alias to add(vname, 1)"""
  659. self.add(vname, 1)
  660. def count_for(self, dclass, vname):
  661. """Alias to add_for(vname, 1)"""
  662. self.add_for(dclass, vname, 1)
  663. def all_stats(self):
  664. """Compute stats from formulas and add them to colledted data."""
  665. stats = self.generic_stats
  666. for dname, dstats in self.driver_stats.iteritems():
  667. for key, value in dstats.iteritems():
  668. stats[dname + "_" + key] = value
  669. stats.update(self._computed_stats())
  670. return stats
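# Illustrative sketch of StatCounter on its own (`OracleDriver` is a
# hypothetical driver class; `regression_test` feeds the counter the same
# way, including the 'on_next', 'cases' and 'hacked_cases' counters used
# by the built-in formulas):
#
#     counter = StatCounter()
#     counter.count_for(OracleDriver, 'calls')
#     counter.add_for(OracleDriver, 'duration', 0.25)
#     counter.add_for(OracleDriver, 'overhead', 0.05)
#     counter.add('on_next', 0.0)
#     counter.add('cases', 1)
#     counter.add('hacked_cases', 0)
#     counter.add_formula('oracle_ms',
#                         lambda g, d: int(1000 * d['OracleDriver']['duration']))
#     stats = counter.all_stats()  # plain dict, formulas already evaluated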
  671. class Tracker(dict):
  672. """Error tracker to allow for usable reports from huge regression tests.
  673. Best used as a result bearer from `regression_test`, this class keeps
  674. a simple in-memory "database" of errors seen during the regression
  675. test, and implements a few methods to access the data.
  676. The basic usage is:
  677. 1. Instantiate (no parameters)
  678. 2. Each time you have a result of a test, you pass it to the `update()`
  679. method along with the argument set (as a single object, typically
  680. a dict) that caused the error.
  681. If the boolean value of the result is False, the object is thrown away
  682. and nothing happens. Otherwise, its string value is used as a key
  683. under which the argument set is saved.
  684. As you can see, the string is supposed to be ''as deterministic
  685. as possible'', i.e. it should provide only as much information
  686. about the error as is necessary. Do not include any timestamps
  687. or "volatile" values.
  688. 3. At the final stage, you can retrieve statistics such as how many
  689. (distinct) errors have been recorded, the duration of the whole test,
  690. how many times `update()` was called, etc.
  691. 4. Optionally, you can also call `format_report()` to get a nicely
  692. formatted report with list of arguments for each error string.
  693. 5. Since in bigger tests argument lists can grow really large,
  694. complete lists are not normally printed. Instead, you can use
  695. `write_args_csv()`, which will create one CSV per error,
  696. named after the first 7 chars of its SHA1 (inspired by Git).
  697. Note that you need to pass an existing writable folder path.
  698. """
  699. ##
  700. # internal methods
  701. #
  702. def __init__(self):
  703. self._start = time.time()
  704. self._db = {}
  705. self.tests_done = 0
  706. self.tests_passed = 0
  707. self.argsets_done = 0
  708. self.driver_stats = {}
  709. def _csv_fname(self, errstr, prefix):
  710. """Format name of file for this error string"""
  711. return '%s/%s.csv' % (prefix, self._eid(errstr))
  712. def _eid(self, errstr):
  713. """Return EID for the error string (first 7 chars of SHA1)."""
  714. return hashlib.sha1(errstr).hexdigest()[:7]
  715. def _insert(self, errstr, argset):
  716. """Insert the argset into DB."""
  717. if errstr not in self._db:
  718. self._db[errstr] = []
  719. self._db[errstr].append(argset)
  720. def _format_error(self, errstr, max_aa=0):
  721. """Format single error for output."""
  722. argsets_affected = self._db[errstr]
  723. num_aa = len(argsets_affected)
  724. # trim if list is too long for Jenkins
  725. argsets_shown = argsets_affected
  726. if max_aa and (num_aa > max_aa):
  727. div = ["[...] not showing %s cases, see %s.csv for full list"
  728. % (num_aa - max_aa, self._eid(errstr))]
  729. argsets_shown = argsets_affected[0:max_aa] + div
  730. # format error
  731. formatted_aa = "\n".join([str(arg) for arg in argsets_shown])
  732. return ("~~~ ERROR FOUND (%s) ~~~~~~~~~~~~~~~~~~~~~~~~~\n"
  733. "--- error string: -----------------------------------\n%s\n"
  734. "--- argsets affected (%d) ---------------------------\n%s\n"
  735. % (self._eid(errstr), errstr, num_aa, formatted_aa))
  736. ##
  737. # public methods
  738. #
  739. def errors_found(self):
  740. """Return number of non-distinct errors in db."""
  741. return bool(self._db)
  742. def format_report(self, max_aa=0):
  743. """Return complete report formatted as string."""
  744. error_list = "\n".join([self._format_error(e, max_aa=max_aa)
  745. for e in self._db])
  746. return ("Found %(total_errors)s (%(distinct_errors)s distinct) errors"
  747. " in %(tests_done)s tests with %(argsets)s argsets"
  748. " (duration: %(time)ss):"
  749. % self.getstats()
  750. + "\n\n" + error_list)
  751. def getstats(self):
  752. """Return basic and driver stats
  753. argsets_done - this should must be raised by outer code,
  754. once per each unique argset
  755. tests_done - how many times Tracker.update() was called
  756. distinct_errors - how many distinct errors (same `str(error)`)
  757. were seen by Tracker.update()
  758. total_errors - how many times `Tracker.update()` saw an
  759. error, i.e. how many argsets are in DB
  760. time - how long since init (seconds)
  761. """
  762. def total_errors():
  763. return reduce(lambda x, y: x + len(y), self._db.values(), 0)
  764. stats = {
  765. "argsets": self.argsets_done,
  766. "tests_done": self.tests_done,
  767. "distinct_errors": len(self._db),
  768. "total_errors": total_errors(),
  769. "time": int(time.time() - self._start)
  770. }
  771. stats.update(self.driver_stats)
  772. return stats
  773. def update(self, error, argset):
  774. """Update tracker with test result.
  775. If `bool(error)` is true, it is considered error and argset
  776. is inserted to DB with `str(error)` as key. This allows for later
  777. sorting and analysis.
  778. """
  779. self.tests_done += 1
  780. if error:
  781. errstr = str(error)
  782. self._insert(errstr, argset)
  783. def write_stats_csv(self, fname):
  784. """Write stats to a simple one row (plus header) CSV."""
  785. stats = self.getstats()
  786. colnames = sorted(stats.keys())
  787. with open(fname, 'a') as fh:
  788. cw = csv.DictWriter(fh, colnames)
  789. cw.writerow(dict(zip(colnames, colnames))) # header
  790. cw.writerow(stats)
  791. def write_args_csv(self, prefix=''):
  792. """Write out a set of CSV files, one per distinctive error.
  793. Each CSV is named with error EID (first 7 chars of SHA1) and lists
  794. all argument sets affected by this error. This is supposed to make
  795. easier to further analyse impact and trigerring values of errors,
  796. perhaps using a table processor software."""
  797. def get_all_colnames():
  798. cn = {}
  799. for errstr, affected in self._db.iteritems():
  800. for argset in affected:
  801. cn.update(dict.fromkeys(argset.keys()))
  802. return sorted(cn.keys())
  803. all_colnames = get_all_colnames()
  804. for errstr in self._db:
  805. with open(self._csv_fname(errstr, prefix), 'a') as fh:
  806. cw = csv.DictWriter(fh, all_colnames)
  807. cw.writerow(dict(zip(all_colnames, all_colnames))) # header
  808. for argset in self._db[errstr]:
  809. cw.writerow(argset)
  810. def dataMatch(pattern, data, rmax=10, _r=0):
  811. """Check if data structure matches a pattern data structure.
  812. Supports lists, dictionaries and scalars (int, float, string).
  813. For scalars, simple `==` is used. For lists, "to match" means that every
  814. item of the pattern matches some item of the data (e.g. the data `[1, 2, 3, 4]`
  815. matches the pattern `[3, 2]`). Both lists and dictionaries are matched recursively.
  816. """
  817. def listMatch(pattern, data):
  818. """Match list-like objects"""
  819. assert all([hasattr(o, 'append') for o in [pattern, data]])
  820. results = []
  821. for pv in pattern:
  822. if any([dataMatch(pv, dv, _r=_r+1) for dv in data]):
  823. results.append(True)
  824. else:
  825. results.append(False)
  826. return all(results)
  827. def dictMatch(pattern, data):
  828. """Match dict-like objects"""
  829. assert all([hasattr(o, 'iteritems') for o in [pattern, data]])
  830. results = []
  831. try:
  832. for pk, pv in pattern.iteritems():
  833. results.append(dataMatch(pv, data[pk], _r=_r+1))
  834. except KeyError:
  835. results.append(False)
  836. return all(results)
  837. if _r == rmax:
  838. raise RuntimeError("recursion limit hit")
  839. result = None
  840. if pattern == data:
  841. result = True
  842. else:
  843. for handler in [dictMatch, listMatch]:
  844. try:
  845. result = handler(pattern, data)
  846. except AssertionError:
  847. continue
  848. return result
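# Illustrative examples of the matching semantics described above:
#
#     dataMatch({'a': 1}, {'a': 1, 'b': 2})          # True (pattern keys are a subset)
#     dataMatch({'a': [3, 2]}, {'a': [1, 2, 3, 4]})  # True (pattern items all present)
#     dataMatch({'a': {'b': 1}}, {'a': {'b': 2}})    # False (values differ)
#     dataMatch(5, 5)                                # True (plain ==)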
  849. def jsDump(data):
  850. """A human-readable JSON dump."""
  851. return json.dumps(data, sort_keys=True, indent=4,
  852. separators=(',', ': '))
  853. def jsDiff(dira, dirb, namea="A", nameb="B", chara="a", charb="b"):
  854. """JSON-based human-readable diff of two data structures.
  855. '''BETA''' version.
  856. jsDiff is based on unified diff of two human-readable JSON dumps except
  857. that instead of showing line numbers and context based on proximity to
  858. the changed lines, it prints only the context important from the data
  859. structure's point of view.
  860. The goal is to be able to quickly tell the story of what has changed
  861. where in the structure, no matter the size and complexity of the data set.
  862. For example:
  863. a = {
  864. 'w': {1: 2, 3: 4},
  865. 'x': [1, 2, 3],
  866. 'y': [3, 1, 2]
  867. }
  868. b = {
  869. 'w': {1: 2, 3: 4},
  870. 'x': [1, 1, 3],
  871. 'y': [3, 1, 3]
  872. }
  873. print jsDiff(a, b)
  874. will output:
  875. aaa ~/A
  876. "x": [
  877. a 2,
  878. "y": [
  879. a 2
  880. bbb ~/B
  881. "x": [
  882. b 1,
  883. "y": [
  884. b 3
  885. Notice that the final output somewhat resembles the traditional unified
  886. diff, so to avoid confusion, +/- is changed to a/b (the characters can
  887. be provided as well as the names A/B).
  888. """
  889. def compress(lines):
  890. def is_body(line):
  891. return line.startswith(("-", "+", " "))
  892. def is_diff(line):
  893. return line.startswith(("-", "+"))
  894. def is_diffA(line):
  895. return line.startswith("-")
  896. def is_diffB(line):
  897. return line.startswith("+")
  898. def is_context(line):
  899. return line.startswith(" ")
  900. def is_hdr(line):
  901. return line.startswith(("@@", "---", "+++"))
  902. def is_hdr_hunk(line):
  903. return line.startswith("@@")
  904. def is_hdr_A(line):
  905. return line.startswith("---")
  906. def is_hdr_B(line):
  907. return line.startswith("+++")
  908. class Level(object):
  909. def __init__(self, hint):
  910. self.hint = hint
  911. self.hinted = False
  912. def __str__(self):
  913. return str(self.hint)
  914. def get_hint(self):
  915. if not self.hinted:
  916. self.hinted = True
  917. return self.hint
  918. class ContextTracker(object):
  919. def __init__(self):
  920. self.trace = []
  921. self.last_line = None
  922. self.last_indent = -1
  923. def indent_of(self, line):
  924. meat = line[1:].lstrip(" ")
  925. ind = len(line) - len(meat) - 1
  926. return ind
  927. def check(self, line):
  928. indent = self.indent_of(line)
  929. if indent > self.last_indent:
  930. self.trace.append(Level(self.last_line))
  931. elif indent < self.last_indent:
  932. self.trace.pop()
  933. self.last_line = line
  934. self.last_indent = indent
  935. def get_hint(self):
  936. return self.trace[-1].get_hint()
  937. buffa = []
  938. buffb = []
  939. ct = ContextTracker()
  940. for line in lines:
  941. if is_hdr_hunk(line):
  942. continue
  943. elif is_hdr_A(line):
  944. line = line.replace("---", chara * 3, 1)
  945. buffa.insert(0, line)
  946. elif is_hdr_B(line):
  947. line = line.replace("+++", charb * 3, 1)
  948. buffb.insert(0, line)
  949. elif is_body(line):
  950. ct.check(line)
  951. if is_diff(line):
  952. hint = ct.get_hint()
  953. if hint:
  954. buffa.append(hint)
  955. buffb.append(hint)
  956. if is_diffA(line):
  957. line = line.replace("-", chara, 1)
  958. buffa.append(line)
  959. elif is_diffB(line):
  960. line = line.replace("+", charb, 1)
  961. buffb.append(line)
  962. else:
  963. raise AssertionError("difflib.unified_diff emitted"
  964. " unknown format (%s chars):\n%s"
  965. % (len(line), line))
  966. return buffa + buffb
  967. dumpa = jsDump(dira)
  968. dumpb = jsDump(dirb)
  969. udiff = difflib.unified_diff(dumpa.split("\n"), dumpb.split("\n"),
  970. "~/" + namea, "~/" + nameb,
  971. n=10000, lineterm='')
  972. return "\n".join(compress([line for line in udiff]))
  973. class Cartman(object):
  974. """Create argument sets from ranges (or ay iterators) of values.
  975. This class is to enable easy definition and generation of dictionary
  976. argument sets using Cartesian product. You only need to define:
  977. * structure of argument set (can be more than just flat dict)
  978. * ranges, or arbitrary iterators of values on each "leaf" of the
  979. argument set
  980. Since there is an expectation that any argument can have any kind of value,
  981. even other iterables, the pure logic of "iterate it if you can"
  982. is insufficient. Instead, the definition is divided in two parts:
  983. * scheme, which is a "prototype" of a final argument set, except
  984. that for each value that will change, a `Cartman.Iterable`
  985. sentinel is used. For each leaf that is constant, `Cartman.Scalar`
  986. is used
  987. * source, which has the same structure, except that where the scheme
  988. has `Iterable`, an iterable object is expected, whereas in places
  989. where `Scalar` is used, a value is assigned that does not change
  990. during iteration.
  991. Finally, when such an instance is used in a loop, argument sets are
  992. generated using the Cartesian product of all iterables found. This allows
  993. for relatively easy definition of complex scenarios.
  994. Consider this example:
  995. You have a system (wrapped up in a test driver) that takes a ''size''
  996. argument, which is composed of ''width'', ''height'' and ''depth'',
  997. each an integer ranging from 1 to 100, and a ''color'' that can
  998. be "white", "black" or "yellow".
  999. For a test using all-combinations strategy, you will need to generate
  1000. 100 * 100 * 100 * 3 argument sets, i.e. 3M tests.
  1001. All you need to do is:
  1002. scheme = {
  1003. 'size': {
  1004. 'width': Cartman.Iterable,
  1005. 'height': Cartman.Iterable,
  1006. 'depth': Cartman.Iterable,
  1007. },
  1008. 'color': Cartman.Iterable,
  1009. }
  1010. source = {
  1011. 'size': {
  1012. 'width': range(1, 101),
  1013. 'height': range(1, 101),
  1014. 'depth': range(1, 101),
  1015. },
  1016. 'color': ['white', 'black', 'yellow'],
  1017. }
  1018. c = Cartman(source, scheme)
  1019. for argset in c:
  1020. result = my_test(argset)
  1021. # assert ...
  1022. The main advantage is that you can separate the definition from
  1023. the code, and you can keep your iterators as big or as small as
  1024. needed, and add / remove values.
  1025. Also in case your parameters vary in structure over time, or from
  1026. one test to another, it gets much easier to keep up with changes
  1027. without much jumping through hoops.
  1028. Note: `Cartman.Scalar` is provided mainly to make your definitions
  1029. more readable. Following constructions are functionally equal:
  1030. c = Cartman({'a': 1}, {'a': Cartman.Scalar})
  1031. c = Cartman({'a': [1]}, {'a': Cartman.Iterable})
  1032. In the future, however, this might change, mainly in case
  1033. optimization becomes possible based on what was used.
  1034. """
  1035. # TODO: support for arbitrary ordering (profile / nginx)
  1036. # TODO: implement getstats and fmtstats
  1037. # TODO: N-wise
  1038. class _BaseMark(object):
  1039. pass
  1040. class Scalar(_BaseMark):
  1041. pass
  1042. class Iterable(_BaseMark):
  1043. pass
  1044. def __init__(self, source, scheme, recursion_limit=10, _r=0):
  1045. self.source = source
  1046. self.scheme = scheme
  1047. self.recursion_limit = recursion_limit
  1048. self._r = _r
  1049. if self._r > self.recursion_limit:
  1050. raise RuntimeError("recursion limit exceeded")
  1051. # validate scheme + source and throw useful error
  1052. scheme_ok = isinstance(self.scheme, collections.Mapping)
  1053. source_ok = isinstance(self.source, collections.Mapping)
  1054. if not scheme_ok:
  1055. raise ValueError("scheme must be a mapping (e.g. dict)")
  1056. elif scheme_ok and not source_ok:
  1057. raise ValueError("scheme vs. source mismatch")
  1058. def __deepcopy__(self, memo):
  1059. return Cartman(deepcopy(self.source, memo),
  1060. deepcopy(self.scheme, memo))
  1061. def _is_mark(self, subscheme):
  1062. try:
  1063. return issubclass(subscheme, Cartman._BaseMark)
  1064. except TypeError:
  1065. return False
  1066. def _means_scalar(self, subscheme):
  1067. if self._is_mark(subscheme):
  1068. return issubclass(subscheme, Cartman.Scalar)
  1069. def _means_iterable(self, subscheme):
  1070. if self._is_mark(subscheme):
  1071. return issubclass(subscheme, Cartman.Iterable)
  1072. def _get_iterable_for(self, key):
  1073. subscheme = self.scheme[key]
  1074. subsource = self.source[key]
  1075. if self._means_scalar(subscheme):
  1076. return [subsource]
  1077. elif self._means_iterable(subscheme):
  1078. return subsource
  1079. else: # try to use it as scheme
  1080. return iter(Cartman(subsource, subscheme, _r=self._r+1))
  1081. def __iter__(self):
  1082. names = []
  1083. iterables = []
  1084. keys = self.scheme.keys()
  1085. for key in keys:
  1086. try:
  1087. iterables.append(self._get_iterable_for(key))
  1088. except KeyError:
  1089. pass # ignore that subsource mentioned by scheme is missing
  1090. else:
  1091. names.append(key)
  1092. for values in itertools.product(*iterables):
  1093. yield dict(zip(names, values))
  1094. def getstats(self):
  1095. return {}
  1096. def fmtstats(self):
  1097. return ""