A collection of Python libraries developed for testing purposes.

hoover.py 47KB

  1. # coding=utf-8
  2. import collections.abc
  3. import functools
  4. import csv
  5. import difflib
  6. import hashlib
  7. import inspect
  8. import itertools
  9. import json
  10. import operator
  11. import time
  12. from copy import deepcopy
  13. # ########################################################################### #
  14. # ## The Motor ## #
  15. # ########################################################################### #
  16. def regression_test(argsrc, tests, driver_settings=None, cleanup_hack=None,
  17. apply_hacks=None, on_next=None):
  18. """Perform regression test with argsets from `argsrc`.
  19. For each argset pulled from source, performs one comparison
  20. per driver pair in `tests`, which is a list of tuples with
  21. comparison function and pair of test driver classes: `(operator,
  22. oracle_class, result_class)`. (The classes are assumed to
  23. be sub-classes of `hoover.BaseTestDriver`.)
  24. `driver_settings` is a dictionary supposed to hold environmental
  25. values for all the drivers, the keys having form "DriverName.
  26. settingName". Each driver is then instantiated with this
  27. dict, and gets a copy of the dict with settings only intended
  28. for itself (and the "DriverName" part stripped).
  29. If a comparison fails, a report is generated using `hoover.jsDiff()`,
  30. and, along with the affected arguments, stored in a `hoover.Tracker`
  31. instance, which is finally used as the return value. This instance
  32. provides methods for basic stats as well as a method to format
  33. the final report and a helper method to export argument sets
  34. as CSV files.
  35. Supports hacks, which are data transformations performed by
  36. `hoover.TinyCase` class and are intended to avoid known bugs
  37. and anomalies (`apply_hacks`) or clean up data structures of
  38. irrelevant data (`cleanup_hack`, performed only if the comparison
  39. function provided along with driver pair is not "equals").
  40. A function can be provided as the `on_next` argument; it will be
  41. called after pulling each argument set, with the current argument set
  42. as the first argument and the previous one (or `None`) as the second.
  43. """
  44. # TODO: do not parse driver_settings thousands of times (use a view class?)
  45. on_next = on_next if on_next else lambda a, b: None
  46. apply_hacks = apply_hacks if apply_hacks else []
  47. driver_settings = driver_settings if driver_settings else {}
  48. tracker = Tracker()
  49. last_argset = None
  50. all_classes = set(functools.reduce(
  51. lambda a, b: a+b,
  52. [triple[1:] for triple in tests]
  53. ))
  54. counter = StatCounter()
  55. for argset in argsrc:
  56. on_start = time.time()
  57. on_next(argset, last_argset)
  58. counter.add('on_next', time.time() - on_start)
  59. # # load the data first, only once for each driver
  60. #
  61. data = {}
  62. for aclass in all_classes:
  63. try:
  64. aclass.check_values(argset)
  65. except NotImplementedError: # let them bail out
  66. counter.count_for(aclass, 'bailouts')
  67. else:
  68. data[aclass], duration, overhead = get_data_and_stats(
  69. aclass, argset, driver_settings)
  70. counter.count_for(aclass, 'calls')
  71. counter.add_for(aclass, 'duration', duration)
  72. counter.add_for(aclass, 'overhead', overhead)
  73. for match_op, oclass, rclass in tests:
  74. # skip test if one of classes bailed out on the argset
  75. if oclass not in data or rclass not in data:
  76. continue
  77. diff = None
  78. case = TinyCase({
  79. 'argset': argset,
  80. 'oracle': deepcopy(data[oclass]),
  81. 'result': deepcopy(data[rclass]),
  82. 'oname': oclass.__name__,
  83. 'rname': rclass.__name__
  84. })
  85. hacks_done = sum([case.hack(h) for h in apply_hacks])
  86. counter.add_for(oclass, 'ohacks', hacks_done)
  87. counter.add_for(rclass, 'rhacks', hacks_done)
  88. counter.add('hacks', hacks_done)
  89. counter.add('hacked_cases', (1 if hacks_done else 0))
  90. if not match_op(case['oracle'], case['result']):
  91. # try to clean up so that normally ignored items
  92. # do not clutter up the report
  93. if cleanup_hack and match_op is not operator.eq:
  94. case.hack(cleanup_hack)
  95. # but panic if that "removed" the error condition
  96. if match_op(case['oracle'], case['result']):
  97. raise RuntimeError("cleanup ate error")
  98. diff = jsDiff(dira=case['oracle'],
  99. dirb=case['result'],
  100. namea=case['oname'],
  101. nameb=case['rname'],
  102. chara='o',
  103. charb='r')
  104. tracker.update(diff, argset)
  105. counter.count('cases')
  106. tracker.argsets_done += 1
  107. last_argset = argset
  108. counter.count('argsets')
  109. tracker.driver_stats = counter.all_stats()
  110. return tracker
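# A minimal usage sketch of `regression_test` (the two inline driver classes
# and the argument source are illustrative assumptions, not part of this
# module); wrapped in a function so it only runs when explicitly called.
def _example_regression_test():
    class OracleDriver(BaseTestDriver):
        def _get_data(self):
            self.data = {'sum': self._args['a'] + self._args['b']}

    class ResultDriver(BaseTestDriver):
        def _get_data(self):
            self.data = {'sum': self._args['a'] + self._args['b']}

    argsrc = [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]
    tests = [(operator.eq, OracleDriver, ResultDriver)]
    tracker = regression_test(argsrc, tests)
    print(tracker.format_report())
    return tracker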
  111. def get_data_and_stats(driverClass, argset, driver_settings, only_own=False):
  112. """Run test with given driver"""
  113. start = time.time()
  114. d = driverClass()
  115. d.setup(driver_settings, only_own=only_own)
  116. d.run(argset)
  117. return (d.data, d.duration, time.time() - d.duration - start)
  118. def get_data(driverClass, argset, driver_settings, only_own=False):
  119. """Run test with given driver"""
  120. d = driverClass()
  121. d.setup(driver_settings, only_own=only_own)
  122. d.run(argset)
  123. return d.data
  124. # ########################################################################### #
  125. # ## The Pattern ## #
  126. # ########################################################################### #
  127. class _BaseRuleOp:
  128. def __init__(self, items, item_ok):
  129. self._items = items
  130. self._item_ok = item_ok
  131. def _eval(self, item):
  132. try: # it's a pattern! (recurse)
  133. return RuleOp.Match(item, self._item_ok)
  134. except ValueError: # no, it's something else...
  135. return self._item_ok(item)
  136. def __bool__(self):
  137. try:
  138. return self._match()
  139. except TypeError:
  140. raise ValueError("items must be an iterable: %r" % self._items)
  141. class RuleOp:
  142. class ALL(_BaseRuleOp):
  143. def _match(self):
  144. return all(self._eval(item) for item in self._items)
  145. class ANY(_BaseRuleOp):
  146. def _match(self):
  147. return any(self._eval(item) for item in self._items)
  148. @staticmethod
  149. def Match(pattern, item_ok):
  150. """Evaluate set of logically structured patterns using passed function.
  151. pattern has form of `(op, [item1, item2, ...])` where op can be any of
  152. pre-defined logical operators (`ALL`/`ANY`, I doubt you will ever need
  153. more) and item_ok is a function that will be used to evaluate each one
  154. in the list. In case an itemN is actually pattern as well, it will be
  155. recursed into, passing the item_ok on and on.
  156. Note that there is no data to evaluate "against", you can use closure
  157. if you need to do that.
  158. """
  159. try:
  160. op, items = pattern
  161. except TypeError:
  162. raise ValueError("pattern is not a tuple: %r" % pattern)
  163. try:
  164. assert issubclass(op, _BaseRuleOp)
  165. except TypeError:
  166. raise ValueError("invalid operator: %r" % op)
  167. except AssertionError:
  168. raise ValueError("invalid operator class: %s" % op.__name__)
  169. return bool(op(items, item_ok))
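# An illustrative sketch of `RuleOp.Match`: a nested ALL/ANY pattern is
# evaluated with a closure over a threshold (the values are assumptions
# chosen just for this example).
def _example_rule_match():
    threshold = 3
    pattern = (RuleOp.ALL, [1, 2, (RuleOp.ANY, [0, 5])])
    # 1 < 3 and 2 < 3 and (0 < 3 or 5 < 3)  ->  True
    return RuleOp.Match(pattern, lambda item: item < threshold)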
  170. # ########################################################################### #
  171. # ## The Path ## #
  172. # ########################################################################### #
  173. class DictPath:
  174. """Mixin that adds "path-like" behavior to the top dict of dicts.
  175. Use this class as a mixin for a deep dict-like structure and you can access
  176. the elements using a path. For example:
  177. class MyData(dict, DictPath):
  178. pass
  179. d = MyData({
  180. 'name': 'Joe',
  181. 'age': 34,
  182. 'ssn': {
  183. 'number': '012 345 678',
  184. 'expires': '10-01-16',
  185. },
  186. })
  187. print ("%s's ssn number %s will expire on %s"
  188. % (d.getpath('/name'),
  189. d.getpath('/ssn/number'),
  190. d.getpath('/ssn/expires')))
  191. # Joe's ssn number 012 345 678 will expire on 10-01-16
  192. """
  193. DIV = "/"
  194. class Path:
  195. def __init__(self, path, div):
  196. self.DIV = div
  197. self._path = path
  198. def _validate(self):
  199. try:
  200. assert self._path.startswith(self.DIV)
  201. except (AttributeError, AssertionError):
  202. raise ValueError("invalid path: %r" % self._path)
  203. def stripped(self):
  204. return self._path.lstrip(self.DIV)
  205. @classmethod
  206. def __s2path(cls, path):
  207. return cls.Path(path, cls.DIV)
  208. @classmethod
  209. def __err_path_not_found(cls, path):
  210. raise KeyError("path not found: %s" % path)
  211. @classmethod
  212. def __getitem(cls, dct, key):
  213. if cls.DIV in key:
  214. frag, rest = key.split(cls.DIV, 1)
  215. subdct = dct[frag]
  216. result = cls.__getitem(subdct, rest)
  217. else:
  218. result = dct[key]
  219. return result
  220. @classmethod
  221. def __setitem(cls, dct, key, value):
  222. if cls.DIV not in key:
  223. dct[key] = value
  224. else:
  225. frag, rest = key.split(cls.DIV, 1)
  226. subdct = dct[frag]
  227. cls.__setitem(subdct, rest, value)
  228. @classmethod
  229. def __delitem(cls, dct, key):
  230. if cls.DIV not in key:
  231. del dct[key]
  232. else:
  233. frag, rest = key.split(cls.DIV, 1)
  234. subdct = dct[frag]
  235. return cls.__delitem(subdct, rest)
  236. # # public methods
  237. #
  238. def getpath(self, path):
  239. try:
  240. return self.__getitem(self, self.__s2path(path).stripped())
  241. except (TypeError, KeyError):
  242. self.__err_path_not_found(path)
  243. def setpath(self, path, value):
  244. try:
  245. self.__setitem(self, self.__s2path(path).stripped(), value)
  246. except (TypeError, KeyError):
  247. self.__err_path_not_found(path)
  248. def delpath(self, path):
  249. try:
  250. self.__delitem(self, self.__s2path(path).stripped())
  251. except (TypeError, KeyError):
  252. self.__err_path_not_found(path)
  253. def ispath(self, path):
  254. try:
  255. self.getpath(path)
  256. return True
  257. except KeyError:
  258. return False
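# A runnable sketch of the DictPath mixin (the MyData class is an assumption
# made for this example, mirroring the docstring above).
def _example_dictpath():
    class MyData(dict, DictPath):
        pass

    d = MyData({'ssn': {'number': '012 345 678', 'expires': '10-01-16'}})
    assert d.getpath('/ssn/number') == '012 345 678'
    d.setpath('/ssn/expires', '10-01-26')      # overwrite a leaf
    assert d.ispath('/ssn/expires') and not d.ispath('/ssn/issued')
    d.delpath('/ssn/number')                   # remove a leaf
    return d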
  259. # ########################################################################### #
  260. # ## The Case ## #
  261. # ########################################################################### #
  262. class TinyCase(dict, DictPath):
  263. """Abstraction of the smallest unit of testing.
  264. This class is intended to hold relevant data after the actual test
  265. and apply transformations (hacks) as defined by rules.
  266. The data form (self) is:
  267. {
  268. 'argset': {}, # argset as fed into `BaseTestDriver.run`
  269. 'oracle': {}, # data as returned from oracle driver's `run()`
  270. 'result': {}, # data as returned from result driver's `run()`
  271. 'oname': "", # name of oracle driver's class
  272. 'rname': "" # name of result driver's class
  273. }
  274. The transformation is done using the `TinyCase.hack()` method to which
  275. a list of rules is passed. Each rule is applied, and rules are expected
  276. to be in a following form:
  277. {
  278. 'drivers': [{}], # list of structures to match against self
  279. 'argsets': [{}], # -ditto-
  280. 'action_name': <Arg> # an action name with argument
  281. }
  282. For each of the patterns ('drivers', 'argsets') present, match against self
  283. is done using function `hoover.dataMatch`, which is basically a recursive
  284. test if the pattern is a subset of the case. If none of the results is
  285. negative (i.e. both patterns missing results in a match), any known actions
  286. included in the rule are called. Along with the action name, a list or a dict
  287. providing the necessary parameters is expected: this is simply passed as the only
  288. parameter to the corresponding method.
  289. Actions use a specific way of addressing elements in the structures
  290. saved under the oracle and result keys, provided by `DictPath`, which makes
  291. it easy to define rules for arbitrarily complex dictionary structures.
  292. The format resembles a Unix path, where "directories" are dict
  293. keys and the "root" is the `self` of the `TinyCase` instance:
  294. /oracle/temperature
  295. /result/stats/word_count
  296. Refer to each action's docstring for a description of its function
  297. as well as the expected format of its argument. The name of the action as used
  298. in the rule is the name of the method without the leading 'a_'.
  299. Warning: All actions will silently ignore any paths that are invalid
  300. or leading to non-existent data!
  301. (This does not apply to a path leading to `None`.)
  302. """
  303. def a_exchange(self, action):
  304. """Exchange value A for value B.
  305. Expects a dict, where key is a tuple of two values `(a, b)` and
  306. value is a list of paths. For each key, it goes through the
  307. paths and if the value equals `a` it is set to `b`.
  308. """
  309. for (oldv, newv), paths in action.items():
  310. for path in paths:
  311. try:
  312. curv = self.getpath(path)
  313. except KeyError:
  314. continue
  315. else:
  316. if curv == oldv:
  317. self.setpath(path, newv)
  318. def a_format_str(self, action):
  319. """Convert value to a string using format string.
  320. Expects a dict, where key is a format string, and value is a list
  321. of paths. For each record, the paths are traversed, and value is
  322. converted to string using the format string and the `%` operator.
  323. This is especially useful for floats which you may want to trim
  324. before comparison, since direct comparison of floats is unreliable
  325. on some architectures.
  326. """
  327. for fmt, paths in action.items():
  328. for path in paths:
  329. if self.ispath(path):
  330. new = fmt % self.getpath(path)
  331. self.setpath(path, new)
  332. def a_even_up(self, action):
  333. """Even up structure of both dictionaries.
  334. Expects a list of two-element tuples `('/dict/a', '/dict/b')`
  335. containing pairs of paths to simple dictionaries.
  336. Then the two dicts are altered to have the same structure: if a key
  337. whose value is `None` in dict "a" is missing in dict "b", it is set to
  338. `None` in "b" as well, and vice versa.
  339. """
  340. for patha, pathb in action:
  341. try:
  342. a = self.getpath(patha)
  343. b = self.getpath(pathb)
  344. except KeyError:
  345. continue
  346. else:
  347. for key in set(a.keys()) | set(b.keys()):
  348. if key in a and key in b:
  349. pass # nothing to do here
  350. elif key in a and a[key] is None:
  351. b[key] = None
  352. elif key in b and b[key] is None:
  353. a[key] = None
  354. else:
  355. pass # bailout: odd key but value is *not* None
  356. def a_remove(self, action):
  357. """Remove elements from structure.
  358. Expects a simple list of paths that are simply deleted from the
  359. structure.
  360. """
  361. for path in action:
  362. if self.ispath(path):
  363. self.delpath(path)
  364. def a_round(self, action):
  365. """Round a (presumably) float using tha `float()` built-in.
  366. Expects dict with precision (ndigits, after the dot) as a key and
  367. list of paths as value.
  368. """
  369. for ndigits, paths in action.items():
  370. for path in paths:
  371. try:
  372. f = self.getpath(path)
  373. except KeyError:
  374. pass
  375. else:
  376. self.setpath(path, round(f, ndigits))
  377. known_actions = {'remove': a_remove,
  378. 'even_up': a_even_up,
  379. 'format_str': a_format_str,
  380. 'exchange': a_exchange,
  381. 'round': a_round}
  382. def hack(self, ruleset):
  383. """Apply action from each rule, if patterns match."""
  384. def driver_matches(rule):
  385. if 'drivers' not in rule:
  386. return True
  387. else:
  388. return any(dataMatch(p, self)
  389. for p in rule['drivers'])
  390. def argset_matches(rule):
  391. if 'argsets' not in rule:
  392. return True
  393. else:
  394. return any(dataMatch(p, self)
  395. for p in rule['argsets'])
  396. matched = False
  397. cls = self.__class__
  398. for rule in ruleset:
  399. if driver_matches(rule) and argset_matches(rule):
  400. matched = True
  401. for action_name in cls.known_actions:
  402. if action_name in rule:
  403. cls.known_actions[action_name](self, rule[action_name])
  404. return matched
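# An illustrative sketch of `TinyCase.hack` with a single 'round' rule (the
# argset, paths and temperature values are assumptions for this example).
def _example_tinycase_hack():
    case = TinyCase({
        'argset': {'mode': 'quick'},
        'oracle': {'temperature': 23.400001},
        'result': {'temperature': 23.400004},
        'oname': 'OracleDriver',
        'rname': 'ResultDriver',
    })
    rule = {
        'argsets': [{'argset': {'mode': 'quick'}}],   # pattern matched by dataMatch
        'round': {2: ['/oracle/temperature', '/result/temperature']},
    }
    matched = case.hack([rule])                       # True, rule applied
    return matched and case['oracle'] == case['result']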
  405. # ########################################################################### #
  406. # ## Drivers ## #
  407. # ########################################################################### #
  408. class DriverError(Exception):
  409. """Error encountered when obtaining driver data"""
  410. def __init__(self, message, driver):
  411. self.message = message
  412. self.driver = driver
  413. def __str__(self):
  414. result = ("\n\n"
  415. " type: %s\n"
  416. " message: %s\n"
  417. " driver: %s\n"
  418. " args: %s\n"
  419. " settings: %s\n"
  420. % (self.message.__class__.__name__,
  421. self.message,
  422. self.driver.__class__.__name__,
  423. self.driver._args,
  424. self.driver._settings))
  425. return result
  426. class DriverDataError(Exception):
  427. """Error encountered when decoding or normalizing driver data"""
  428. def __init__(self, exception, driver):
  429. self.exception = exception
  430. self.driver = driver
  431. def __str__(self):
  432. result = ("%s: %s\n"
  433. " class: %s\n"
  434. " args: %s\n"
  435. " data: %s\n"
  436. % (self.exception.__class__.__name__, self.exception,
  437. self.driver.__class__.__name__,
  438. json.dumps(self.driver._args, sort_keys=True, indent=4),
  439. json.dumps(self.driver.data, sort_keys=True, indent=4)))
  440. return result
  441. class BaseTestDriver:
  442. """Base class for test drivers used by `hoover.regression_test` and others.
  443. This class is used to create a test driver, which is an abstraction
  444. and encapsulation of the system being tested. Or, the driver in fact
  445. can be just a "mock" driver that provides data for comparison with
  446. a "real" driver.
  447. The minimum you need to create a working driver is to implement a working
  448. `self._get_data` method that sets `self.data`. Any exception from this
  449. method will be re-raised as DriverError with additional information.
  450. Also, you can set self.duration (in fractional seconds, as returned by
  451. standard time module) in the _get_data method, but if you don't, it is
  452. measured for you as the time the method call took. This is useful if you
  453. need to fetch the data from some other driver or a gateway, and you
  454. have better mechanism to determine how long the action would take "in
  455. real life".
  456. For example, if we are testing a Java library using a Py4J gateway,
  457. we need to do some more conversions outside our testing code just to
  458. be able to use the data in our Python test. We don't want to include
  459. this in the "duration", since we are measuring the Java library, not the
  460. Py4J GW (or our ability to perform the conversions optimally). So we
  461. do our measurement within the Java machine and pass the result to the
  462. Python driver.
  463. Optionally, you can:
  464. * Make an __init__ and after calling base __init__, set
  465. * `self._mandatory_args`, a list of keys that need to be present
  466. in `args` argument to `run()`
  467. * and `self._mandatory_settings`, a list of keys that need to be
  468. present in the `settings` argument to `setup()`
  469. * implement methods
  470. * `_decode_data` and `_normalize_data`, which are intended to decode
  471. the data from any raw format it is received, and to prepare it
  472. for comparison in test,
  473. * and `_check_data`, to allow for early detection of failure,
  474. from which any exception is re-raised as a DriverDataError with
  475. some additional info
  476. * set "bailouts", a list of functions which, when passed "args"
  477. argument, return true to indicate that driver is not able to
  478. process these values (see below for explanation). If any of
  479. these functions returns true, NotImplementedError is raised.
  480. The expected workflow when using the driver is:
  481. # 1. sub-class hoover.BaseTestDriver
  482. # 2. prepare settings and args
  483. MyDriver.check_values(args) # optional, to force bailouts ASAP
  484. d = MyDriver()
  485. d.setup(settings)
  486. d.run(args)
  487. assert d.data, "no data" # evaluate the result...
  488. assert d.duration < 1 # duration of _get_data in seconds
  489. Note on bailouts: The typical strategy for which the driver is intended is
  490. that each possible combination of `args` is exhausted, and results from
  491. multiple drivers are compared to evaluate whether the driver, i.e. the system in
  492. question, is O.K.
  493. The bailouts mechanism is useful in cases where, for a certain system,
  494. a valid combination of arguments would bring the same result as another,
  495. so there is basically no value in testing both of them.
  496. An example might be a system that does not support a binary flag and
  497. behaves as if it was "on": you can simply make the test driver
  498. accept the option but "bail out" any time it is "off", therefore
  499. skipping the time-and-resource-consuming test.
  500. """
  501. bailouts = []
  502. ##
  503. # internal methods
  504. #
  505. def __init__(self):
  506. self.data = {}
  507. self.duration = None
  508. self._args = {}
  509. self._mandatory_args = []
  510. self._mandatory_settings = []
  511. self._settings = {}
  512. self._setup_ok = False
  513. def __check_mandatory(self):
  514. """validate before run()"""
  515. for key in self._mandatory_args:
  516. assert key in self._args, "missing arg: '%s'" % key
  517. for key in self._mandatory_settings:
  518. assert key in self._settings, "missing setting: '%s'" % key
  519. def __cleanup_data(self):
  520. """remove hidden data; e.g. what was only there for _check_data"""
  521. for key in list(self.data):
  522. if key.startswith("_"):
  523. del self.data[key]
  524. ##
  525. # virtual methods
  526. #
  527. def _check_data(self):
  528. """Early check for failure"""
  529. pass
  530. def _decode_data(self):
  531. """Decode from raw data as brought by _get_data"""
  532. pass
  533. def _normalize_data(self):
  534. """Preare data for comparison (e.g. sort, split, trim...)"""
  535. pass
  536. ##
  537. # public methods
  538. #
  539. @classmethod
  540. def check_values(cls, args=None):
  541. """check args in advance before running or setting up anything"""
  542. for fn in cls.bailouts:
  543. if fn(args):
  544. raise NotImplementedError(inspect.getsource(fn))
  545. def setup(self, settings, only_own=False):
  546. """Load settings. only_own means that only settings that belong to us
  547. are loaded ("DriverClass.settingName", the first discriminating part
  548. is removed)"""
  549. if only_own:
  550. for ckey in settings:
  551. driver_class_name, setting_name = ckey.split(".", 1)
  552. if self.__class__.__name__ == driver_class_name:
  553. self._settings[setting_name] = settings[ckey]
  554. else:
  555. self._settings = settings
  556. self._setup_ok = True
  557. def run(self, args):
  558. """validate, run and store data"""
  559. self._args = args
  560. assert self._setup_ok, "run() before setup()?"
  561. self.__class__.check_values(self._args)
  562. self.__check_mandatory()
  563. start = time.time()
  564. try:
  565. self._get_data() # run the test, i.e. obtain raw data
  566. except Exception as e:
  567. raise DriverError(e, self)
  568. self.duration = (time.time() - start if self.duration is None
  569. else self.duration)
  570. try:
  571. self._decode_data() # decode raw data
  572. self._normalize_data() # normalize decoded data
  573. self._check_data() # perform arbitrary checking
  574. except Exception as e:
  575. raise DriverDataError(e, self)
  576. self.__cleanup_data() # cleanup (remove data['_*'])
  577. class MockDriverTrue(BaseTestDriver):
  578. """A simple mock driver, always returning True"""
  579. def _get_data(self):
  580. self.data = True
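# A minimal driver sketch: WordCountDriver and its data layout are assumptions
# made to illustrate the BaseTestDriver workflow described above.
def _example_driver_run():
    class WordCountDriver(BaseTestDriver):
        def __init__(self):
            BaseTestDriver.__init__(self)
            self._mandatory_args = ['text']

        def _get_data(self):
            # the "system under test": count words in the supplied text
            self.data = {'word_count': len(self._args['text'].split())}

    d = WordCountDriver()
    d.setup({'WordCountDriver.locale': 'C'}, only_own=True)
    d.run({'text': 'hello regression world'})
    return d.data, d.duration    # {'word_count': 3}, <seconds as float>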
  581. # ########################################################################### #
  582. # ## Helpers ## #
  583. # ########################################################################### #
  584. class StatCounter:
  585. """A simple counter with formulas support."""
  586. def __init__(self):
  587. self.generic_stats = {}
  588. self.driver_stats = {}
  589. self.formulas = {}
  590. self._born = time.time()
  591. def _register(self, dname):
  592. self.driver_stats[dname] = {
  593. 'calls': 0,
  594. 'rhacks': 0,
  595. 'ohacks': 0,
  596. 'duration': 0,
  597. 'overhead': 0
  598. }
  599. ##
  600. # Formulas
  601. #
  602. # cumulative duration/overhead; just round to ms
  603. self.add_formula(dname + '_overhead',
  604. lambda g, d: int(1000 * d[dname]['overhead']))
  605. self.add_formula(dname + '_duration',
  606. lambda g, d: int(1000 * d[dname]['duration']))
  607. # average (per driver call) overhead/duration
  608. self.add_formula(
  609. dname + '_overhead_per_call',
  610. lambda g, d: int(1000 * d[dname]['overhead'] / d[dname]['calls'])
  611. )
  612. self.add_formula(
  613. dname + '_duration_per_call',
  614. lambda g, d: int(1000 * d[dname]['duration'] / d[dname]['calls'])
  615. )
  616. def gtotal_drivertime(g, d):
  617. driver_time = (sum(s['overhead'] for s in d.values())
  618. + sum(s['duration'] for s in d.values()))
  619. return int(1000 * driver_time)
  620. def gtotal_loop_overhead(g, d):
  621. driver_time = gtotal_drivertime(g, d)
  622. onnext_time = int(1000 * g['on_next'])
  623. age = int(1000 * (time.time() - self._born))
  624. return age - driver_time - onnext_time
  625. # grand totals in times: driver time, loop overhead
  626. self.add_formula('gtotal_drivertime', gtotal_drivertime)
  627. self.add_formula('gtotal_loop_overhead', gtotal_loop_overhead)
  628. self.add_formula('gtotal_loop_onnext',
  629. lambda g, d: int(1000 * g['on_next']))
  630. # percentage of cases that had at least one hack applied
  631. self.add_formula(
  632. 'cases_hacked',
  633. lambda g, d: round(100 * float(g['hacked_cases']) / g['cases'], 2)
  634. )
  635. def _computed_stats(self):
  636. computed = dict.fromkeys(self.formulas)
  637. for fname, fml in self.formulas.items():
  638. try:
  639. v = fml(self.generic_stats, self.driver_stats)
  640. except ZeroDivisionError:
  641. v = None
  642. computed[fname] = v
  643. return computed
  644. def add_formula(self, vname, formula):
  645. """Add a function to work with generic_stats, driver_stats."""
  646. self.formulas[vname] = formula
  647. def add(self, vname, value):
  648. """Add a value to generic stat counter."""
  649. if vname in self.generic_stats:
  650. self.generic_stats[vname] += value
  651. else:
  652. self.generic_stats[vname] = value
  653. def add_for(self, dclass, vname, value):
  654. """Add a value to driver stat counter."""
  655. dname = dclass.__name__
  656. if dname not in self.driver_stats:
  657. self._register(dname)
  658. if vname in self.driver_stats[dname]:
  659. self.driver_stats[dname][vname] += value
  660. else:
  661. self.driver_stats[dname][vname] = value
  662. def count(self, vname):
  663. """Alias to add(vname, 1)"""
  664. self.add(vname, 1)
  665. def count_for(self, dclass, vname):
  666. """Alias to add_for(vname, 1)"""
  667. self.add_for(dclass, vname, 1)
  668. def all_stats(self):
  669. """Compute stats from formulas and add them to colledted data."""
  670. stats = self.generic_stats
  671. for dname, dstats in self.driver_stats.items():
  672. for key, value in dstats.items():
  673. stats[dname + "_" + key] = value
  674. stats.update(self._computed_stats())
  675. return stats
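# An illustrative sketch of StatCounter: per-driver counters plus the
# formula-based stats (MockDriverTrue is reused here only to have a driver
# class to count against; the timings are made-up values).
def _example_statcounter():
    counter = StatCounter()
    counter.count_for(MockDriverTrue, 'calls')
    counter.add_for(MockDriverTrue, 'duration', 0.25)
    counter.add_for(MockDriverTrue, 'overhead', 0.05)
    counter.add('on_next', 0.01)
    counter.count('cases')
    counter.add('hacked_cases', 0)
    # flat dict, e.g. 'MockDriverTrue_duration': 250 (milliseconds)
    return counter.all_stats()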
  676. class Tracker(dict):
  677. """Error tracker to allow for usable reports from huge regression tests.
  678. Best used as a result bearer from `regression_test`, this class keeps
  679. a simple in-memory "database" of errors seen during the regression
  680. test, and implements a few methods to access the data.
  681. The basic usage is:
  682. 1. Instantiate (no parameters)
  683. 2. Each time you have a result of a test, you pass it to the `update()`
  684. method along with the argument set (as a single object, typically
  685. a dict) that caused the error.
  686. If the boolean value of the result is False, the object is thrown away
  687. and nothing happens. Otherwise, its string value is used as a key
  688. under which the argument set is saved.
  689. As you can see, the string is supposed to be ''as deterministic
  690. as possible'', i.e. it should carry only as much information
  691. about the error as is necessary. Do not include any timestamps
  692. or "volatile" values.
  693. 3. At final stage, you can retrieve statistics as how many (distinct)
  694. errors have been recorded, what was the duration of the whole test,
  695. how many times `update()` was called, etc.
  696. 4. Optionally, you can also call `format_report()` to get a nicely
  697. formatted report with list of arguments for each error string.
  698. 5. Since in bigger tests argument lists can grow really large,
  699. complete lists are not normally printed. Instead, you can use
  700. `write_args_csv()`, which will create one CSV per error,
  701. named after the first 7 chars of its SHA1 (inspired by Git).
  702. Note that you need to pass an existing writable folder path.
  703. """
  704. ##
  705. # internal methods
  706. #
  707. def __init__(self):
  708. self._start = time.time()
  709. self._db = {}
  710. self.tests_done = 0
  711. self.tests_passed = 0
  712. self.argsets_done = 0
  713. self.driver_stats = {}
  714. def _csv_fname(self, errstr, prefix):
  715. """Format name of file for this error string"""
  716. return '%s/%s.csv' % (prefix, self._eid(errstr))
  717. def _eid(self, errstr):
  718. """Return EID for the error string (first 7 chars of SHA1)."""
  719. return hashlib.sha1(errstr.encode('utf-8')).hexdigest()[:7]
  720. def _insert(self, errstr, argset):
  721. """Insert the argset into DB."""
  722. if errstr not in self._db:
  723. self._db[errstr] = []
  724. self._db[errstr].append(argset)
  725. def _format_error(self, errstr, max_aa=0):
  726. """Format single error for output."""
  727. argsets_affected = self._db[errstr]
  728. num_aa = len(argsets_affected)
  729. # trim if list is too long for Jenkins
  730. argsets_shown = argsets_affected
  731. if max_aa and (num_aa > max_aa):
  732. div = ["[...] not showing %s cases, see %s.csv for full list"
  733. % (num_aa - max_aa, self._eid(errstr))]
  734. argsets_shown = argsets_affected[0:max_aa] + div
  735. # format error
  736. formatted_aa = "\n".join([str(arg) for arg in argsets_shown])
  737. return ("~~~ ERROR FOUND (%s) ~~~~~~~~~~~~~~~~~~~~~~~~~\n"
  738. "--- error string: -----------------------------------\n%s\n"
  739. "--- argsets affected (%d) ---------------------------\n%s\n"
  740. % (self._eid(errstr), errstr, num_aa, formatted_aa))
  741. ##
  742. # public methods
  743. #
  744. def errors_found(self):
  745. """Return number of non-distinct errors in db."""
  746. return bool(self._db)
  747. def format_report(self, max_aa=0):
  748. """Return complete report formatted as string."""
  749. error_list = "\n".join([self._format_error(e, max_aa=max_aa)
  750. for e in self._db])
  751. return ("Found %(total_errors)s (%(distinct_errors)s distinct) errors"
  752. " in %(tests_done)s tests with %(argsets)s argsets"
  753. " (duration: %(time)ss):"
  754. % self.getstats()
  755. + "\n\n" + error_list)
  756. def getstats(self):
  757. """Return basic and driver stats
  758. argsets_done - this must be incremented by outer code,
  759. once per each unique argset
  760. tests_done - how many times Tracker.update() was called
  761. distinct_errors - how many distinct errors (same `str(error)`)
  762. were seen by Tracker.update()
  763. total_errors - how many times `Tracker.update()` saw an
  764. error, i.e. how many argsets are in DB
  765. time - how long since init (seconds)
  766. """
  767. def total_errors():
  768. return functools.reduce(
  769. lambda x, y: x + len(y),
  770. self._db.values(),
  771. 0,  # reduce() takes its initializer as a positional argument
  772. )
  773. stats = {
  774. "argsets": self.argsets_done,
  775. "tests_done": self.tests_done,
  776. "distinct_errors": len(self._db),
  777. "total_errors": total_errors(),
  778. "time": int(time.time() - self._start)
  779. }
  780. stats.update(self.driver_stats)
  781. return stats
  782. def update(self, error, argset):
  783. """Update tracker with test result.
  784. If `bool(error)` is true, it is considered error and argset
  785. is inserted to DB with `str(error)` as key. This allows for later
  786. sorting and analysis.
  787. """
  788. self.tests_done += 1
  789. if error:
  790. errstr = str(error)
  791. self._insert(errstr, argset)
  792. def write_stats_csv(self, fname):
  793. """Write stats to a simple one row (plus header) CSV."""
  794. stats = self.getstats()
  795. colnames = sorted(stats.keys())
  796. with open(fname, 'a') as fh:
  797. cw = csv.DictWriter(fh, colnames)
  798. cw.writerow(dict(zip(colnames, colnames))) # header
  799. cw.writerow(stats)
  800. def write_args_csv(self, prefix=''):
  801. """Write out a set of CSV files, one per distinctive error.
  802. Each CSV is named with error EID (first 7 chars of SHA1) and lists
  803. all argument sets affected by this error. This is supposed to make it
  804. easier to further analyse the impact and triggering values of errors,
  805. perhaps using a spreadsheet application."""
  806. def get_all_colnames():
  807. cn = {}
  808. for affected in self._db.values():
  809. for argset in affected:
  810. cn.update(dict.fromkeys(argset))
  811. return sorted(cn.keys())
  812. all_colnames = get_all_colnames()
  813. for errstr in self._db:
  814. with open(self._csv_fname(errstr, prefix), 'a') as fh:
  815. cw = csv.DictWriter(fh, all_colnames)
  816. cw.writerow(dict(zip(all_colnames, all_colnames))) # header
  817. for argset in self._db[errstr]:
  818. cw.writerow(argset)
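# An illustrative sketch of Tracker: three results are recorded, two of which
# share one error string (the error text and argsets are assumptions).
def _example_tracker():
    tracker = Tracker()
    for argset in ({'flag': True}, {'flag': False}, {'flag': None}):
        error = "" if argset['flag'] is None else "wrong exit code"
        tracker.update(error, argset)
        tracker.argsets_done += 1
    stats = tracker.getstats()        # 3 tests_done, 2 total, 1 distinct error
    return tracker.format_report(max_aa=1), stats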
  819. def dataMatch(pattern, data):
  820. """Check if data structure matches a pattern data structure.
  821. Supports lists, dictionaries and scalars (int, float, string).
  822. For scalars, simple `==` is used. For lists, "to match" means that every
  823. pattern item matches some item of the data list (e.g. the data `[1, 2, 3, 4]`
  824. matches the pattern `[3, 2]`). Both lists and dictionaries are matched recursively.
  825. """
  826. def listMatch(pattern, data):
  827. """Match list-like objects"""
  828. assert all([hasattr(o, 'append') for o in [pattern, data]])
  829. results = []
  830. for pv in pattern:
  831. if any([dataMatch(pv, dv) for dv in data]):
  832. results.append(True)
  833. else:
  834. results.append(False)
  835. return all(results)
  836. def dictMatch(pattern, data):
  837. """Match dict-like objects"""
  838. assert all([hasattr(o, 'items') for o in [pattern, data]])
  839. results = []
  840. try:
  841. for pk, pv in pattern.items():
  842. results.append(dataMatch(pv, data[pk]))
  843. except KeyError:
  844. results.append(False)
  845. return all(results)
  846. result = None
  847. if pattern == data:
  848. result = True
  849. else:
  850. for handler in [dictMatch, listMatch]:
  851. try:
  852. result = handler(pattern, data)
  853. except AssertionError:
  854. continue
  855. return result
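# Illustrative examples of dataMatch: the pattern only needs to be a
# recursive subset of the data.
def _example_datamatch():
    data = {'color': 'red', 'size': {'w': 10, 'h': 20}, 'tags': [1, 2, 3]}
    assert dataMatch({'size': {'w': 10}}, data)       # nested subset matches
    assert dataMatch({'tags': [3, 1]}, data)          # list subset matches
    assert not dataMatch({'color': 'blue'}, data)     # scalar mismatch
    return True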
  856. def jsDump(data):
  857. """A human-readable JSON dump."""
  858. return json.dumps(data, sort_keys=True, indent=4,
  859. separators=(',', ': '))
  860. def jsDiff(dira, dirb, namea="A", nameb="B", chara="a", charb="b"):
  861. """JSON-based human-readable diff of two data structures.
  862. '''BETA''' version.
  863. jsDiff is based on unified diff of two human-readable JSON dumps except
  864. that instead of showing line numbers and context based on proximity to
  865. the changed lines, it prints only context important from the data
  866. structure point.
  867. The goal is to be able to quickly tell the story of what has changed
  868. where in the structure, no matter size and complexity of the data set.
  869. For example:
  870. a = {
  871. 'w': {1: 2, 3: 4},
  872. 'x': [1, 2, 3],
  873. 'y': [3, 1, 2]
  874. }
  875. b = {
  876. 'w': {1: 2, 3: 4},
  877. 'x': [1, 1, 3],
  878. 'y': [3, 1, 3]
  879. }
  880. print(jsDiff(a, b))
  881. will output:
  882. aaa ~/A
  883. "x": [
  884. a 2,
  885. "y": [
  886. a 2
  887. bbb ~/B
  888. "x": [
  889. b 1,
  890. "y": [
  891. b 3
  892. Notice that the final output somehow resembles the traditional unified
  893. diff, so to avoid confusion, +/- is changed to a/b (the characters can
  894. be provided as well as the names A/B).
  895. """
  896. def compress(lines):
  897. def is_body(line):
  898. return line.startswith(("-", "+", " "))
  899. def is_diff(line):
  900. return line.startswith(("-", "+"))
  901. def is_diffA(line):
  902. return line.startswith("-")
  903. def is_diffB(line):
  904. return line.startswith("+")
  905. def is_context(line):
  906. return line.startswith(" ")
  907. def is_hdr(line):
  908. return line.startswith(("@@", "---", "+++"))
  909. def is_hdr_hunk(line):
  910. return line.startswith("@@")
  911. def is_hdr_A(line):
  912. return line.startswith("---")
  913. def is_hdr_B(line):
  914. return line.startswith("+++")
  915. class Level:
  916. def __init__(self, hint):
  917. self.hint = hint
  918. self.hinted = False
  919. def __str__(self):
  920. return str(self.hint)
  921. def get_hint(self):
  922. if not self.hinted:
  923. self.hinted = True
  924. return self.hint
  925. class ContextTracker:
  926. def __init__(self):
  927. self.trace = []
  928. self.last_line = None
  929. self.last_indent = -1
  930. def indent_of(self, line):
  931. meat = line[1:].lstrip(" ")
  932. ind = len(line) - len(meat) - 1
  933. return ind
  934. def check(self, line):
  935. indent = self.indent_of(line)
  936. if indent > self.last_indent:
  937. self.trace.append(Level(self.last_line))
  938. elif indent < self.last_indent:
  939. self.trace.pop()
  940. self.last_line = line
  941. self.last_indent = indent
  942. def get_hint(self):
  943. return self.trace[-1].get_hint()
  944. buffa = []
  945. buffb = []
  946. ct = ContextTracker()
  947. for line in lines:
  948. if is_hdr_hunk(line):
  949. continue
  950. elif is_hdr_A(line):
  951. line = line.replace("---", chara * 3, 1)
  952. buffa.insert(0, line)
  953. elif is_hdr_B(line):
  954. line = line.replace("+++", charb * 3, 1)
  955. buffb.insert(0, line)
  956. elif is_body(line):
  957. ct.check(line)
  958. if is_diff(line):
  959. hint = ct.get_hint()
  960. if hint:
  961. buffa.append(hint)
  962. buffb.append(hint)
  963. if is_diffA(line):
  964. line = line.replace("-", chara, 1)
  965. buffa.append(line)
  966. elif is_diffB(line):
  967. line = line.replace("+", charb, 1)
  968. buffb.append(line)
  969. else:
  970. raise AssertionError("difflib.unified_diff emitted"
  971. " unknown format (%s chars):\n%s"
  972. % (len(line), line))
  973. return buffa + buffb
  974. dumpa = jsDump(dira)
  975. dumpb = jsDump(dirb)
  976. udiff = difflib.unified_diff(dumpa.split("\n"), dumpb.split("\n"),
  977. "~/" + namea, "~/" + nameb,
  978. n=10000, lineterm='')
  979. return "\n".join(compress([line for line in udiff]))
  980. class Cartman:
  981. """Create argument sets from ranges (or ay iterators) of values.
  982. This class is to enable easy definition and generation of dictionary
  983. argument sets using Cartesian product. You only need to define:
  984. * structure of argument set (can be more than just flat dict)
  985. * ranges, or arbitrary iterators of values on each "leaf" of the
  986. argument set
  987. Since any argument can be expected to hold any kind of value,
  988. even other iterables, the pure logic "iterate it if you can"
  989. is insufficient. Instead, the definition is divided into two parts:
  990. * scheme, which is a "prototype" of a final argument set, except
  991. that for each value that will change, a `Cartman.Iterable`
  992. sentinel is used. For each leaf that is constant, `Cartman.Scalar`
  993. is used
  994. * source, which has the same structure, except that where in scheme
  995. is `Iterable`, an iterable object is expected, whereas in places
  996. where `Scalar` is used, a value is assigned that does not change
  997. during iteration.
  998. Finally, when such instance is used in loop, argument sets are generated
  999. using the Cartesian product of each iterable found. This allows for
  1000. relatively easy definition of complex scenarios.
  1001. Consider this example:
  1002. You have a system (wrapped up in test driver) that takes ''size''
  1003. argument, that is supposed to be ''width'', ''height'' and ''depth'',
  1004. each an integer ranging from 1 to 100, and ''color'' that can
  1005. be "white", "black" or "yellow".
  1006. For a test using all-combinations strategy, you will need to generate
  1007. 100 * 100 * 100 * 3 argument sets, i.e. 3M tests.
  1008. All you need to do is:
  1009. scheme = {
  1010. 'size': {
  1011. 'width': Cartman.Iterable,
  1012. 'height': Cartman.Iterable,
  1013. 'depth': Cartman.Iterable,
  1014. },
  1015. 'color': Cartman.Iterable,
  1016. }
  1017. source = {
  1018. 'size': {
  1019. 'width': range(1, 101),
  1020. 'height': range(1, 101),
  1021. 'depth': range(1, 101),
  1022. },
  1023. 'color': ['white', 'black', 'yellow'],
  1024. }
  1025. c = Cartman(source, scheme)
  1026. for argset in c:
  1027. result = my_test(argset)
  1028. # assert ...
  1029. The main advantage is that you can separate the definition from
  1030. the code, and you can keep your iterators as big or as small as
  1031. needed, and add / remove values.
  1032. Also in case your parameters vary in structure over time, or from
  1033. one test to another, it gets much easier to keep up with changes
  1034. without much jumping through hoops.
  1035. Note: `Cartman.Scalar` is provided mainly to make your definitions
  1036. more readable. The following constructions are functionally equivalent:
  1037. c = Cartman({'a': 1}, {'a': Cartman.Scalar})
  1038. c = Cartman({'a': [1]}, {'a': Cartman.Iterable})
  1039. In the future, however, this might change, mainly if
  1040. optimization becomes possible based on what was used.
  1041. """
  1042. # TODO: support for arbitrary ordering (profile / nginx)
  1043. # TODO: implement getstats and fmtstats
  1044. # TODO: N-wise
  1045. class _BaseMark:
  1046. pass
  1047. class Scalar(_BaseMark):
  1048. pass
  1049. class Iterable(_BaseMark):
  1050. pass
  1051. def __init__(self, source, scheme):
  1052. self.source = source
  1053. self.scheme = scheme
  1054. # validate scheme + source and throw useful error
  1055. scheme_ok = isinstance(self.scheme, collections.abc.Mapping)
  1056. source_ok = isinstance(self.source, collections.abc.Mapping)
  1057. if not scheme_ok:
  1058. raise ValueError("scheme must be a mapping (e.g. dict)")
  1059. elif scheme_ok and not source_ok:
  1060. raise ValueError("scheme vs. source mismatch")
  1061. def __deepcopy__(self, memo):
  1062. return Cartman(deepcopy(self.source, memo),
  1063. deepcopy(self.scheme, memo))
  1064. def _is_mark(self, subscheme):
  1065. try:
  1066. return issubclass(subscheme, Cartman._BaseMark)
  1067. except TypeError:
  1068. return False
  1069. def _means_scalar(self, subscheme):
  1070. if self._is_mark(subscheme):
  1071. return issubclass(subscheme, Cartman.Scalar)
  1072. def _means_iterable(self, subscheme):
  1073. if self._is_mark(subscheme):
  1074. return issubclass(subscheme, Cartman.Iterable)
  1075. def _get_iterable_for(self, key):
  1076. subscheme = self.scheme[key]
  1077. subsource = self.source[key]
  1078. if self._means_scalar(subscheme):
  1079. return [subsource]
  1080. elif self._means_iterable(subscheme):
  1081. return subsource
  1082. else: # try to use it as scheme
  1083. return iter(Cartman(subsource, subscheme))
  1084. def __iter__(self):
  1085. names = []
  1086. iterables = []
  1087. for key in self.scheme:
  1088. try:
  1089. iterables.append(self._get_iterable_for(key))
  1090. except KeyError:
  1091. pass # ignore that subsource mentioned by scheme is missing
  1092. else:
  1093. names.append(key)
  1094. for values in itertools.product(*iterables):
  1095. yield dict(zip(names, values))
  1096. def getstats(self):
  1097. return {}
  1098. def fmtstats(self):
  1099. return ""