A collection of Python libraries developed for testing purposes

hoover.py 47KB

  1. # coding=utf-8
  2. import collections.abc
  3. import functools
  4. import csv
  5. import difflib
  6. import hashlib
  7. import inspect
  8. import itertools
  9. import json
  10. import operator
  11. import time
  12. from copy import deepcopy
  13. # ########################################################################### #
  14. # ## The Motor ## #
  15. # ########################################################################### #
  16. def regression_test(argsrc, tests, driver_settings, cleanup_hack=None,
  17. apply_hacks=None, on_next=None):
  18. """Perform regression test with argsets from `argsrc`.
  19. For each argset pulled from source, performs one comparison
  20. per driver pair in `tests`, which is list of tuples with
  21. comparison function and pair of test driver classes: `(operator,
  22. oracle_class, result_class)`. (The classes are assumed to
  23. be sub-classes of `hoover.BaseTestDriver`.)
  24. `driver_settings` is a dictionary supposed to hold environmental
  25. values for all the drivers, the keys having form "DriverName.
  26. settingName". Each driver is then instantiated with this
  27. dict, and gets a copy of the dict with settings only intended
  28. for itself (and the "DriverName" part stripped).
  29. If comparison fails, report is generated using `hoover.jsDiff()`,
  30. and along with affected arguments stored in `hoover.Tracker`
  31. instance, which is finally used as a return value. This instance
  32. then contains method for basic stats as well as method to format
  33. the final report and a helper method to export argument sets
  34. as a CSV files.
  35. Supports hacks, which are a data transformations performed by
  36. `hoover.TinyCase` class and are intended to avoid known bugs
  37. and anomalies (`apply_hacks`) or clean up data structures of
  38. irrelevant data (`cleanup_hack`, performed only if the comparison
  39. function provided along with driver pair is not "equals").
  40. A function can be provided as `on_next` argument, that will be
  41. called after pulling each argument set, with last argument set
  42. (or `None`) as first argument and current one as second argument.
  43. """
  44. # TODO: do not parse driver_settings thousands of times (use a view class?)
  45. on_next = on_next if on_next else lambda a, b: None
  46. apply_hacks = apply_hacks if apply_hacks else []
  47. tracker = Tracker()
  48. last_argset = None
  49. all_classes = set(functools.reduce(
  50. lambda a, b: a+b,
  51. [triple[1:] for triple in tests]
  52. ))
  53. counter = StatCounter()
  54. for argset in argsrc:
  55. on_start = time.time()
  56. on_next(argset, last_argset)
  57. counter.add('on_next', time.time() - on_start)
  58. # # load the data first, only once for each driver
  59. #
  60. data = {}
  61. for aclass in all_classes:
  62. try:
  63. aclass.check_values(argset)
  64. except NotImplementedError: # let them bail out
  65. counter.count_for(aclass, 'bailouts')
  66. else:
  67. data[aclass], duration, overhead = get_data_and_stats(
  68. aclass, argset, driver_settings)
  69. counter.count_for(aclass, 'calls')
  70. counter.add_for(aclass, 'duration', duration)
  71. counter.add_for(aclass, 'overhead', overhead)
  72. for match_op, oclass, rclass in tests:
  73. # skip test if one of classes bailed out on the argset
  74. if oclass not in data or rclass not in data:
  75. continue
  76. diff = None
  77. case = TinyCase({
  78. 'argset': argset,
  79. 'oracle': deepcopy(data[oclass]),
  80. 'result': deepcopy(data[rclass]),
  81. 'oname': oclass.__name__,
  82. 'rname': rclass.__name__
  83. })
  84. hacks_done = sum([case.hack(h) for h in apply_hacks])
  85. counter.add_for(oclass, 'ohacks', hacks_done)
  86. counter.add_for(rclass, 'rhacks', hacks_done)
  87. counter.add('hacks', hacks_done)
  88. counter.add('hacked_cases', (1 if hacks_done else 0))
  89. if not match_op(case['oracle'], case['result']):
  90. # try to clean up so that normally ignored items
  91. # do not clutter up the report
  92. if cleanup_hack and match_op is not operator.eq:
  93. case.hack(cleanup_hack)
  94. # but panic if that "removed" the error condition
  95. if match_op(case['oracle'], case['result']):
  96. raise RuntimeError("cleanup ate error")
  97. diff = jsDiff(dira=case['oracle'],
  98. dirb=case['result'],
  99. namea=case['oname'],
  100. nameb=case['rname'])
  101. tracker.update(diff, argset)
  102. counter.count('cases')
  103. tracker.argsets_done += 1
  104. last_argset = argset
  105. counter.count('argsets')
  106. tracker.driver_stats = counter.all_stats()
  107. return tracker
  108. def get_data_and_stats(driverClass, argset, driver_settings):
  109. """Run test with given driver"""
  110. start = time.time()
  111. d = driverClass()
  112. d.setup(driver_settings, only_own=True)
  113. d.run(argset)
  114. return (d.data, d.duration, time.time() - d.duration - start)
  115. def get_data(driverClass, argset, driver_settings):
  116. """Run test with given driver"""
  117. d = driverClass()
  118. d.setup(driver_settings, only_own=True)
  119. d.run(argset)
  120. return d.data
  121. # ########################################################################### #
  122. # ## The Pattern ## #
  123. # ########################################################################### #
  124. class _BaseRuleOp:
  125. def __init__(self, items, item_ok):
  126. self._items = items
  127. self._item_ok = item_ok
  128. def _eval(self, item):
  129. try: # it's a pattern! (recurse)
  130. return RuleOp.Match(item, self._item_ok)
  131. except ValueError: # no, it's something else...
  132. return self._item_ok(item)
  133. def __bool__(self):
  134. try:
  135. return self._match()
  136. except TypeError:
  137. raise ValueError("items must be an iterable: %r" % self._items)
  138. class RuleOp:
  139. class ALL(_BaseRuleOp):
  140. def _match(self):
  141. return all(self._eval(item) for item in self._items)
  142. class ANY(_BaseRuleOp):
  143. def _match(self):
  144. return any(self._eval(item) for item in self._items)
  145. @staticmethod
  146. def Match(pattern, item_ok):
  147. """Evaluate set of logically structured patterns using passed function.
  148. pattern has form of `(op, [item1, item2, ...])` where op can be any of
  149. pre-defined logical operators (`ALL`/`ANY`, I doubt you will ever need
  150. more) and item_ok is a function that will be used to evaluate each one
  151. in the list. In case an itemN is actually pattern as well, it will be
  152. recursed into, passing the item_ok on and on.
  153. Note that there is no data to evaluate "against", you can use closure
  154. if you need to do that.
  155. """
  156. try:
  157. op, items = pattern
  158. except TypeError:
  159. raise ValueError("pattern is not a tuple: %r" % pattern)
  160. try:
  161. assert issubclass(op, _BaseRuleOp)
  162. except TypeError:
  163. raise ValueError("invalid operator: %r" % op)
  164. except AssertionError:
  165. raise ValueError("invalid operator class: %s" % op.__name__)
  166. return bool(op(items, item_ok))
  167. # ########################################################################### #
  168. # ## The Path ## #
  169. # ########################################################################### #
  170. class DictPath:
  171. """Mixin that adds "path-like" behavior to the top dict of dicts.
  172. Use this class as a mixin for a deep dic-like structure and you can access
  173. the elements using a path. For example:
  174. MyData(dict, DictPath):
  175. pass
  176. d = MyData({
  177. 'name': 'Joe',
  178. 'age': 34,
  179. 'ssn': {
  180. 'number': '012 345 678',
  181. 'expires': '10-01-16',
  182. },
  183. })
  184. print ("%s's ssn number %s will expire on %s"
  185. % (d.getpath('/name'),
  186. d.getpath('/ssn/number'),
  187. d.getpath('/ssn/expiry')))
  188. # joe's ssn number 012 345 678 will expire 10-01-16
  189. """
  190. DIV = "/"
  191. class Path:
  192. def __init__(self, path, div):
  193. self.DIV = div
  194. self._path = path
  195. def _validate(self):
  196. try:
  197. assert self._path.startswith(self.DIV)
  198. except (AttributeError, AssertionError):
  199. raise ValueError("invalid path: %r" % self._path)
  200. def stripped(self):
  201. return self._path.lstrip(self.DIV)
  202. @classmethod
  203. def __s2path(cls, path):
  204. return cls.Path(path, cls.DIV)
  205. @classmethod
  206. def __err_path_not_found(cls, path):
  207. raise KeyError("path not found: %s" % path)
  208. @classmethod
  209. def __getitem(cls, dct, key):
  210. if cls.DIV in key:
  211. frag, rest = key.split(cls.DIV, 1)
  212. subdct = dct[frag]
  213. result = cls.__getitem(subdct, rest)
  214. else:
  215. result = dct[key]
  216. return result
  217. @classmethod
  218. def __setitem(cls, dct, key, value):
  219. if cls.DIV not in key:
  220. dct[key] = value
  221. else:
  222. frag, rest = key.split(cls.DIV, 1)
  223. subdct = dct[frag]
  224. cls.__setitem(subdct, rest, value)
  225. @classmethod
  226. def __delitem(cls, dct, key):
  227. if cls.DIV not in key:
  228. del dct[key]
  229. else:
  230. frag, rest = key.split(cls.DIV, 1)
  231. subdct = dct[frag]
  232. return cls.__delitem(subdct, rest)
  233. # # public methods
  234. #
  235. def getpath(self, path):
  236. try:
  237. return self.__getitem(self, self.__s2path(path).stripped())
  238. except (TypeError, KeyError):
  239. self.__err_path_not_found(path)
  240. def setpath(self, path, value):
  241. try:
  242. self.__setitem(self, self.__s2path(path).stripped(), value)
  243. except (TypeError, KeyError):
  244. self.__err_path_not_found(path)
  245. def delpath(self, path):
  246. try:
  247. self.__delitem(self, self.__s2path(path).stripped())
  248. except (TypeError, KeyError):
  249. self.__err_path_not_found(path)
  250. def ispath(self, path):
  251. try:
  252. self.getpath(path)
  253. return True
  254. except KeyError:
  255. return False
  256. # ########################################################################### #
  257. # ## The Case ## #
  258. # ########################################################################### #
  259. class TinyCase(dict, DictPath):
  260. """Abstraction of the smallest unit of testing.
  261. This class is intended to hold relevant data after the actual test
  262. and apply transformations (hacks) as defined by rules.
  263. The data form (self) is:
  264. {
  265. 'argset': {}, # argset as fed into `BaseTestDriver.run`
  266. 'oracle': {}, # data as returned from oracle driver's `run()`
  267. 'result': {}, # data as returned from result driver's `run()`
  268. 'oname': "", # name of oracle driver's class
  269. 'rname': "" # name of result driver's class
  270. }
  271. The transformation is done using the `TinyCase.hack()` method to which
  272. a list of rules is passed. Each rule is applied, and rules are expected
  273. to be in a following form:
  274. {
  275. 'drivers': [{}], # list of structures to match against self
  276. 'argsets': [{}], # -ditto-
  277. 'action_name': <Arg> # an action name with argument
  278. }
  279. For each of the patterns ('drivers', 'argsets') present, a match against self
  280. is done using the function `hoover.dataMatch`, which is basically a recursive
  281. test whether the pattern is a subset of the case. If none of the results is
  282. negative (i.e. both patterns missing counts as a match), any known actions
  283. included in the rule are called. Along with the action name, a list or a dict
  284. providing the necessary parameters is expected: this is simply passed as the only
  285. parameter to the corresponding method.
  286. Actions use a specific way to address elements in the structures
  287. saved under the oracle and result keys, provided by `DictPath`, which makes
  288. it easy to define rules for arbitrarily complex dictionary structures.
  289. The format resembles a Unix path, where "directories" are dict
  290. keys and the "root" is the `self` of the `TinyCase` instance:
  291. /oracle/temperature
  292. /result/stats/word_count
  293. Refer to each action's docstring for a description of its function
  294. as well as the expected format of its argument. The name of the action as used
  295. in the rule is the name of the method without the leading 'a_'.
  296. Warning: All actions will silently ignore any paths that are invalid
  297. or lead to non-existent data!
  298. (This does not apply to a path leading to `None`.)
  299. """
  300. def a_exchange(self, action):
  301. """Exchange value A for value B.
  302. Expects a dict, where key is a tuple of two values `(a, b)` and
  303. value is a list of paths. For each key, it goes through the
  304. paths and if the value equals `a` it is set to `b`.
  305. """
  306. for (oldv, newv), paths in action.items():
  307. for path in paths:
  308. try:
  309. curv = self.getpath(path)
  310. except KeyError:
  311. continue
  312. else:
  313. if curv == oldv:
  314. self.setpath(path, newv)
  315. def a_format_str(self, action):
  316. """Convert value to a string using format string.
  317. Expects a dict, where key is a format string, and value is a list
  318. of paths. For each record, the paths are traversed, and value is
  319. converted to string using the format string and the `%` operator.
  320. This is especially useful for floats which you may want to trim
  321. before comparison, since direct comparison of floats is unreliable
  322. on some architectures.
  323. """
  324. for fmt, paths in action.items():
  325. for path in paths:
  326. if self.ispath(path):
  327. new = fmt % self.getpath(path)
  328. self.setpath(path, new)
  329. def a_even_up(self, action):
  330. """Even up structure of both dictionaries.
  331. Expects a list of two-element tuples `('/dict/a', '/dict/b')`
  332. containing pairs of path do simple dictionaries.
  333. Then the two dicts are altered to have same structure: if a key
  334. in dict "a" is missing in dict "b", it is set to `None` in "b" and
  335. vice-versa,
  336. """
  337. for patha, pathb in action:
  338. try:
  339. a = self.getpath(patha)
  340. b = self.getpath(pathb)
  341. except KeyError:
  342. continue
  343. else:
  344. for key in set(a.keys()) | set(b.keys()):
  345. if key in a and key in b:
  346. pass # nothing to do here
  347. elif key in a and a[key] is None:
  348. b[key] = None
  349. elif key in b and b[key] is None:
  350. a[key] = None
  351. else:
  352. pass # bailout: odd key but value is *not* None
  353. def a_remove(self, action):
  354. """Remove elements from structure.
  355. Expects a simple list of paths that are simply deleted fro, the
  356. structure.
  357. """
  358. for path in action:
  359. if self.ispath(path):
  360. self.delpath(path)
  361. def a_round(self, action):
  362. """Round a (presumably) float using tha `float()` built-in.
  363. Expects dict with precision (ndigits, after the dot) as a key and
  364. list of paths as value.
  365. """
  366. for ndigits, paths in action.items():
  367. for path in paths:
  368. try:
  369. f = self.getpath(path)
  370. except KeyError:
  371. pass
  372. else:
  373. self.setpath(path, round(f, ndigits))
  374. known_actions = {'remove': a_remove,
  375. 'even_up': a_even_up,
  376. 'format_str': a_format_str,
  377. 'exchange': a_exchange,
  378. 'round': a_round}
  379. def hack(self, ruleset):
  380. """Apply action from each rule, if patterns match."""
  381. def driver_matches(rule):
  382. if 'drivers' not in rule:
  383. return True
  384. else:
  385. return any(dataMatch(p, self)
  386. for p in rule['drivers'])
  387. def argset_matches(rule):
  388. if 'argsets' not in rule:
  389. return True
  390. else:
  391. return any(dataMatch(p, self)
  392. for p in rule['argsets'])
  393. matched = False
  394. cls = self.__class__
  395. for rule in ruleset:
  396. if driver_matches(rule) and argset_matches(rule):
  397. matched = True
  398. for action_name in cls.known_actions:
  399. if action_name in rule:
  400. cls.known_actions[action_name](self, rule[action_name])
  401. return matched
  402. # ########################################################################### #
  403. # ## Drivers ## #
  404. # ########################################################################### #
  405. class DriverError(Exception):
  406. """Error encountered when obtaining driver data"""
  407. def __init__(self, message, driver):
  408. self.message = message
  409. self.driver = driver
  410. def __str__(self):
  411. result = ("\n\n"
  412. " type: %s\n"
  413. " message: %s\n"
  414. " driver: %s\n"
  415. " args: %s\n"
  416. " settings: %s\n"
  417. % (self.message.__class__.__name__,
  418. self.message,
  419. self.driver.__class__.__name__,
  420. self.driver._args,
  421. self.driver._settings))
  422. return result
  423. class DriverDataError(Exception):
  424. """Error encountered when decoding or normalizing driver data"""
  425. def __init__(self, exception, driver):
  426. self.exception = exception
  427. self.driver = driver
  428. def __str__(self):
  429. result = ("%s: %s\n"
  430. " class: %s\n"
  431. " args: %s\n"
  432. " data: %s\n"
  433. % (self.exception.__class__.__name__, self.exception,
  434. self.driver.__class__.__name__,
  435. json.dumps(self.driver._args, sort_keys=True, indent=4),
  436. json.dumps(self.driver.data, sort_keys=True, indent=4)))
  437. return result
  438. class BaseTestDriver:
  439. """Base class for test drivers used by `hoover.regression_test` and others.
  440. This class is used to create a test driver, which is an abstraction
  441. and encapsulation of the system being tested. Or, the driver in fact
  442. can be just a "mock" driver that provides data for comparison with
  443. a "real" driver.
  444. The minimum you need to create a working driver is to implement a working
  445. `self._get_data` method that sets `self.data`. Any exception from this
  446. method will be re-raised as DriverError with additional information.
  447. Also, you can set self.duration (in fractional seconds, as returned by
  448. the standard time module) in the _get_data method; if you don't, it is
  449. measured for you as the time the method call took. This is useful if you
  450. need to fetch the data from some other driver or a gateway, and you
  451. have better mechanism to determine how long the action would take "in
  452. real life".
  453. For example, if we are testing a Java library using a Py4J gateway,
  454. we need to do some more conversions outside our testing code just to
  455. be able to use the data in our Python test. We don't want to include
  456. this in the "duration", since we are measuring the Java library, not the
  457. Py4J GW (or our ability to perform the conversions optimally). So we
  458. do our measurement within the Java machine and pass the result to the
  459. Python driver.
  460. Optionally, you can:
  461. * Make an __init__ and after calling base __init__, set
  462. * `self._mandatory_args`, a list of keys that need to be present
  463. in `args` argument to `run()`
  464. * and `self._mandatory_settings`, a list of keys that need to be
  465. present in the `settings` argument to `__init__`
  466. * implement methods
  467. * `_decode_data` and `_normalize_data`, which are intended to decode
  468. the data from any raw format it is received, and to prepare it
  469. for comparison in test,
  470. * and `_check_data`, to allow for early detection of failure,
  471. from which any exception is re-raised as a DriverDataError with
  472. some additional info
  473. * set "bailouts", a list of functions which, when passed "args"
  474. argument, return true to indicate that driver is not able to
  475. process these values (see below for explanation). If any of
  476. these functions returns true, NotImplementedError is raised.
  477. The expected workflow when using the driver is:
  478. # 1. sub-class hoover.BaseTestDriver
  479. # 2. prepare settings and args
  480. MyDriver.check_values(args) # optional, to force bailouts ASAP
  481. d = MyDriver()
  482. d.setup(settings)
  483. d.run(args)
  484. assert d.data, "no data" # evaluate the result...
  485. assert d.duration < 1 # duration of _get_data in seconds
  486. Note on bailouts: The typical strategy for which the driver is intended is
  487. that each possible combination of `args` is exhausted, and results from
  488. multiple drivers are compared to evaluate whether the driver, i.e. the system
  489. in question, is O.K.
  490. The bailouts mechanism is useful in cases where, for a certain system,
  491. a valid combination of arguments would bring the same result as another one,
  492. so there is basically no value in testing both of them.
  493. Example might be a system that does not support a binary flag and
  494. behaves as if it was "on": you can simply make the test driver
  495. accept the option but "bail out" any time it is "off", therefore
  496. skipping the time-and-resource-consuming test.
  497. """
  498. bailouts = []
  499. ##
  500. # internal methods
  501. #
  502. def __init__(self):
  503. self.data = {}
  504. self.duration = None
  505. self._args = {}
  506. self._mandatory_args = []
  507. self._mandatory_settings = []
  508. self._settings = {}
  509. self._setup_ok = False
  510. def __check_mandatory(self):
  511. """validate before run()"""
  512. for key in self._mandatory_args:
  513. assert key in self._args, "missing arg: '%s'" % key
  514. for key in self._mandatory_settings:
  515. assert key in self._settings, "missing setting: '%s'" % key
  516. def __cleanup_data(self):
  517. """remove hidden data; e.g. what was only there for _check_data"""
  518. for key in list(self.data):
  519. if key.startswith("_"):
  520. del self.data[key]
  521. ##
  522. # virtual methods
  523. #
  524. def _check_data(self):
  525. """Early check for failure"""
  526. pass
  527. def _decode_data(self):
  528. """Decode from raw data as brought by _get_data"""
  529. pass
  530. def _normalize_data(self):
  531. """Preare data for comparison (e.g. sort, split, trim...)"""
  532. pass
  533. ##
  534. # public methods
  535. #
  536. @classmethod
  537. def check_values(cls, args=None):
  538. """check args in advance before running or setting up anything"""
  539. for fn in cls.bailouts:
  540. if fn(args):
  541. raise NotImplementedError(inspect.getsource(fn))
  542. def setup(self, settings, only_own=False):
  543. """Load settings. only_own means that only settings that belong to us
  544. are loaded ("DriverClass.settingName", the first discriminating part
  545. is removed)"""
  546. if only_own:
  547. for ckey in settings:
  548. driver_class_name, setting_name = ckey.split(".", 1)
  549. if self.__class__.__name__ == driver_class_name:
  550. self._settings[setting_name] = settings[ckey]
  551. else:
  552. self._settings = settings
  553. self._setup_ok = True
  554. def run(self, args):
  555. """validate, run and store data"""
  556. self._args = args
  557. assert self._setup_ok, "run() before setup()?"
  558. self.__class__.check_values(self._args)
  559. self.__check_mandatory()
  560. start = time.time()
  561. try:
  562. self._get_data() # run the test, i.e. obtain raw data
  563. except Exception as e:
  564. raise DriverError(e, self)
  565. self.duration = (time.time() - start if self.duration is None
  566. else self.duration)
  567. try:
  568. self._decode_data() # decode raw data
  569. self._normalize_data() # normalize decoded data
  570. self._check_data() # perform arbitrary checking
  571. except Exception as e:
  572. raise DriverDataError(e, self)
  573. self.__cleanup_data() # cleanup (remove data['_*'])
  574. class MockDriverTrue(BaseTestDriver):
  575. """A simple mock driver, always returning True"""
  576. def _get_data(self):
  577. self.data = True
  578. # ########################################################################### #
  579. # ## Helpers ## #
  580. # ########################################################################### #
  581. class StatCounter:
  582. """A simple counter with formulas support."""
  583. def __init__(self):
  584. self.generic_stats = {}
  585. self.driver_stats = {}
  586. self.formulas = {}
  587. self._born = time.time()
  588. def _register(self, dname):
  589. self.driver_stats[dname] = {
  590. 'calls': 0,
  591. 'rhacks': 0,
  592. 'ohacks': 0,
  593. 'duration': 0,
  594. 'overhead': 0
  595. }
  596. ##
  597. # Formulas
  598. #
  599. # cumulative duration/overhead; just round to ms
  600. self.add_formula(dname + '_overhead',
  601. lambda g, d: int(1000 * d[dname]['overhead']))
  602. self.add_formula(dname + '_duration',
  603. lambda g, d: int(1000 * d[dname]['duration']))
  604. # average (per driver call) overhead/duration
  605. self.add_formula(
  606. dname + '_overhead_per_call',
  607. lambda g, d: int(1000 * d[dname]['overhead'] / d[dname]['calls'])
  608. )
  609. self.add_formula(
  610. dname + '_duration_per_call',
  611. lambda g, d: int(1000 * d[dname]['duration'] / d[dname]['calls'])
  612. )
  613. def gtotal_drivertime(g, d):
  614. driver_time = (sum(s['overhead'] for s in d.values())
  615. + sum(s['duration'] for s in d.values()))
  616. return int(1000 * driver_time)
  617. def gtotal_loop_overhead(g, d):
  618. driver_time = gtotal_drivertime(g, d)
  619. onnext_time = int(1000 * g['on_next'])
  620. age = int(1000 * (time.time() - self._born))
  621. return age - driver_time - onnext_time
  622. # grand totals in times: driver time, loop overhead
  623. self.add_formula('gtotal_drivertime', gtotal_drivertime)
  624. self.add_formula('gtotal_loop_overhead', gtotal_loop_overhead)
  625. self.add_formula('gtotal_loop_onnext',
  626. lambda g, d: int(1000 * g['on_next']))
  627. # percentage of cases where a hack was applied
  628. self.add_formula(
  629. 'cases_hacked',
  630. lambda g, d: round(100 * float(g['hacked_cases']) / g['cases'], 2)
  631. )
  632. def _computed_stats(self):
  633. computed = dict.fromkeys(self.formulas)
  634. for fname, fml in self.formulas.items():
  635. try:
  636. v = fml(self.generic_stats, self.driver_stats)
  637. except ZeroDivisionError:
  638. v = None
  639. computed[fname] = v
  640. return computed
  641. def add_formula(self, vname, formula):
  642. """Add a function to work with generic_stats, driver_stats."""
  643. self.formulas[vname] = formula
  644. def add(self, vname, value):
  645. """Add a value to generic stat counter."""
  646. if vname in self.generic_stats:
  647. self.generic_stats[vname] += value
  648. else:
  649. self.generic_stats[vname] = value
  650. def add_for(self, dclass, vname, value):
  651. """Add a value to driver stat counter."""
  652. dname = dclass.__name__
  653. if dname not in self.driver_stats:
  654. self._register(dname)
  655. if vname in self.driver_stats[dname]:
  656. self.driver_stats[dname][vname] += value
  657. else:
  658. self.driver_stats[dname][vname] = value
  659. def count(self, vname):
  660. """Alias to add(vname, 1)"""
  661. self.add(vname, 1)
  662. def count_for(self, dclass, vname):
  663. """Alias to add_for(vname, 1)"""
  664. self.add_for(dclass, vname, 1)
  665. def all_stats(self):
  666. """Compute stats from formulas and add them to colledted data."""
  667. stats = self.generic_stats
  668. for dname, dstats in self.driver_stats.items():
  669. for key, value in dstats.items():
  670. stats[dname + "_" + key] = value
  671. stats.update(self._computed_stats())
  672. return stats
  673. class Tracker(dict):
  674. """Error tracker to allow for usable reports from huge regression tests.
  675. Best used as a result bearer from `regression_test`, this class keeps
  676. a simple in-memory "database" of errors seen during the regression
  677. test, and implements a few methods to access the data.
  678. The basic usage is:
  679. 1. Instantiate (no parameters)
  680. 2. Each time you have a result of a test, you pass it to `update()`
  681. method along with the argument set (as a single object, typically
  682. a dict) that caused the error.
  683. If the boolean value of the result is False, the object is thrown away
  684. and nothing happens. Otherwise, its string value is used as a key
  685. under which the argument set is saved.
  686. As you can see, the string is supposed to be ''as deterministic
  687. as possible'', i.e. it should contain only the information that is
  688. necessary to identify the error. Do not include any timestamps
  689. or other "volatile" values.
  690. 3. At the final stage, you can retrieve statistics such as how many (distinct)
  691. errors have been recorded, what the duration of the whole test was,
  692. how many times `update()` was called, etc.
  693. 4. Optionally, you can also call `format_report()` to get a nicely
  694. formatted report with the list of arguments for each error string.
  695. 5. Since in bigger tests argument lists can grow really large,
  696. complete lists are not normally printed. Instead, you can use
  697. `write_args_csv()`, which will create one CSV file per error,
  698. named after the first 7 chars of its SHA1 (inspired by Git).
  699. Note that you need to pass an existing writable folder path.
  700. """
  701. ##
  702. # internal methods
  703. #
  704. def __init__(self):
  705. self._start = time.time()
  706. self._db = {}
  707. self.tests_done = 0
  708. self.tests_passed = 0
  709. self.argsets_done = 0
  710. self.driver_stats = {}
  711. def _csv_fname(self, errstr, prefix):
  712. """Format name of file for this error string"""
  713. return '%s/%s.csv' % (prefix, self._eid(errstr))
  714. def _eid(self, errstr):
  715. """Return EID for the error string (first 7 chars of SHA1)."""
  716. return hashlib.sha1(errstr.encode('utf-8')).hexdigest()[:7]
  717. def _insert(self, errstr, argset):
  718. """Insert the argset into DB."""
  719. if errstr not in self._db:
  720. self._db[errstr] = []
  721. self._db[errstr].append(argset)
  722. def _format_error(self, errstr, max_aa=0):
  723. """Format single error for output."""
  724. argsets_affected = self._db[errstr]
  725. num_aa = len(argsets_affected)
  726. # trim if list is too long for Jenkins
  727. argsets_shown = argsets_affected
  728. if max_aa and (num_aa > max_aa):
  729. div = ["[...] not showing %s cases, see %s.csv for full list"
  730. % (num_aa - max_aa, self._eid(errstr))]
  731. argsets_shown = argsets_affected[0:max_aa] + div
  732. # format error
  733. formatted_aa = "\n".join([str(arg) for arg in argsets_shown])
  734. return ("~~~ ERROR FOUND (%s) ~~~~~~~~~~~~~~~~~~~~~~~~~\n"
  735. "--- error string: -----------------------------------\n%s\n"
  736. "--- argsets affected (%d) ---------------------------\n%s\n"
  737. % (self._eid(errstr), errstr, num_aa, formatted_aa))
  738. ##
  739. # public methods
  740. #
  741. def errors_found(self):
  742. """Return number of non-distinct errors in db."""
  743. return bool(self._db)
  744. def format_report(self, max_aa=0):
  745. """Return complete report formatted as string."""
  746. error_list = "\n".join([self._format_error(e, max_aa=max_aa)
  747. for e in self._db])
  748. return ("Found %(total_errors)s (%(distinct_errors)s distinct) errors"
  749. " in %(tests_done)s tests with %(argsets)s argsets"
  750. " (duration: %(time)ss):"
  751. % self.getstats()
  752. + "\n\n" + error_list)
  753. def getstats(self):
  754. """Return basic and driver stats
  755. argsets_done - this must be incremented by outer code,
  756. once per each unique argset
  757. tests_done - how many times Tracker.update() was called
  758. distinct_errors - how many distinct errors (same `str(error)`)
  759. were seen by Tracker.update()
  760. total_errors - how many times `Tracker.update()` saw an
  761. error, i.e. how many argsets are in DB
  762. time - how long since init (seconds)
  763. """
  764. def total_errors():
  765. return functools.reduce(
  766. lambda x, y: x + len(y),
  767. self._db.values(),
  768. 0,
  769. )
  770. stats = {
  771. "argsets": self.argsets_done,
  772. "tests_done": self.tests_done,
  773. "distinct_errors": len(self._db),
  774. "total_errors": total_errors(),
  775. "time": int(time.time() - self._start)
  776. }
  777. stats.update(self.driver_stats)
  778. return stats
  779. def update(self, error, argset):
  780. """Update tracker with test result.
  781. If `bool(error)` is true, it is considered an error and the argset
  782. is inserted into the DB with `str(error)` as the key. This allows for later
  783. sorting and analysis.
  784. """
  785. self.tests_done += 1
  786. if error:
  787. errstr = str(error)
  788. self._insert(errstr, argset)
  789. def write_stats_csv(self, fname):
  790. """Write stats to a simple one row (plus header) CSV."""
  791. stats = self.getstats()
  792. colnames = sorted(stats.keys())
  793. with open(fname, 'a') as fh:
  794. cw = csv.DictWriter(fh, colnames)
  795. cw.writerow(dict(zip(colnames, colnames))) # header
  796. cw.writerow(stats)
  797. def write_args_csv(self, prefix=''):
  798. """Write out a set of CSV files, one per distinctive error.
  799. Each CSV is named with error EID (first 7 chars of SHA1) and lists
  800. all argument sets affected by this error. This is supposed to make
  801. easier to further analyse impact and trigerring values of errors,
  802. perhaps using a table processor software."""
  803. def get_all_colnames():
  804. cn = {}
  805. for affected in self._db.values():
  806. for argset in affected:
  807. cn.update(dict.fromkeys(argset))
  808. return sorted(cn.keys())
  809. all_colnames = get_all_colnames()
  810. for errstr in self._db:
  811. with open(self._csv_fname(errstr, prefix), 'a') as fh:
  812. cw = csv.DictWriter(fh, all_colnames)
  813. cw.writerow(dict(zip(all_colnames, all_colnames))) # header
  814. for argset in self._db[errstr]:
  815. cw.writerow(argset)
  816. def dataMatch(pattern, data, rmax=10, _r=0):
  817. """Check if data structure matches a pattern data structure.
  818. Supports lists, dictionaries and scalars (int, float, string).
  819. For scalars, simple `==` is used. For lists, "to match" means that every
  820. element of the pattern list matches some element of the data list (e.g. the
  821. pattern `[3, 2]` matches `[1, 2, 3, 4]`). Both lists and dictionaries are matched recursively.
  822. """
  823. def listMatch(pattern, data):
  824. """Match list-like objects"""
  825. assert all([hasattr(o, 'append') for o in [pattern, data]])
  826. results = []
  827. for pv in pattern:
  828. if any([dataMatch(pv, dv, _r=_r+1) for dv in data]):
  829. results.append(True)
  830. else:
  831. results.append(False)
  832. return all(results)
  833. def dictMatch(pattern, data):
  834. """Match dict-like objects"""
  835. assert all([hasattr(o, 'items') for o in [pattern, data]])
  836. results = []
  837. try:
  838. for pk, pv in pattern.items():
  839. results.append(dataMatch(pv, data[pk], _r=_r+1))
  840. except KeyError:
  841. results.append(False)
  842. return all(results)
  843. if _r == rmax:
  844. raise RuntimeError("recursion limit hit")
  845. result = None
  846. if pattern == data:
  847. result = True
  848. else:
  849. for handler in [dictMatch, listMatch]:
  850. try:
  851. result = handler(pattern, data)
  852. except AssertionError:
  853. continue
  854. return result
  855. def jsDump(data):
  856. """A human-readable JSON dump."""
  857. return json.dumps(data, sort_keys=True, indent=4,
  858. separators=(',', ': '))
  859. def jsDiff(dira, dirb, namea="A", nameb="B", chara="a", charb="b"):
  860. """JSON-based human-readable diff of two data structures.
  861. '''BETA''' version.
  862. jsDiff is based on a unified diff of two human-readable JSON dumps, except
  863. that instead of showing line numbers and context based on proximity to
  864. the changed lines, it prints only the context that is important from the
  865. data structure's point of view.
  866. The goal is to be able to quickly tell the story of what has changed
  867. where in the structure, no matter the size and complexity of the data set.
  868. For example:
  869. a = {
  870. 'w': {1: 2, 3: 4},
  871. 'x': [1, 2, 3],
  872. 'y': [3, 1, 2]
  873. }
  874. b = {
  875. 'w': {1: 2, 3: 4},
  876. 'x': [1, 1, 3],
  877. 'y': [3, 1, 3]
  878. }
  879. print(jsDiff(a, b))
  880. will output:
  881. aaa ~/A
  882. "x": [
  883. a 2,
  884. "y": [
  885. a 2
  886. bbb ~/B
  887. "x": [
  888. b 1,
  889. "y": [
  890. b 3
  891. Notice that the final output somewhat resembles the traditional unified
  892. diff, so to avoid confusion, +/- is changed to a/b (both the characters
  893. and the names A/B can be overridden via parameters).
  894. """
  895. def compress(lines):
  896. def is_body(line):
  897. return line.startswith(("-", "+", " "))
  898. def is_diff(line):
  899. return line.startswith(("-", "+"))
  900. def is_diffA(line):
  901. return line.startswith("-")
  902. def is_diffB(line):
  903. return line.startswith("+")
  904. def is_context(line):
  905. return line.startswith(" ")
  906. def is_hdr(line):
  907. return line.startswith(("@@", "---", "+++"))
  908. def is_hdr_hunk(line):
  909. return line.startswith("@@")
  910. def is_hdr_A(line):
  911. return line.startswith("---")
  912. def is_hdr_B(line):
  913. return line.startswith("+++")
  914. class Level:
  915. def __init__(self, hint):
  916. self.hint = hint
  917. self.hinted = False
  918. def __str__(self):
  919. return str(self.hint)
  920. def get_hint(self):
  921. if not self.hinted:
  922. self.hinted = True
  923. return self.hint
  924. class ContextTracker:
  925. def __init__(self):
  926. self.trace = []
  927. self.last_line = None
  928. self.last_indent = -1
  929. def indent_of(self, line):
  930. meat = line[1:].lstrip(" ")
  931. ind = len(line) - len(meat) - 1
  932. return ind
  933. def check(self, line):
  934. indent = self.indent_of(line)
  935. if indent > self.last_indent:
  936. self.trace.append(Level(self.last_line))
  937. elif indent < self.last_indent:
  938. self.trace.pop()
  939. self.last_line = line
  940. self.last_indent = indent
  941. def get_hint(self):
  942. return self.trace[-1].get_hint()
  943. buffa = []
  944. buffb = []
  945. ct = ContextTracker()
  946. for line in lines:
  947. if is_hdr_hunk(line):
  948. continue
  949. elif is_hdr_A(line):
  950. line = line.replace("---", chara * 3, 1)
  951. buffa.insert(0, line)
  952. elif is_hdr_B(line):
  953. line = line.replace("+++", charb * 3, 1)
  954. buffb.insert(0, line)
  955. elif is_body(line):
  956. ct.check(line)
  957. if is_diff(line):
  958. hint = ct.get_hint()
  959. if hint:
  960. buffa.append(hint)
  961. buffb.append(hint)
  962. if is_diffA(line):
  963. line = line.replace("-", chara, 1)
  964. buffa.append(line)
  965. elif is_diffB(line):
  966. line = line.replace("+", charb, 1)
  967. buffb.append(line)
  968. else:
  969. raise AssertionError("difflib.unified_diff emitted"
  970. " unknown format (%s chars):\n%s"
  971. % (len(line), line))
  972. return buffa + buffb
  973. dumpa = jsDump(dira)
  974. dumpb = jsDump(dirb)
  975. udiff = difflib.unified_diff(dumpa.split("\n"), dumpb.split("\n"),
  976. "~/" + namea, "~/" + nameb,
  977. n=10000, lineterm='')
  978. return "\n".join(compress([line for line in udiff]))
  979. class Cartman:
  980. """Create argument sets from ranges (or ay iterators) of values.
  981. This class is to enable easy definition and generation of dictionary
  982. argument sets using Cartesian product. You only need to define:
  983. * structure of argument set (can be more than just flat dict)
  984. * ranges, or arbitrary iterators of values on each "leaf" of the
  985. argument set
  986. Since it is expected that any argument can hold any kind of value,
  987. even another iterable, the simple logic of "iterate it if you can"
  988. is insufficient. Instead, the definition is divided into two parts:
  989. * scheme, which is a "prototype" of a final argument set, except
  990. that for each value that will change, a `Cartman.Iterable`
  991. sentinel is used. For each leaf that is constant, `Cartman.Scalar`
  992. is used
  993. * source, which has the same structure, except that where in scheme
  994. is `Iterable`, an iterable object is expected, whereas in places
  995. where `Scalar` is used, a value is assigned that does not change
  996. during iteration.
  997. Finally, when such an instance is used in a loop, argument sets are generated
  998. using the Cartesian product of all iterables found. This allows for
  999. relatively easy definition of complex scenarios.
  1000. Consider this example:
  1001. You have a system (wrapped up in a test driver) that takes a ''size''
  1002. argument, which consists of ''width'', ''height'' and ''depth'',
  1003. each an integer ranging from 1 to 100, and a ''color'' that can
  1004. be "white", "black" or "yellow".
  1005. For a test using all-combinations strategy, you will need to generate
  1006. 100 * 100 * 100 * 3 argument sets, i.e. 3M tests.
  1007. All you need to do is:
  1008. scheme = {
  1009. 'size': {
  1010. 'width': Cartman.Iterable,
  1011. 'height': Cartman.Iterable,
  1012. 'depth': Cartman.Iterable,
  1013. },
  1014. 'color': Cartman.Iterable,
  1015. }
  1016. source = {
  1017. 'size': {
  1018. 'width': range(1, 100),
  1019. 'height': range(1, 100),
  1020. 'depth': range(1, 100),
  1021. },
  1022. 'color': ['white', 'black', 'yellow'],
  1023. }
  1024. c = Cartman(source, scheme)
  1025. for argset in c:
  1026. result = my_test(argset)
  1027. # assert ...
  1028. The main advantage is that you can separate the definition from
  1029. the code, and you can keep your iterables as big or as small as
  1030. needed, and add / remove values.
  1031. Also in case your parameters vary in structure over time, or from
  1032. one test to another, it gets much easier to keep up with changes
  1033. without much jumping through hoops.
  1034. Note: `Cartman.Scalar` is provided mainly to make your definitions
  1035. more readable. The following constructions are functionally equivalent:
  1036. c = Cartman({'a': 1}, {'a': Cartman.Scalar})
  1037. c = Cartman({'a': [1]}, {'a': Cartman.Iterable})
  1038. In the future, however, this might change, mainly in case
  1039. optimization becomes possible based on which mark was used.
  1040. """
  1041. # TODO: support for arbitrary ordering (profile / nginx)
  1042. # TODO: implement getstats and fmtstats
  1043. # TODO: N-wise
  1044. class _BaseMark:
  1045. pass
  1046. class Scalar(_BaseMark):
  1047. pass
  1048. class Iterable(_BaseMark):
  1049. pass
  1050. def __init__(self, source, scheme, recursion_limit=10, _r=0):
  1051. self.source = source
  1052. self.scheme = scheme
  1053. self.recursion_limit = recursion_limit
  1054. self._r = _r
  1055. if self._r > self.recursion_limit:
  1056. raise RuntimeError("recursion limit exceeded")
  1057. # validate scheme + source and throw useful error
  1058. scheme_ok = isinstance(self.scheme, collections.abc.Mapping)
  1059. source_ok = isinstance(self.source, collections.abc.Mapping)
  1060. if not scheme_ok:
  1061. raise ValueError("scheme must be a mapping (e.g. dict)")
  1062. elif scheme_ok and not source_ok:
  1063. raise ValueError("scheme vs. source mismatch")
  1064. def __deepcopy__(self, memo):
  1065. return Cartman(deepcopy(self.source, memo),
  1066. deepcopy(self.scheme, memo))
  1067. def _is_mark(self, subscheme):
  1068. try:
  1069. return issubclass(subscheme, Cartman._BaseMark)
  1070. except TypeError:
  1071. return False
  1072. def _means_scalar(self, subscheme):
  1073. if self._is_mark(subscheme):
  1074. return issubclass(subscheme, Cartman.Scalar)
  1075. def _means_iterable(self, subscheme):
  1076. if self._is_mark(subscheme):
  1077. return issubclass(subscheme, Cartman.Iterable)
  1078. def _get_iterable_for(self, key):
  1079. subscheme = self.scheme[key]
  1080. subsource = self.source[key]
  1081. if self._means_scalar(subscheme):
  1082. return [subsource]
  1083. elif self._means_iterable(subscheme):
  1084. return subsource
  1085. else: # try to use it as scheme
  1086. return iter(Cartman(subsource, subscheme, _r=self._r+1))
  1087. def __iter__(self):
  1088. names = []
  1089. iterables = []
  1090. for key in self.scheme:
  1091. try:
  1092. iterables.append(self._get_iterable_for(key))
  1093. except KeyError:
  1094. pass # ignore that subsource mentioned by scheme is missing
  1095. else:
  1096. names.append(key)
  1097. for values in itertools.product(*iterables):
  1098. yield dict(zip(names, values))
  1099. def getstats(self):
  1100. return {}
  1101. def fmtstats(self):
  1102. return ""