A collection of Python libraries developed for testing purposes.

hoover.py 47KB

  1. # coding=utf-8
2. import collections.abc
  3. import functools
  4. import csv
  5. import difflib
  6. import hashlib
  7. import inspect
  8. import itertools
  9. import json
  10. import operator
  11. import time
  12. from copy import deepcopy
  13. # ########################################################################### #
  14. # ## The Motor ## #
  15. # ########################################################################### #
  16. def regression_test(argsrc, tests, driver_settings=None, cleanup_hack=None,
  17. apply_hacks=None, on_next=None):
  18. """Perform regression test with argsets from `argsrc`.
  19. For each argset pulled from source, performs one comparison
20. per driver pair in `tests`, which is a list of tuples with
21. a comparison function and a pair of test driver classes: `(operator,
  22. oracle_class, result_class)`. (The classes are assumed to
  23. be sub-classes of `hoover.BaseTestDriver`.)
  24. `driver_settings` is a dictionary supposed to hold environmental
  25. values for all the drivers, the keys having form "DriverName.
  26. settingName". Each driver is then instantiated with this
  27. dict, and gets a copy of the dict with settings only intended
  28. for itself (and the "DriverName" part stripped).
29. If a comparison fails, a report is generated using `hoover.jsDiff()`
30. and, along with the affected arguments, stored in a `hoover.Tracker`
31. instance, which is finally used as the return value. This instance
32. then contains methods for basic stats as well as a method to format
33. the final report and a helper method to export argument sets
34. as CSV files.
35. Supports hacks, which are data transformations performed by
  36. `hoover.TinyCase` class and are intended to avoid known bugs
  37. and anomalies (`apply_hacks`) or clean up data structures of
  38. irrelevant data (`cleanup_hack`, performed only if the comparison
  39. function provided along with driver pair is not "equals").
40. A function can be provided as the `on_next` argument; it will be
41. called after pulling each argument set, with the current argument set
42. as the first argument and the previous one (or `None`) as the second.
  43. """
  44. # TODO: do not parse driver_settings thousands of times (use a view class?)
  45. on_next = on_next if on_next else lambda a, b: None
  46. apply_hacks = apply_hacks if apply_hacks else []
  47. driver_settings = driver_settings if driver_settings else {}
  48. tracker = Tracker()
  49. last_argset = None
  50. all_classes = set(functools.reduce(
  51. lambda a, b: a+b,
  52. [triple[1:] for triple in tests]
  53. ))
  54. counter = StatCounter()
  55. for argset in argsrc:
  56. on_start = time.time()
  57. on_next(argset, last_argset)
  58. counter.add('on_next', time.time() - on_start)
  59. # # load the data first, only once for each driver
  60. #
  61. data = {}
  62. for aclass in all_classes:
  63. try:
  64. aclass.check_values(argset)
  65. except NotImplementedError: # let them bail out
  66. counter.count_for(aclass, 'bailouts')
  67. else:
  68. data[aclass], duration, overhead = get_data_and_stats(
  69. aclass, argset, driver_settings)
  70. counter.count_for(aclass, 'calls')
  71. counter.add_for(aclass, 'duration', duration)
  72. counter.add_for(aclass, 'overhead', overhead)
  73. for match_op, oclass, rclass in tests:
  74. # skip test if one of classes bailed out on the argset
  75. if oclass not in data or rclass not in data:
  76. continue
  77. diff = None
  78. case = TinyCase({
  79. 'argset': argset,
  80. 'oracle': deepcopy(data[oclass]),
  81. 'result': deepcopy(data[rclass]),
  82. 'oname': oclass.__name__,
  83. 'rname': rclass.__name__
  84. })
  85. hacks_done = sum([case.hack(h) for h in apply_hacks])
  86. counter.add_for(oclass, 'ohacks', hacks_done)
  87. counter.add_for(rclass, 'rhacks', hacks_done)
  88. counter.add('hacks', hacks_done)
  89. counter.add('hacked_cases', (1 if hacks_done else 0))
  90. if not match_op(case['oracle'], case['result']):
  91. # try to clean up so that normally ignored items
  92. # do not clutter up the report
93. if cleanup_hack and match_op is not operator.eq:
94. case.hack(cleanup_hack)
  95. # but panic if that "removed" the error condition
  96. if match_op(case['oracle'], case['result']):
  97. raise RuntimeError("cleanup ate error")
  98. diff = jsDiff(dira=case['oracle'],
  99. dirb=case['result'],
  100. namea=case['oname'],
  101. nameb=case['rname'])
  102. tracker.update(diff, argset)
  103. counter.count('cases')
  104. tracker.argsets_done += 1
  105. last_argset = argset
  106. counter.count('argsets')
  107. tracker.driver_stats = counter.all_stats()
  108. return tracker
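# Illustrative usage sketch (not part of the library): `OracleDriver` and
# `ResultDriver` are hypothetical `BaseTestDriver` sub-classes and the settings
# key below is made up.
#
#     argsrc = iter([{'x': 1}, {'x': 2}])
#     tests = [(operator.eq, OracleDriver, ResultDriver)]
#     tracker = regression_test(
#         argsrc, tests,
#         driver_settings={'OracleDriver.endpoint': 'http://localhost:8080'})
#     if tracker.errors_found():
#         print(tracker.format_report(max_aa=10))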
  109. def get_data_and_stats(driverClass, argset, driver_settings, only_own=False):
  110. """Run test with given driver"""
  111. start = time.time()
  112. d = driverClass()
  113. d.setup(driver_settings, only_own=only_own)
  114. d.run(argset)
  115. return (d.data, d.duration, time.time() - d.duration - start)
  116. def get_data(driverClass, argset, driver_settings, only_own=False):
  117. """Run test with given driver"""
  118. d = driverClass()
  119. d.setup(driver_settings, only_own=only_own)
  120. d.run(argset)
  121. return d.data
  122. # ########################################################################### #
  123. # ## The Pattern ## #
  124. # ########################################################################### #
  125. class _BaseRuleOp:
  126. def __init__(self, items, item_ok):
  127. self._items = items
  128. self._item_ok = item_ok
  129. def _eval(self, item):
  130. try: # it's a pattern! (recurse)
  131. return RuleOp.Match(item, self._item_ok)
  132. except ValueError: # no, it's something else...
  133. return self._item_ok(item)
  134. def __bool__(self):
  135. try:
  136. return self._match()
  137. except TypeError:
  138. raise ValueError("items must be an iterable: %r" % self._items)
  139. class RuleOp:
  140. class ALL(_BaseRuleOp):
  141. def _match(self):
  142. return all(self._eval(item) for item in self._items)
  143. class ANY(_BaseRuleOp):
  144. def _match(self):
  145. return any(self._eval(item) for item in self._items)
  146. @staticmethod
  147. def Match(pattern, item_ok):
  148. """Evaluate set of logically structured patterns using passed function.
  149. pattern has form of `(op, [item1, item2, ...])` where op can be any of
  150. pre-defined logical operators (`ALL`/`ANY`, I doubt you will ever need
  151. more) and item_ok is a function that will be used to evaluate each one
  152. in the list. In case an itemN is actually pattern as well, it will be
  153. recursed into, passing the item_ok on and on.
  154. Note that there is no data to evaluate "against", you can use closure
  155. if you need to do that.
  156. """
  157. try:
  158. op, items = pattern
  159. except TypeError:
  160. raise ValueError("pattern is not a tuple: %r" % pattern)
  161. try:
  162. assert issubclass(op, _BaseRuleOp)
  163. except TypeError:
  164. raise ValueError("invalid operator: %r" % op)
  165. except AssertionError:
  166. raise ValueError("invalid operator class: %s" % op.__name__)
  167. return bool(op(items, item_ok))
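# Illustrative sketch of `RuleOp.Match` (values are made up): the pattern below
# is true for a value divisible by 2 and by either 3 or 5.
#
#     pattern = (RuleOp.ALL, [2, (RuleOp.ANY, [3, 5])])
#     value = 42
#     RuleOp.Match(pattern, lambda divisor: value % divisor == 0)  # True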
  168. # ########################################################################### #
  169. # ## The Path ## #
  170. # ########################################################################### #
  171. class DictPath:
  172. """Mixin that adds "path-like" behavior to the top dict of dicts.
173. Use this class as a mixin for a deep dict-like structure and you can access
  174. the elements using a path. For example:
175. class MyData(dict, DictPath):
  176. pass
  177. d = MyData({
  178. 'name': 'Joe',
  179. 'age': 34,
  180. 'ssn': {
  181. 'number': '012 345 678',
  182. 'expires': '10-01-16',
  183. },
  184. })
  185. print ("%s's ssn number %s will expire on %s"
  186. % (d.getpath('/name'),
  187. d.getpath('/ssn/number'),
188. d.getpath('/ssn/expires')))
189. # Joe's ssn number 012 345 678 will expire on 10-01-16
  190. """
  191. DIV = "/"
  192. class Path:
  193. def __init__(self, path, div):
  194. self.DIV = div
  195. self._path = path
  196. def _validate(self):
  197. try:
  198. assert self._path.startswith(self.DIV)
  199. except (AttributeError, AssertionError):
  200. raise ValueError("invalid path: %r" % self._path)
  201. def stripped(self):
  202. return self._path.lstrip(self.DIV)
  203. @classmethod
  204. def __s2path(cls, path):
  205. return cls.Path(path, cls.DIV)
  206. @classmethod
  207. def __err_path_not_found(cls, path):
  208. raise KeyError("path not found: %s" % path)
  209. @classmethod
  210. def __getitem(cls, dct, key):
  211. if cls.DIV in key:
  212. frag, rest = key.split(cls.DIV, 1)
  213. subdct = dct[frag]
  214. result = cls.__getitem(subdct, rest)
  215. else:
  216. result = dct[key]
  217. return result
  218. @classmethod
  219. def __setitem(cls, dct, key, value):
  220. if cls.DIV not in key:
  221. dct[key] = value
  222. else:
  223. frag, rest = key.split(cls.DIV, 1)
  224. subdct = dct[frag]
  225. cls.__setitem(subdct, rest, value)
  226. @classmethod
  227. def __delitem(cls, dct, key):
  228. if cls.DIV not in key:
  229. del dct[key]
  230. else:
  231. frag, rest = key.split(cls.DIV, 1)
  232. subdct = dct[frag]
  233. return cls.__delitem(subdct, rest)
  234. # # public methods
  235. #
  236. def getpath(self, path):
  237. try:
  238. return self.__getitem(self, self.__s2path(path).stripped())
  239. except (TypeError, KeyError):
  240. self.__err_path_not_found(path)
  241. def setpath(self, path, value):
  242. try:
  243. self.__setitem(self, self.__s2path(path).stripped(), value)
  244. except (TypeError, KeyError):
  245. self.__err_path_not_found(path)
  246. def delpath(self, path):
  247. try:
  248. self.__delitem(self, self.__s2path(path).stripped())
  249. except (TypeError, KeyError):
  250. self.__err_path_not_found(path)
  251. def ispath(self, path):
  252. try:
  253. self.getpath(path)
  254. return True
  255. except KeyError:
  256. return False
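# Illustrative sketch of the path helpers (assuming a dict sub-class that mixes
# in DictPath, as in the docstring above):
#
#     class MyData(dict, DictPath):
#         pass
#
#     d = MyData({'a': {'b': 1}})
#     d.getpath('/a/b')      # -> 1
#     d.setpath('/a/c', 2)   # d == {'a': {'b': 1, 'c': 2}}
#     d.ispath('/a/x')       # -> False
#     d.delpath('/a/b')      # d == {'a': {'c': 2}}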
  257. # ########################################################################### #
  258. # ## The Case ## #
  259. # ########################################################################### #
  260. class TinyCase(dict, DictPath):
  261. """Abstraction of the smallest unit of testing.
  262. This class is intended to hold relevant data after the actual test
  263. and apply transformations (hacks) as defined by rules.
  264. The data form (self) is:
  265. {
  266. 'argset': {}, # argset as fed into `BaseTestDriver.run`
  267. 'oracle': {}, # data as returned from oracle driver's `run()`
  268. 'result': {}, # data as returned from result driver's `run()`
  269. 'oname': "", # name of oracle driver's class
  270. 'rname': "" # name of result driver's class
  271. }
  272. The transformation is done using the `TinyCase.hack()` method to which
  273. a list of rules is passed. Each rule is applied, and rules are expected
274. to be in the following form:
  275. {
  276. 'drivers': [{}], # list of structures to match against self
  277. 'argsets': [{}], # -ditto-
  278. 'action_name': <Arg> # an action name with argument
  279. }
280. For each of the patterns ('drivers', 'argsets') present, a match against self
281. is done using the function `hoover.dataMatch`, which is basically a recursive
282. test whether the pattern is a subset of the case. If none of the results is
283. negative (i.e. both patterns missing results in a match), any known actions
284. included in the rule are called. Along with the action name, a list or a dict
285. providing the necessary parameters is expected: this is simply passed as the
286. only parameter to the corresponding method.
287. Actions use a specific way to address elements in the structures
288. saved under the oracle and result keys, provided by `DictPath`, which makes
289. it easy to define rules for arbitrarily complex dictionary structures.
290. The format resembles a Unix path, where "directories" are dict
  291. keys and "root" is the `self` of the `TinyCase` instance:
  292. /oracle/temperature
  293. /result/stats/word_count
294. Refer to each action's docstring for a description of its function
295. as well as the expected format of its argument. The name of the action as
296. used in the rule is the name of the method without the leading 'a_'.
  297. Warning: All actions will silently ignore any paths that are invalid
  298. or leading to non-existent data!
  299. (This does not apply to a path leading to `None`.)
  300. """
  301. def a_exchange(self, action):
  302. """Exchange value A for value B.
  303. Expects a dict, where key is a tuple of two values `(a, b)` and
  304. value is a list of paths. For each key, it goes through the
  305. paths and if the value equals `a` it is set to `b`.
  306. """
  307. for (oldv, newv), paths in action.items():
  308. for path in paths:
  309. try:
  310. curv = self.getpath(path)
  311. except KeyError:
  312. continue
  313. else:
  314. if curv == oldv:
  315. self.setpath(path, newv)
  316. def a_format_str(self, action):
  317. """Convert value to a string using format string.
  318. Expects a dict, where key is a format string, and value is a list
  319. of paths. For each record, the paths are traversed, and value is
  320. converted to string using the format string and the `%` operator.
  321. This is especially useful for floats which you may want to trim
  322. before comparison, since direct comparison of floats is unreliable
  323. on some architectures.
  324. """
  325. for fmt, paths in action.items():
  326. for path in paths:
  327. if self.ispath(path):
  328. new = fmt % self.getpath(path)
  329. self.setpath(path, new)
  330. def a_even_up(self, action):
  331. """Even up structure of both dictionaries.
  332. Expects a list of two-element tuples `('/dict/a', '/dict/b')`
333. containing pairs of paths to simple dictionaries.
334. The two dicts are then altered to have the same structure: if a key has
335. the value `None` in dict "a" but is missing in dict "b", it is set to
336. `None` in "b" as well, and vice versa (keys with non-None values are left alone).
  337. """
  338. for patha, pathb in action:
  339. try:
  340. a = self.getpath(patha)
  341. b = self.getpath(pathb)
  342. except KeyError:
  343. continue
  344. else:
  345. for key in set(a.keys()) | set(b.keys()):
  346. if key in a and key in b:
  347. pass # nothing to do here
  348. elif key in a and a[key] is None:
  349. b[key] = None
  350. elif key in b and b[key] is None:
  351. a[key] = None
  352. else:
  353. pass # bailout: odd key but value is *not* None
  354. def a_remove(self, action):
  355. """Remove elements from structure.
356. Expects a simple list of paths that are simply deleted from the
  357. structure.
  358. """
  359. for path in action:
  360. if self.ispath(path):
  361. self.delpath(path)
  362. def a_round(self, action):
  363. """Round a (presumably) float using tha `float()` built-in.
  364. Expects dict with precision (ndigits, after the dot) as a key and
  365. list of paths as value.
  366. """
  367. for ndigits, paths in action.items():
  368. for path in paths:
  369. try:
  370. f = self.getpath(path)
  371. except KeyError:
  372. pass
  373. else:
  374. self.setpath(path, round(f, ndigits))
  375. known_actions = {'remove': a_remove,
  376. 'even_up': a_even_up,
  377. 'format_str': a_format_str,
  378. 'exchange': a_exchange,
  379. 'round': a_round}
  380. def hack(self, ruleset):
  381. """Apply action from each rule, if patterns match."""
  382. def driver_matches(rule):
  383. if 'drivers' not in rule:
  384. return True
  385. else:
  386. return any(dataMatch(p, self)
  387. for p in rule['drivers'])
  388. def argset_matches(rule):
  389. if 'argsets' not in rule:
  390. return True
  391. else:
  392. return any(dataMatch(p, self)
  393. for p in rule['argsets'])
  394. matched = False
  395. cls = self.__class__
  396. for rule in ruleset:
  397. if driver_matches(rule) and argset_matches(rule):
  398. matched = True
  399. for action_name in cls.known_actions:
  400. if action_name in rule:
  401. cls.known_actions[action_name](self, rule[action_name])
  402. return matched
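# Illustrative sketch of a hack ruleset (driver name and paths are made up):
# round the temperature on both sides to 2 digits and drop a timestamp that is
# irrelevant for comparison, but only for the hypothetical "CelsiusDriver".
#
#     rules = [{
#         'drivers': [{'rname': 'CelsiusDriver'}],
#         'round': {2: ['/oracle/temperature', '/result/temperature']},
#         'remove': ['/oracle/timestamp', '/result/timestamp'],
#     }]
#     case.hack(rules)   # `case` being a TinyCase built by regression_test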
  403. # ########################################################################### #
  404. # ## Drivers ## #
  405. # ########################################################################### #
  406. class DriverError(Exception):
  407. """Error encountered when obtaining driver data"""
  408. def __init__(self, message, driver):
  409. self.message = message
  410. self.driver = driver
  411. def __str__(self):
  412. result = ("\n\n"
  413. " type: %s\n"
  414. " message: %s\n"
  415. " driver: %s\n"
  416. " args: %s\n"
  417. " settings: %s\n"
  418. % (self.message.__class__.__name__,
  419. self.message,
  420. self.driver.__class__.__name__,
  421. self.driver._args,
  422. self.driver._settings))
  423. return result
  424. class DriverDataError(Exception):
  425. """Error encountered when decoding or normalizing driver data"""
  426. def __init__(self, exception, driver):
  427. self.exception = exception
  428. self.driver = driver
  429. def __str__(self):
  430. result = ("%s: %s\n"
  431. " class: %s\n"
  432. " args: %s\n"
  433. " data: %s\n"
  434. % (self.exception.__class__.__name__, self.exception,
  435. self.driver.__class__.__name__,
  436. json.dumps(self.driver._args, sort_keys=True, indent=4),
  437. json.dumps(self.driver.data, sort_keys=True, indent=4)))
  438. return result
  439. class BaseTestDriver:
  440. """Base class for test drivers used by `hoover.regression_test` and others.
  441. This class is used to create a test driver, which is an abstraction
  442. and encapsulation of the system being tested. Or, the driver in fact
  443. can be just a "mock" driver that provides data for comparison with
  444. a "real" driver.
  445. The minimum you need to create a working driver is to implement a working
  446. `self._get_data` method that sets `self.data`. Any exception from this
  447. method will be re-raised as DriverError with additional information.
  448. Also, you can set self.duration (in fractional seconds, as returned by
449. the standard time module) in the _get_data method, but if you don't, it is
450. measured for you as the time the method call took. This is useful if you
  451. need to fetch the data from some other driver or a gateway, and you
452. have a better mechanism to determine how long the action would take "in
  453. real life".
  454. For example, if we are testing a Java library using a Py4J gateway,
  455. we need to do some more conversions outside our testing code just to
  456. be able to use the data in our Python test. We don't want to include
  457. this in the "duration", since we are measuring the Java library, not the
  458. Py4J GW (or our ability to perform the conversions optimally). So we
  459. do our measurement within the Java machine and pass the result to the
  460. Python driver.
  461. Optionally, you can:
462. * implement __init__ and, after calling the base __init__, set
463. * `self._mandatory_args`, a list of keys that need to be present
464. in the `args` argument to `run()`
  465. * and `self._mandatory_settings`, a list of keys that need to be
  466. present in the `settings` argument to `__init__`
  467. * implement methods
  468. * `_decode_data` and `_normalize_data`, which are intended to decode
469. the data from whatever raw format it is received in, and to prepare it
  470. for comparison in test,
  471. * and `_check_data`, to allow for early detection of failure,
  472. from which any exception is re-raised as a DriverDataError with
  473. some additional info
  474. * set "bailouts", a list of functions which, when passed "args"
  475. argument, return true to indicate that driver is not able to
  476. process these values (see below for explanation). If any of
  477. these functions returns true, NotImplementedError is raised.
  478. The expected workflow when using the driver is:
  479. # 1. sub-class hoover.BaseTestDriver
  480. # 2. prepare settings and args
  481. MyDriver.check_values(args) # optional, to force bailouts ASAP
  482. d = MyDriver()
  483. d.setup(settings)
  484. d.run(args)
  485. assert d.data, "no data" # evaluate the result...
  486. assert d.duration < 1 # duration of _get_data in seconds
487. Note on bailouts: the typical strategy for which the driver is intended is
488. that each possible combination of `args` is exhausted, and results from
489. multiple drivers are compared to evaluate whether the driver, i.e. the system
490. in question, is O.K.
491. The bailouts mechanism is useful in cases where, for a certain system,
  492. a valid combination of arguments would bring the same result as another,
  493. so there is basically no value in testing both of them.
  494. Example might be a system that does not support a binary flag and
  495. behaves as if it was "on": you can simply make the test driver
  496. accept the option but "bail out" any time it is "off", therefore
  497. skipping the time-and-resource-consuming test.
  498. """
  499. bailouts = []
  500. ##
  501. # internal methods
  502. #
  503. def __init__(self):
  504. self.data = {}
  505. self.duration = None
  506. self._args = {}
  507. self._mandatory_args = []
  508. self._mandatory_settings = []
  509. self._settings = {}
  510. self._setup_ok = False
  511. def __check_mandatory(self):
  512. """validate before run()"""
  513. for key in self._mandatory_args:
  514. assert key in self._args, "missing arg: '%s'" % key
  515. for key in self._mandatory_settings:
  516. assert key in self._settings, "missing setting: '%s'" % key
  517. def __cleanup_data(self):
  518. """remove hidden data; e.g. what was only there for _check_data"""
519. for key in list(self.data):
  520. if key.startswith("_"):
  521. del self.data[key]
  522. ##
  523. # virtual methods
  524. #
  525. def _check_data(self):
  526. """Early check for failure"""
  527. pass
  528. def _decode_data(self):
  529. """Decode from raw data as brought by _get_data"""
  530. pass
  531. def _normalize_data(self):
  532. """Preare data for comparison (e.g. sort, split, trim...)"""
  533. pass
  534. ##
  535. # public methods
  536. #
  537. @classmethod
  538. def check_values(cls, args=None):
  539. """check args in advance before running or setting up anything"""
  540. for fn in cls.bailouts:
  541. if fn(args):
  542. raise NotImplementedError(inspect.getsource(fn))
  543. def setup(self, settings, only_own=False):
  544. """Load settings. only_own means that only settings that belong to us
  545. are loaded ("DriverClass.settingName", the first discriminating part
  546. is removed)"""
  547. if only_own:
  548. for ckey in settings:
549. driver_class_name, setting_name = ckey.split(".", 1)
  550. if self.__class__.__name__ == driver_class_name:
  551. self._settings[setting_name] = settings[ckey]
  552. else:
  553. self._settings = settings
  554. self._setup_ok = True
  555. def run(self, args):
  556. """validate, run and store data"""
  557. self._args = args
  558. assert self._setup_ok, "run() before setup()?"
  559. self.__class__.check_values(self._args)
  560. self.__check_mandatory()
  561. start = time.time()
  562. try:
  563. self._get_data() # run the test, i.e. obtain raw data
  564. except Exception as e:
  565. raise DriverError(e, self)
  566. self.duration = (time.time() - start if self.duration is None
  567. else self.duration)
  568. try:
  569. self._decode_data() # decode raw data
  570. self._normalize_data() # normalize decoded data
  571. self._check_data() # perform arbitrarty checking
  572. except Exception as e:
  573. raise DriverDataError(e, self)
  574. self.__cleanup_data() # cleanup (remove data['_*'])
  575. class MockDriverTrue(BaseTestDriver):
  576. """A simple mock driver, always returning True"""
577. def _get_data(self):
  578. self.data = True
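# Illustrative driver sketch (hypothetical, only to show the virtual methods
# and bailouts in use):
#
#     class SquareDriver(BaseTestDriver):
#         """Pretends to test a system that squares 'x'."""
#         bailouts = [lambda args: args.get('x', 0) < 0]  # skip negative input
#
#         def __init__(self):
#             super().__init__()
#             self._mandatory_args = ['x']
#
#         def _get_data(self):
#             self.data = {'square': self._args['x'] ** 2,
#                          '_raw': repr(self._args)}  # stripped by cleanup
#
#         def _check_data(self):
#             assert self.data['square'] >= 0, "square cannot be negative"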
  579. # ########################################################################### #
  580. # ## Helpers ## #
  581. # ########################################################################### #
  582. class StatCounter:
  583. """A simple counter with formulas support."""
  584. def __init__(self):
  585. self.generic_stats = {}
  586. self.driver_stats = {}
  587. self.formulas = {}
  588. self._born = time.time()
  589. def _register(self, dname):
  590. self.driver_stats[dname] = {
  591. 'calls': 0,
  592. 'rhacks': 0,
  593. 'ohacks': 0,
  594. 'duration': 0,
  595. 'overhead': 0
  596. }
  597. ##
  598. # Formulas
  599. #
  600. # cumulative duration/overhead; just round to ms
  601. self.add_formula(dname + '_overhead',
  602. lambda g, d: int(1000 * d[dname]['overhead']))
  603. self.add_formula(dname + '_duration',
  604. lambda g, d: int(1000 * d[dname]['duration']))
  605. # average (per driver call) overhead/duration
  606. self.add_formula(
  607. dname + '_overhead_per_call',
  608. lambda g, d: int(1000 * d[dname]['overhead'] / d[dname]['calls'])
  609. )
  610. self.add_formula(
  611. dname + '_duration_per_call',
  612. lambda g, d: int(1000 * d[dname]['duration'] / d[dname]['calls'])
  613. )
  614. def gtotal_drivertime(g, d):
  615. driver_time = (sum(s['overhead'] for s in d.values())
  616. + sum(s['duration'] for s in d.values()))
  617. return int(1000 * driver_time)
  618. def gtotal_loop_overhead(g, d):
  619. driver_time = gtotal_drivertime(g, d)
  620. onnext_time = int(1000 * g['on_next'])
  621. age = int(1000 * (time.time() - self._born))
  622. return age - driver_time - onnext_time
  623. # grand totals in times: driver time, loop overhead
  624. self.add_formula('gtotal_drivertime', gtotal_drivertime)
  625. self.add_formula('gtotal_loop_overhead', gtotal_loop_overhead)
  626. self.add_formula('gtotal_loop_onnext',
  627. lambda g, d: int(1000 * g['on_next']))
628. # percentage of cases affected by hacks
  629. self.add_formula(
  630. 'cases_hacked',
  631. lambda g, d: round(100 * float(g['hacked_cases']) / g['cases'], 2)
  632. )
  633. def _computed_stats(self):
  634. computed = dict.fromkeys(self.formulas)
  635. for fname, fml in self.formulas.items():
  636. try:
  637. v = fml(self.generic_stats, self.driver_stats)
  638. except ZeroDivisionError:
  639. v = None
  640. computed[fname] = v
  641. return computed
  642. def add_formula(self, vname, formula):
  643. """Add a function to work with generic_stats, driver_stats."""
  644. self.formulas[vname] = formula
  645. def add(self, vname, value):
  646. """Add a value to generic stat counter."""
  647. if vname in self.generic_stats:
  648. self.generic_stats[vname] += value
  649. else:
  650. self.generic_stats[vname] = value
  651. def add_for(self, dclass, vname, value):
  652. """Add a value to driver stat counter."""
  653. dname = dclass.__name__
  654. if dname not in self.driver_stats:
  655. self._register(dname)
  656. if vname in self.driver_stats[dname]:
  657. self.driver_stats[dname][vname] += value
  658. else:
  659. self.driver_stats[dname][vname] = value
  660. def count(self, vname):
  661. """Alias to add(vname, 1)"""
  662. self.add(vname, 1)
  663. def count_for(self, dclass, vname):
  664. """Alias to add_for(vname, 1)"""
  665. self.add_for(dclass, vname, 1)
  666. def all_stats(self):
  667. """Compute stats from formulas and add them to colledted data."""
  668. stats = self.generic_stats
  669. for dname, dstats in self.driver_stats.items():
  670. for key, value in dstats.items():
  671. stats[dname + "_" + key] = value
  672. stats.update(self._computed_stats())
  673. return stats
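# Illustrative sketch of StatCounter on its own (values are made up); the
# generic counters mirror what regression_test records:
#
#     counter = StatCounter()
#     counter.add('on_next', 0.0)
#     counter.count('cases')
#     counter.add('hacked_cases', 0)
#     counter.count_for(MockDriverTrue, 'calls')
#     counter.add_for(MockDriverTrue, 'duration', 0.25)
#     counter.add_for(MockDriverTrue, 'overhead', 0.01)
#     counter.all_stats()   # plain counters plus computed formulas, e.g.
#                           # 'MockDriverTrue_duration_per_call' -> 250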
  674. class Tracker(dict):
  675. """Error tracker to allow for usable reports from huge regression tests.
  676. Best used as a result bearer from `regression_test`, this class keeps
  677. a simple in-memory "database" of errors seen during the regression
678. test, and implements a few methods to access the data.
  679. The basic usage is:
  680. 1. Instantiate (no parameters)
  681. 2. Each time you have a result of a test, you pass it to `update()`
  682. method along with the argument set (as a single object, typically
  683. a dict) that caused the error.
684. If the boolean value of the result is False, the object is thrown away
685. and nothing happens. Otherwise, its string value is used as a key
686. under which the argument set is saved.
687. As you can see, the string is supposed to be ''as deterministic
688. as possible'', i.e. it should contain only as much information
689. about the error as is necessary. Do not include any timestamps
690. or "volatile" values.
691. 3. At the final stage, you can retrieve statistics such as how many (distinct)
  692. errors have been recorded, what was the duration of the whole test,
  693. how many times `update()` was called, etc.
  694. 4. Optionally, you can also call `format_report()` to get a nicely
  695. formatted report with list of arguments for each error string.
696. 5. Since in bigger tests argument lists can grow really large,
697. complete lists are not normally printed. Instead, you can use
698. `write_args_csv()`, which will create one CSV per error,
699. named using the first 7 chars of its SHA1 (inspired by Git).
700. Note that you need to pass an existing writable folder path.
  701. """
  702. ##
  703. # internal methods
  704. #
  705. def __init__(self):
  706. self._start = time.time()
  707. self._db = {}
  708. self.tests_done = 0
  709. self.tests_passed = 0
  710. self.argsets_done = 0
  711. self.driver_stats = {}
  712. def _csv_fname(self, errstr, prefix):
  713. """Format name of file for this error string"""
  714. return '%s/%s.csv' % (prefix, self._eid(errstr))
  715. def _eid(self, errstr):
  716. """Return EID for the error string (first 7 chars of SHA1)."""
717. return hashlib.sha1(errstr.encode('utf-8')).hexdigest()[:7]
  718. def _insert(self, errstr, argset):
  719. """Insert the argset into DB."""
  720. if errstr not in self._db:
  721. self._db[errstr] = []
  722. self._db[errstr].append(argset)
  723. def _format_error(self, errstr, max_aa=0):
  724. """Format single error for output."""
  725. argsets_affected = self._db[errstr]
  726. num_aa = len(argsets_affected)
  727. # trim if list is too long for Jenkins
  728. argsets_shown = argsets_affected
  729. if max_aa and (num_aa > max_aa):
  730. div = ["[...] not showing %s cases, see %s.csv for full list"
  731. % (num_aa - max_aa, self._eid(errstr))]
  732. argsets_shown = argsets_affected[0:max_aa] + div
  733. # format error
  734. formatted_aa = "\n".join([str(arg) for arg in argsets_shown])
  735. return ("~~~ ERROR FOUND (%s) ~~~~~~~~~~~~~~~~~~~~~~~~~\n"
  736. "--- error string: -----------------------------------\n%s\n"
  737. "--- argsets affected (%d) ---------------------------\n%s\n"
  738. % (self._eid(errstr), errstr, num_aa, formatted_aa))
  739. ##
  740. # public methods
  741. #
  742. def errors_found(self):
  743. """Return number of non-distinct errors in db."""
  744. return bool(self._db)
  745. def format_report(self, max_aa=0):
  746. """Return complete report formatted as string."""
  747. error_list = "\n".join([self._format_error(e, max_aa=max_aa)
  748. for e in self._db])
  749. return ("Found %(total_errors)s (%(distinct_errors)s distinct) errors"
  750. " in %(tests_done)s tests with %(argsets)s argsets"
  751. " (duration: %(time)ss):"
  752. % self.getstats()
  753. + "\n\n" + error_list)
  754. def getstats(self):
  755. """Return basic and driver stats
756. argsets_done - this must be incremented by outer code,
757. once per each unique argset
  758. tests_done - how many times Tracker.update() was called
  759. distinct_errors - how many distinct errors (same `str(error)`)
  760. were seen by Tracker.update()
  761. total_errors - how many times `Tracker.update()` saw an
  762. error, i.e. how many argsets are in DB
  763. time - how long since init (seconds)
  764. """
  765. def total_errors():
  766. return functools.reduce(
  767. lambda x, y: x + len(y),
  768. self._db.values(),
769. 0,
  770. )
  771. stats = {
  772. "argsets": self.argsets_done,
  773. "tests_done": self.tests_done,
  774. "distinct_errors": len(self._db),
  775. "total_errors": total_errors(),
  776. "time": int(time.time() - self._start)
  777. }
  778. stats.update(self.driver_stats)
  779. return stats
  780. def update(self, error, argset):
  781. """Update tracker with test result.
782. If `bool(error)` is true, it is considered an error and the argset
783. is inserted into the DB with `str(error)` as the key. This allows for later
  784. sorting and analysis.
  785. """
  786. self.tests_done += 1
  787. if error:
  788. errstr = str(error)
  789. self._insert(errstr, argset)
  790. def write_stats_csv(self, fname):
  791. """Write stats to a simple one row (plus header) CSV."""
  792. stats = self.getstats()
  793. colnames = sorted(stats.keys())
  794. with open(fname, 'a') as fh:
  795. cw = csv.DictWriter(fh, colnames)
  796. cw.writerow(dict(zip(colnames, colnames))) # header
  797. cw.writerow(stats)
  798. def write_args_csv(self, prefix=''):
  799. """Write out a set of CSV files, one per distinctive error.
  800. Each CSV is named with error EID (first 7 chars of SHA1) and lists
801. all argument sets affected by this error. This is supposed to make it
802. easier to further analyse the impact and triggering values of errors,
803. perhaps using spreadsheet software."""
  804. def get_all_colnames():
  805. cn = {}
  806. for affected in self._db.values():
  807. for argset in affected:
  808. cn.update(dict.fromkeys(argset))
  809. return sorted(cn.keys())
  810. all_colnames = get_all_colnames()
  811. for errstr in self._db:
  812. with open(self._csv_fname(errstr, prefix), 'a') as fh:
  813. cw = csv.DictWriter(fh, all_colnames)
  814. cw.writerow(dict(zip(all_colnames, all_colnames))) # header
  815. for argset in self._db[errstr]:
  816. cw.writerow(argset)
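# Illustrative sketch of using Tracker on its own (error strings and argsets
# are made up):
#
#     t = Tracker()
#     t.update("missing field: 'id'", {'x': 1})
#     t.update(None, {'x': 2})          # falsy result: nothing is recorded
#     t.argsets_done += 1
#     t.getstats()['distinct_errors']   # -> 1
#     print(t.format_report())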
  817. def dataMatch(pattern, data):
  818. """Check if data structure matches a pattern data structure.
  819. Supports lists, dictionaries and scalars (int, float, string).
820. For scalars, simple `==` is used. For lists, "to match" means that every
821. pattern element has a matching element in the data (e.g. `[1, 2, 3, 4]`
822. matches `[3, 2]`). Both lists and dictionaries are matched recursively.
  823. """
  824. def listMatch(pattern, data):
  825. """Match list-like objects"""
  826. assert all([hasattr(o, 'append') for o in [pattern, data]])
  827. results = []
  828. for pv in pattern:
  829. if any([dataMatch(pv, dv) for dv in data]):
  830. results.append(True)
  831. else:
  832. results.append(False)
  833. return all(results)
  834. def dictMatch(pattern, data):
  835. """Match dict-like objects"""
836. assert all([hasattr(o, 'items') for o in [pattern, data]])
  837. results = []
  838. try:
  839. for pk, pv in pattern.items():
  840. results.append(dataMatch(pv, data[pk]))
  841. except KeyError:
  842. results.append(False)
  843. return all(results)
  844. result = None
  845. if pattern == data:
  846. result = True
  847. else:
  848. for handler in [dictMatch, listMatch]:
  849. try:
  850. result = handler(pattern, data)
  851. except AssertionError:
  852. continue
  853. return result
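# Illustrative calls to dataMatch (values are made up):
#
#     dataMatch({'color': 'red'}, {'color': 'red', 'size': 4})    # True
#     dataMatch({'tags': ['a']}, {'tags': ['a', 'b'], 'id': 7})   # True
#     dataMatch({'color': 'red'}, {'color': 'blue'})              # None (falsy)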
  854. def jsDump(data):
  855. """A human-readable JSON dump."""
  856. return json.dumps(data, sort_keys=True, indent=4,
  857. separators=(',', ': '))
  858. def jsDiff(dira, dirb, namea="A", nameb="B", chara="a", charb="b"):
  859. """JSON-based human-readable diff of two data structures.
  860. '''BETA''' version.
  861. jsDiff is based on unified diff of two human-readable JSON dumps except
  862. that instead of showing line numbers and context based on proximity to
863. the changed lines, it prints only the context important from the data
864. structure's point of view.
865. The goal is to be able to quickly tell the story of what has changed
866. where in the structure, no matter the size and complexity of the data set.
  867. For example:
  868. a = {
  869. 'w': {1: 2, 3: 4},
  870. 'x': [1, 2, 3],
  871. 'y': [3, 1, 2]
  872. }
  873. b = {
  874. 'w': {1: 2, 3: 4},
  875. 'x': [1, 1, 3],
  876. 'y': [3, 1, 3]
  877. }
878. print(jsDiff(a, b))
  879. will output:
  880. aaa ~/A
  881. "x": [
  882. a 2,
  883. "y": [
  884. a 2
  885. bbb ~/B
  886. "x": [
  887. b 1,
  888. "y": [
  889. b 3
890. Notice that the final output somewhat resembles the traditional unified
  891. diff, so to avoid confusion, +/- is changed to a/b (the characters can
  892. be provided as well as the names A/B).
  893. """
  894. def compress(lines):
  895. def is_body(line):
  896. return line.startswith(("-", "+", " "))
  897. def is_diff(line):
  898. return line.startswith(("-", "+"))
  899. def is_diffA(line):
  900. return line.startswith("-")
  901. def is_diffB(line):
  902. return line.startswith("+")
  903. def is_context(line):
  904. return line.startswith(" ")
  905. def is_hdr(line):
  906. return line.startswith(("@@", "---", "+++"))
  907. def is_hdr_hunk(line):
  908. return line.startswith("@@")
  909. def is_hdr_A(line):
  910. return line.startswith("---")
  911. def is_hdr_B(line):
  912. return line.startswith("+++")
  913. class Level:
  914. def __init__(self, hint):
  915. self.hint = hint
  916. self.hinted = False
  917. def __str__(self):
  918. return str(self.hint)
  919. def get_hint(self):
  920. if not self.hinted:
  921. self.hinted = True
  922. return self.hint
  923. class ContextTracker:
  924. def __init__(self):
  925. self.trace = []
  926. self.last_line = None
  927. self.last_indent = -1
  928. def indent_of(self, line):
  929. meat = line[1:].lstrip(" ")
  930. ind = len(line) - len(meat) - 1
  931. return ind
  932. def check(self, line):
  933. indent = self.indent_of(line)
  934. if indent > self.last_indent:
  935. self.trace.append(Level(self.last_line))
  936. elif indent < self.last_indent:
  937. self.trace.pop()
  938. self.last_line = line
  939. self.last_indent = indent
  940. def get_hint(self):
  941. return self.trace[-1].get_hint()
  942. buffa = []
  943. buffb = []
  944. ct = ContextTracker()
  945. for line in lines:
  946. if is_hdr_hunk(line):
  947. continue
  948. elif is_hdr_A(line):
  949. line = line.replace("---", chara * 3, 1)
  950. buffa.insert(0, line)
  951. elif is_hdr_B(line):
  952. line = line.replace("+++", charb * 3, 1)
  953. buffb.insert(0, line)
  954. elif is_body(line):
  955. ct.check(line)
  956. if is_diff(line):
  957. hint = ct.get_hint()
  958. if hint:
  959. buffa.append(hint)
  960. buffb.append(hint)
  961. if is_diffA(line):
  962. line = line.replace("-", chara, 1)
  963. buffa.append(line)
  964. elif is_diffB(line):
  965. line = line.replace("+", charb, 1)
  966. buffb.append(line)
  967. else:
968. raise AssertionError("difflib.unified_diff emitted"
  969. " unknown format (%s chars):\n%s"
  970. % (len(line), line))
  971. return buffa + buffb
  972. dumpa = jsDump(dira)
  973. dumpb = jsDump(dirb)
  974. udiff = difflib.unified_diff(dumpa.split("\n"), dumpb.split("\n"),
  975. "~/" + namea, "~/" + nameb,
  976. n=10000, lineterm='')
  977. return "\n".join(compress([line for line in udiff]))
  978. class Cartman:
  979. """Create argument sets from ranges (or ay iterators) of values.
  980. This class is to enable easy definition and generation of dictionary
  981. argument sets using Cartesian product. You only need to define:
  982. * structure of argument set (can be more than just flat dict)
  983. * ranges, or arbitrary iterators of values on each "leaf" of the
  984. argument set
985. Since the expectation is that any argument can have any kind of value,
986. even another iterable, the pure logic "iterate it if you can"
987. is insufficient. Instead, the definition is divided into two parts:
  988. * scheme, which is a "prototype" of a final argument set, except
  989. that for each value that will change, a `Cartman.Iterable`
  990. sentinel is used. For each leaf that is constant, `Cartman.Scalar`
  991. is used
  992. * source, which has the same structure, except that where in scheme
  993. is `Iterable`, an iterable object is expected, whereas in places
  994. where `Scalar` is used, a value is assigned that does not change
  995. during iteration.
996. Finally, when such an instance is used in a loop, argument sets are generated
997. using the Cartesian product of each iterable found. This allows for
998. relatively easy definition of complex scenarios.
  999. Consider this example:
1000. You have a system (wrapped up in a test driver) that takes a ''size''
1001. argument, which is supposed to be ''width'', ''height'' and ''depth'',
1002. each an integer ranging from 1 to 100, and a ''color'' that can
  1003. be "white", "black" or "yellow".
  1004. For a test using all-combinations strategy, you will need to generate
  1005. 100 * 100 * 100 * 3 argument sets, i.e. 3M tests.
  1006. All you need to do is:
  1007. scheme = {
  1008. 'size': {
  1009. 'width': Cartman.Iterable,
  1010. 'height': Cartman.Iterable,
  1011. 'depth': Cartman.Iterable,
1012. },
  1013. 'color': Cartman.Iterable,
  1014. }
  1015. source = {
  1016. 'size': {
  1017. 'width': range(1, 100),
  1018. 'height': range(1, 100),
  1019. 'depth': range(1, 100),
1020. },
  1021. 'color': ['white', 'black', 'yellow'],
  1022. }
  1023. c = Cartman(source, scheme)
  1024. for argset in c:
  1025. result = my_test(argset)
  1026. # assert ...
  1027. The main advantage is that you can separate the definition from
1028. the code, and you can keep your iterators as big or as small as
  1029. needed, and add / remove values.
  1030. Also in case your parameters vary in structure over time, or from
  1031. one test to another, it gets much easier to keep up with changes
  1032. without much jumping through hoops.
  1033. Note: `Cartman.Scalar` is provided mainly to make your definitions
1034. more readable. The following constructions are functionally equal:
  1035. c = Cartman({'a': 1}, {'a': Cartman.Scalar})
  1036. c = Cartman({'a': [1]}, {'a': Cartman.Iterable})
1037. In the future this might change, though, mainly in case
1038. optimization becomes possible based on what was used.
  1039. """
  1040. # TODO: support for arbitrary ordering (profile / nginx)
  1041. # TODO: implement getstats and fmtstats
  1042. # TODO: N-wise
  1043. class _BaseMark:
  1044. pass
  1045. class Scalar(_BaseMark):
  1046. pass
  1047. class Iterable(_BaseMark):
  1048. pass
  1049. def __init__(self, source, scheme):
  1050. self.source = source
  1051. self.scheme = scheme
  1052. # validate scheme + source and throw useful error
1053. scheme_ok = isinstance(self.scheme, collections.abc.Mapping)
1054. source_ok = isinstance(self.source, collections.abc.Mapping)
  1055. if not scheme_ok:
  1056. raise ValueError("scheme must be a mapping (e.g. dict)")
  1057. elif scheme_ok and not source_ok:
  1058. raise ValueError("scheme vs. source mismatch")
  1059. def __deepcopy__(self, memo):
  1060. return Cartman(deepcopy(self.source, memo),
  1061. deepcopy(self.scheme, memo))
  1062. def _is_mark(self, subscheme):
  1063. try:
  1064. return issubclass(subscheme, Cartman._BaseMark)
  1065. except TypeError:
  1066. return False
  1067. def _means_scalar(self, subscheme):
  1068. if self._is_mark(subscheme):
  1069. return issubclass(subscheme, Cartman.Scalar)
  1070. def _means_iterable(self, subscheme):
  1071. if self._is_mark(subscheme):
  1072. return issubclass(subscheme, Cartman.Iterable)
  1073. def _get_iterable_for(self, key):
  1074. subscheme = self.scheme[key]
  1075. subsource = self.source[key]
  1076. if self._means_scalar(subscheme):
  1077. return [subsource]
  1078. elif self._means_iterable(subscheme):
  1079. return subsource
  1080. else: # try to use it as scheme
  1081. return iter(Cartman(subsource, subscheme))
  1082. def __iter__(self):
  1083. names = []
  1084. iterables = []
  1085. for key in self.scheme:
  1086. try:
  1087. iterables.append(self._get_iterable_for(key))
  1088. except KeyError:
  1089. pass # ignore that subsource mentioned by scheme is missing
  1090. else:
  1091. names.append(key)
  1092. for values in itertools.product(*iterables):
  1093. yield dict(zip(names, values))
  1094. def getstats(self):
  1095. return {}
  1096. def fmtstats(self):
  1097. return ""