@@ -1,6 +1,7 @@
 # coding=utf-8

 import collections
+import functools
 import csv
 import difflib
 import hashlib
@@ -16,7 +17,7 @@ from copy import deepcopy
 # ## The Motor ## #
 # ########################################################################### #

-def regression_test(argsrc, tests, driver_settings, cleanup_hack=None,
+def regression_test(argsrc, tests, driver_settings=None, cleanup_hack=None,
                     apply_hacks=None, on_next=None):
     """Perform regression test with argsets from `argsrc`.

@@ -54,12 +55,15 @@ def regression_test(argsrc, tests, driver_settings, cleanup_hack=None,

     on_next = on_next if on_next else lambda a, b: None
     apply_hacks = apply_hacks if apply_hacks else []
+    driver_settings = driver_settings if driver_settings else {}

     tracker = Tracker()
     last_argset = None

-    all_classes = set(reduce(lambda a, b: a+b,
-                             [triple[1:] for triple in tests]))
+    all_classes = set(functools.reduce(
+        lambda a, b: a+b,
+        [triple[1:] for triple in tests]
+    ))

     counter = StatCounter()

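Since Python 3 removed the `reduce` builtin, the flattening above now goes through `functools.reduce`. As a rough standalone illustration of what the expression computes (the `tests` triples below are invented, assuming the first element of each triple is the test name):

```python
import functools

# each triple is (test_name, oracle_class, result_class); dropping the name
# and concatenating the rest yields every driver class referenced by the tests
tests = [("smoke", "OracleA", "DriverA"), ("load", "OracleA", "DriverB")]
all_classes = set(functools.reduce(lambda a, b: a + b,
                                   [triple[1:] for triple in tests]))
# all_classes == {"OracleA", "DriverA", "DriverB"}
```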
@@ -119,7 +123,9 @@ def regression_test(argsrc, tests, driver_settings, cleanup_hack=None,
            diff = jsDiff(dira=case['oracle'],
                          dirb=case['result'],
                          namea=case['oname'],
-                         nameb=case['rname'])
+                         nameb=case['rname'],
+                         chara='o',
+                         charb='r')

            tracker.update(diff, argset)

@@ -134,19 +140,23 @@ def regression_test(argsrc, tests, driver_settings, cleanup_hack=None,
     return tracker


-def get_data_and_stats(driverClass, argset, driver_settings):
-    """Run test with given driver"""
+def get_data_and_stats(driverClass, argset, driver_settings, only_own=False):
+    """
+    Run single test, return data and stats.
+    """
     start = time.time()
     d = driverClass()
-    d.setup(driver_settings, only_own=True)
+    d.setup(driver_settings, only_own=only_own)
     d.run(argset)
     return (d.data, d.duration, time.time() - d.duration - start)


-def get_data(driverClass, argset, driver_settings):
-    """Run test with given driver"""
+def get_data(driverClass, argset, driver_settings, only_own=False):
+    """
+    Run single test, return data only.
+    """
     d = driverClass()
-    d.setup(driver_settings, only_own=True)
+    d.setup(driver_settings, only_own=only_own)
     d.run(argset)
     return d.data

@@ -155,7 +165,7 @@ def get_data(driverClass, argset, driver_settings):
 # ## The Pattern ## #
 # ########################################################################### #

-class _BaseRuleOp(object):
+class _BaseRuleOp:

     def __init__(self, items, item_ok):
         self._items = items
@@ -167,14 +177,14 @@ class _BaseRuleOp(object):
         except ValueError:  # no, it's something else...
             return self._item_ok(item)

-    def __nonzero__(self):
+    def __bool__(self):
         try:
             return self._match()
         except TypeError:
             raise ValueError("items must be an iterable: %r" % self._items)


-class RuleOp(object):
+class RuleOp:

     class ALL(_BaseRuleOp):

@@ -188,15 +198,25 @@ class RuleOp(object):

     @staticmethod
     def Match(pattern, item_ok):
-        """Evaluate set of logically structured patterns using passed function.
+        """
+        Evaluate set of logically structured patterns using passed function.
+
+        *pattern* must be a tuple in the form of `(op, items)` where *op* can
+        be either `RuleOp.ALL` or `RuleOp.ANY` and *items* is a list of items
+        to check using *item_ok* function.

-        pattern has form of `(op, [item1, item2, ...])` where op can be any of
-        pre-defined logical operators (`ALL`/`ANY`, I doubt you will ever need
-        more) and item_ok is a function that will be used to evaluate each one
-        in the list. In case an itemN is actually pattern as well, it will be
-        recursed into, passing the item_ok on and on.
+        *item_ok* is a function that accepts a single argument and its return
+        value is evaluated for true-ness.

-        Note that there is no data to evaluate "against", you can use closure
+        Final result is True or False and is computed by combining results
+        of individual *item_ok* calls: either all must be true (when `op
+        == RuleOp.ALL`) or at least one must be true (when `op == RuleOp.ANY`).
+
+        The evaluation is done recursively, that is, if an item in the pattern
+        is also a pattern itself, it will be evaluated by calling RuleOp.Match
+        and passing the same *item_ok* function.
+
+        Note that there is no data to evaluate "against", you can use closure
         if you need to do that.
         """

@@ -204,11 +224,9 @@ class RuleOp(object):
             op, items = pattern
         except TypeError:
             raise ValueError("pattern is not a tuple: %r" % pattern)
-        try:
-            assert issubclass(op, _BaseRuleOp)
-        except TypeError:
+        if type(op) is not type:
             raise ValueError("invalid operator: %r" % op)
-        except AssertionError:
+        if not issubclass(op, _BaseRuleOp):
             raise ValueError("invalid operator class: %s" % op.__name__)
         return bool(op(items, item_ok))

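For orientation, a pattern as described in the `Match` docstring might be evaluated like this (data and closure invented for illustration):

```python
seen = {"alpha", "beta"}
pattern = (RuleOp.ALL, ["alpha", (RuleOp.ANY, ["beta", "gamma"])])

# nested tuples are treated as sub-patterns and recursed into
RuleOp.Match(pattern, lambda item: item in seen)                  # True
RuleOp.Match((RuleOp.ANY, ["gamma"]), lambda item: item in seen)  # False
RuleOp.Match("not a tuple", lambda item: True)                    # ValueError
```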
@@ -217,11 +235,11 @@ class RuleOp(object):
 # ## The Path ## #
 # ########################################################################### #

-class DictPath(object):
+class DictPath:
     """Mixin that adds "path-like" behavior to the top dict of dicts.

-    Use this class as a mixin for a deep dic-like structure and you can access
-    the elements using a path. For example:
+    Use this class as a mixin for a deep dictionary-like structure in order to
+    access the elements using a Unix-like path. For example:

         MyData(dict, DictPath):
             pass
@@ -244,7 +262,7 @@ class DictPath(object):

     DIV = "/"

-    class Path(object):
+    class Path:

         def __init__(self, path, div):
             self.DIV = div
@@ -252,9 +270,11 @@ class DictPath(object):

         def _validate(self):
             try:
-                assert self._path.startswith(self.DIV)
-            except (AttributeError, AssertionError):
-                raise ValueError("invalid path: %r" % self._path)
+                has_root = self._path.startswith(self.DIV)
+            except AttributeError:
+                raise ValueError("invalid path: not a string: %r" % self._path)
+            if not has_root:
+                raise ValueError("invalid path: missing root: %r" % self._path)

         def stripped(self):
             return self._path.lstrip(self.DIV)
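The split `_validate` now reports the two failure modes separately. A sketch of the intended behaviour, assuming the mixin is applied to a plain dict and that `getpath` resolves a path against it (as `TinyCase` does further down in this file):

```python
class MyData(dict, DictPath):
    pass

d = MyData({'result': {'temperature': 50}})
d.getpath('/result/temperature')   # 50
d.getpath('result/temperature')    # ValueError: invalid path: missing root: ...
d.getpath(None)                    # ValueError: invalid path: not a string: ...
```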
@@ -329,12 +349,13 @@ class DictPath(object):
 # ########################################################################### #

 class TinyCase(dict, DictPath):
-    """Abstraction of the smallest unit of testing.
+    """Test case for hoover.

-    This class is intended to hold relevant data after the actual test
-    and apply transformations (hacks) as defined by rules.
+    This class is used as an intermediary container for test parameters,
+    oracles and test results. This is to allow post-test transformations
+    ("hacks") to happen before the result is evaluated for pass/fail.

-    The data form (self) is:
+    Instantiate TinyCase with data (self) in the following format:

         {
             'argset': {},   # argset as fed into `BaseTestDriver.run`
@@ -344,40 +365,19 @@ class TinyCase(dict, DictPath):
             'rname': ""     # name of result driver's class
         }

-    The transformation is done using the `TinyCase.hack()` method to which
-    a list of rules is passed. Each rule is applied, and rules are expected
-    to be in a following form:
+    Then call TinyCase.hack() with a set of rules which can alter oracles,
+    results or both based on the data stored in TinyCase.

-        {
-            'drivers': [{}],        # list of structures to match against self
-            'argsets': [{}],        # -ditto-
-            'action_name': <Arg>    # an action name with argument
-        }
+    Typical use cases for 'hacks' are:

-    For each of patterns ('drivers', argsets') present, match against self
-    is done using function `hoover.dataMatch`, which is basically a recursive
-    test if the pattern is a subset of the case. If none of results is
-    negative (i.e. both patterns missing results in match), any known actions
-    included in the rule are called. Along with action name a list or a dict
-    providing necessary parameters is expected: this is simply passed as only
-    parameter to corresponding method.
-
-    Actions use specific way how to address elements in the structures
-    saved in the oracle and result keys provided by `DictPath`, which makes
-    it easy to define rules for arbitrarily complex dictionary structures.
-    The format resembles to Unix path, where "directories" are dict
-    keys and "root" is the `self` of the `TinyCase` instance:
-
-        /oracle/temperature
-        /result/stats/word_count
-
-    Refer to each action's docstring for descriprion of their function
-    as well as expected format of argument. The name of action as used
-    in the reule is the name of method without leading 'a_'.
-
-    Warning: All actions will silently ignore any paths that are invalid
-    or leading to non-existent data!
-    (This does not apply to a path leading to `None`.)
+    * avoid known and tracked bugs,
+    * help normalize results (remove irrelevant details),
+    * solve certain limitations in oracle machines.
+
+    Note that while for most tests, you should strive for zero hacks,
+    sometimes they are inevitable. In such cases, the number of hacks can
+    be a useful quality metric. For that reason, 'hoover.regression_test'
+    will count the applied hacks and return it in the test report.
     """

     def a_exchange(self, action):
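Put together, a populated TinyCase in the format above might look like this (all values invented for illustration):

```python
case = TinyCase({
    'argset': {'word': 'lazy dog'},          # as fed into BaseTestDriver.run
    'oracle': {'stats': {'word_count': 2}},  # expected data
    'result': {'stats': {'word_count': 2}},  # data produced by the tested driver
    'oname': 'WordCountOracle',              # oracle driver class name
    'rname': 'WordCountDriver',              # result driver class name
})
```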
@@ -387,7 +387,7 @@ class TinyCase(dict, DictPath):
         value is a list of paths. For each key, it goes through the
         paths and if the value equals `a` it is set to `b`.
         """
-        for (oldv, newv), paths in action.iteritems():
+        for (oldv, newv), paths in action.items():
             for path in paths:
                 try:
                     curv = self.getpath(path)
@@ -408,7 +408,7 @@ class TinyCase(dict, DictPath):
         before comparison, since direct comparison of floats is unreliable
         on some architectures.
         """
-        for fmt, paths in action.iteritems():
+        for fmt, paths in action.items():
             for path in paths:
                 if self.ispath(path):
                     new = fmt % self.getpath(path)
@@ -457,7 +457,7 @@ class TinyCase(dict, DictPath):
         Expects dict with precision (ndigits, after the dot) as a key and
         list of paths as value.
         """
-        for ndigits, paths in action.iteritems():
+        for ndigits, paths in action.items():
             for path in paths:
                 try:
                     f = self.getpath(path)
@@ -473,7 +473,57 @@ class TinyCase(dict, DictPath):
               'round': a_round}

     def hack(self, ruleset):
-        """Apply action from each rule, if patterns match."""
+        """
+        Run any matching actions in the *ruleset*.
+
+        Each rule must be in the following form:
+
+            {
+                'drivers': [{}],        # list of structures to match
+                                        # against self
+                'argsets': [{}],        # -ditto-
+                <action_name>: <Arg>    # an action name with argument
+                <action_name>: <Arg>    # another action...
+            }
+
+        Each of the rules is first evaluated for match (does it apply to this
+        TinyCase?), and if the rule applies, transformation is done. The
+        transformation is defined by `<action_name>: <Arg>` pairs and it can
+        alter 'oracle', 'result' or both.
+
+        The match evaluation is done using `hoover.dataMatch()` -- this is
+        basically a recursive pattern match against 'drivers' and 'argsets'.
+        Both 'drivers' and 'argsets' are optional, but when specified, all
+        items must match in order for the rule to apply. (If 'drivers'
+        and 'argsets' are both missing or empty, the rule will apply to each
+        and every test case.)
+
+        If a rule does not match, `TinyCase.hack()` moves on to the next one.
+
+        If a rule does match, `TinyCase.hack()` will look for actions defined
+        in it. Action consists of action name (key of the rule dictionary,
+        <action_name>) and an argument (<Arg>).
+
+        Action name must be one of: 'remove', 'even_up', 'format_str',
+        'exchange' or 'round'. Each action corresponds to a TinyCase method
+        prefixed by 'a_'; for example 'even_up' action corresponds to
+        TinyCase.a_even_up method. Each action expects different argument
+        so see the corresponding method docstrings.
+
+        Because 'oracle' and 'result' can be relatively complex structures,
+        actions accept Unix-like paths to specify elements inside them.
+        The "root" of the path is the TinyCase instance, and "directories"
+        are keys under it. For example, the following would be valid paths
+        if test drivers work with dictionaries such as `{'temperature': 50,
+        'stats': {'word_count': 15}}`:
+
+            /oracle/temperature
+            /result/stats/word_count
+
+        Warning: All actions will silently ignore any paths that are invalid
+        or leading to non-existent data!
+        (This does not apply to a path leading to `None`.)
+        """

        def driver_matches(rule):
            if 'drivers' not in rule:
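A hypothetical ruleset in the format this docstring describes (driver names and paths are made up; see the `a_*` method docstrings for each action's argument format):

```python
ruleset = [
    {
        # only for cases whose result came from this driver
        'drivers': [{'rname': 'WordCountDriver'}],
        # round both sides to 2 digits before comparison
        'round': {2: ['/result/stats/ratio', '/oracle/stats/ratio']},
        # treat "N/A" in the oracle as equivalent to None
        'exchange': {('N/A', None): ['/oracle/stats/note']},
    },
]
case.hack(ruleset)
```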
@@ -548,36 +598,65 @@ class DriverDataError(Exception):
         return result


-class BaseTestDriver(object):
+class BaseTestDriver:
     """Base class for test drivers used by `hoover.regression_test` and others.

-    This class is used to create a test driver, which is an abstraction
-    and encapsulation of the system being tested. Or, the driver in fact
-    can be just a "mock" driver that provides data for comparison with
-    a "real" driver.
-
-    The minimum you need to create a working driver is to implement a working
-    `self._get_data` method that sets `self.data`. Any exception from this
-    method will be re-raised as DriverError with additional information.
-
-    Also, you can set self.duration (in fractional seconds, as returned by
-    standard time module) in the _get_data method, but if you don't, it is
-    measured for you as time the method call took. This is useful if you
-    need to fetch the data from some other driver or a gateway, and you
-    have better mechanism to determine how long the action would take "in
-    real life".
-
-    For example, if we are testing a Java library using a Py4J gateway,
-    we need to do some more conversions outside our testing code just to
-    be able to use the data in our Python test. We don't want to include
-    this in the "duration", since we are measuring the Java library, not the
-    Py4J GW (or our ability to perform the conversions optimally). So we
-    do our measurement within the Java machine and pass the result to the
-    Python driver.
+    This class represents a test driver and can be used to:
+
+    * Wrap system under test (SUT).
+
+      Provide a simple interface to set up, sandbox and activate the system
+      and collect any relevant results. This can be merely a return value
+      (purely functional test) but also other characteristics such as
+      time to complete.
+
+    * Mimic ("mock") the system under test.
+
+      Also called an oracle machine, this can be used to predict expected
+      behavior of SUT under given parameters.
+
+    * Wrap an alternative implementation of SUT.
+
+      As a special case of the previous role, sometimes it's desirable to
+      use an alternative implementation of SUT as oracle machine. This
+      can be a legacy implementation, reference implementation or other
+      platform implementation.
+
+    In either case, the driver makes sure that any input arguments are
+    interpreted (and passed on) correctly and any results are returned in
+    a consistent way.
+
+    To use this class, sub-class it and implement the `_get_data()` method.
+    The `_get_data()` method must:
+
+    * Accept a single argument; this contains arguments to the SUT.
+
+      If using `hoover.regression_test()`, this value will be retrieved
+      from the *argsrc* iterator.
+
+    * Implement the test case defined by the argument set.
+
+      The implementation can either be a wrapper to real SUT, alternative
+      one, or can be an oracle machine -- i.e. it can figure out the result
+      on its own. Note that this can be much easier than it sounds, given
+      that you can "cheat" by crafting the set of test cases so that the
+      prediction is easy (but still effective at hitting bugs), or you
+      can "hide the answer" in the *args* itself, and define the set of
+      test cases statically in form of "question, answer" pairs.
+
+    * Collect any relevant data and set it to `data` property.
+
+      Optionally, you can also set `duration` property (in fractional
+      seconds, as returned by standard time module). If you don't,
+      it will be automatically measured.
+
+    Any exception from the *_get_data* method will be re-raised as
+    DriverError.

     Optionally, you can:

-    * Make an __init__ and after calling base __init__, set
+    * Implement *__init__* method calling base __init__ and setting more
+      properties:

       * `self._mandatory_args`, a list of keys that need to be present
         in `args` argument to `run()`
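A minimal driver in the spirit of this description could look like the sketch below (a toy system under test, not part of this patch):

```python
class WordCountDriver(BaseTestDriver):
    """Toy SUT wrapper: counts words in the 'word' argument."""

    def _get_data(self):
        text = self._args['word']
        # whatever is relevant for comparison goes into self.data;
        # self.duration is measured automatically unless set here
        self.data = {'stats': {'word_count': len(text.split())}}

# e.g. get_data(WordCountDriver, {'word': 'lazy dog'}, {}) would
# return {'stats': {'word_count': 2}}
```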
@@ -585,7 +664,7 @@ class BaseTestDriver(object):
       * and `self._mandatory_settings`, a list of keys that need to be
         present in the `settings` argument to `__init__`

-    * implement methods
+    * Implement methods

       * `_decode_data` and `_normalize_data`, which are intended to decode
         the data from any raw format it is received, and to prepare it
@@ -651,7 +730,7 @@ class BaseTestDriver(object):

     def __cleanup_data(self):
         """remove hidden data; e.g. what was only there for _check_data"""
-        for key in self.data.keys():
+        for key in list(self.data):
             if key.startswith("_"):
                 del self.data[key]

@@ -677,17 +756,27 @@ class BaseTestDriver(object):

     @classmethod
     def check_values(cls, args=None):
-        """check args in advance before running or setting up anything"""
+        """
+        Check args in advance before running or setting up anything.
+        """
         for fn in cls.bailouts:
             if fn(args):
                 raise NotImplementedError(inspect.getsource(fn))

     def setup(self, settings, only_own=False):
-        """Load settings. only_own means that only settings that belong to us
-        are loaded ("DriverClass.settingName", the first discriminating part
-        is removed)"""
+        """
+        Load settings.
+
+        If *only_own* is false, *settings* are merely assigned to
+        the settings attribute.
+
+        If *only_own* is true, settings are filtered: any keys that don't
+        begin with the driver class name and a period are ignored.
+        Settings that do start with this prefix are assigned to the settings
+        attribute with the prefix removed.
+        """
         if only_own:
-            for ckey in settings.keys():
+            for ckey in settings:
                 driver_class_name, setting_name = ckey.split(".", 2)
                 if self.__class__.__name__ == driver_class_name:
                     self._settings[setting_name] = settings[ckey]
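In other words, with `only_own=True` a driver keeps only the keys namespaced to its own class name. A small sketch, reusing the toy driver from above (settings keys invented):

```python
settings = {
    'WordCountDriver.locale': 'en_US',
    'LegacyOracle.endpoint': 'http://localhost:8080',
}
d = WordCountDriver()
d.setup(settings, only_own=True)
# d._settings == {'locale': 'en_US'}; the LegacyOracle.* key was ignored
```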
@@ -696,7 +785,9 @@ class BaseTestDriver(object):
         self._setup_ok = True

     def run(self, args):
-        """validate, run and store data"""
+        """
+        Validate args, run SUT and store data.
+        """

         self._args = args
         assert self._setup_ok, "run() before setup()?"
@@ -705,7 +796,7 @@ class BaseTestDriver(object):
         start = time.time()
         try:
             self._get_data()        # run the test, i.e. obtain raw data
-        except StandardError as e:
+        except Exception as e:
             raise DriverError(e, self)
         self.duration = (time.time() - start if self.duration is None
                          else self.duration)
@@ -713,7 +804,7 @@ class BaseTestDriver(object):
             self._decode_data()     # decode raw data
             self._normalize_data()  # normalize decoded data
             self._check_data()      # perform arbitrarty checking
-        except StandardError, e:
+        except Exception as e:
             raise DriverDataError(e, self)
         self.__cleanup_data()       # cleanup (remove data['_*'])

@@ -729,8 +820,10 @@ class MockDriverTrue(BaseTestDriver):
 # ## Helpers ## #
 # ########################################################################### #

-class StatCounter(object):
-    """A simple counter with formulas support."""
+class StatCounter:
+    """
+    A simple counter with support for custom formulas.
+    """

     def __init__(self):
         self.generic_stats = {}
@@ -791,8 +884,8 @@ class StatCounter(object):
         )

     def _computed_stats(self):
-        computed = dict.fromkeys(self.formulas.keys())
-        for fname, fml in self.formulas.iteritems():
+        computed = dict.fromkeys(self.formulas)
+        for fname, fml in self.formulas.items():
             try:
                 v = fml(self.generic_stats, self.driver_stats)
             except ZeroDivisionError:
@@ -801,18 +894,24 @@ class StatCounter(object):
         return computed

     def add_formula(self, vname, formula):
-        """Add a function to work with generic_stats, driver_stats."""
+        """
+        Add a function to work with generic_stats, driver_stats.
+        """
         self.formulas[vname] = formula

     def add(self, vname, value):
-        """Add a value to generic stat counter."""
+        """
+        Add a value to generic stat counter.
+        """
         if vname in self.generic_stats:
             self.generic_stats[vname] += value
         else:
             self.generic_stats[vname] = value

     def add_for(self, dclass, vname, value):
-        """Add a value to driver stat counter."""
+        """
+        Add a value to driver stat counter.
+        """
         dname = dclass.__name__
         if dname not in self.driver_stats:
             self._register(dname)
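For context, a formula receives both stat dictionaries and produces a derived value, for instance an error rate (names invented):

```python
counter = StatCounter()
counter.add_formula('error_rate',
                    lambda generic, drivers: generic['errors'] / generic['tests'])
counter.add('tests', 10)
counter.add('errors', 1)
counter.all_stats()['error_rate']   # 0.1
```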
@@ -822,25 +921,32 @@ class StatCounter(object):
             self.driver_stats[dname][vname] = value

     def count(self, vname):
-        """Alias to add(vname, 1)"""
+        """
+        Alias to add(vname, 1)
+        """
         self.add(vname, 1)

     def count_for(self, dclass, vname):
-        """Alias to add_for(vname, 1)"""
+        """
+        Alias to add_for(vname, 1)
+        """
         self.add_for(dclass, vname, 1)

     def all_stats(self):
-        """Compute stats from formulas and add them to colledted data."""
+        """
+        Compute stats from formulas and add them to collected data.
+        """
         stats = self.generic_stats
-        for dname, dstats in self.driver_stats.iteritems():
-            for key, value in dstats.iteritems():
+        for dname, dstats in self.driver_stats.items():
+            for key, value in dstats.items():
                 stats[dname + "_" + key] = value
         stats.update(self._computed_stats())
         return stats


 class Tracker(dict):
-    """Error tracker to allow for usable reports from huge regression tests.
+    """
+    Error tracker to allow for usable reports from huge regression tests.

     Best used as a result bearer from `regression_test`, this class keeps
     a simple in-memory "database" of errors seen during the regression
@@ -855,13 +961,14 @@ class Tracker(dict):
       a dict) that caused the error.

       If boolean value of the result is False, the object is thrown away
-      and nothing happen. Otherwise, its string value is used as a key
+      and nothing happens. Otherwise, its string value is used as a key
       under which the argument set is saved.

-      As you can see, the string is supposed to be ''as deterministic
-      as possible'', i.e. it should provide as little information
-      about the error as is necessary. Do not include any timestamps
-      or "volatile" values.
+      The string interpretation of the result is supposed to be
+      "as deterministic as possible", i.e. it should provide only
+      necessary information about the error: do not include any
+      timestamps or "volatile" values such as PIDs, version numbers
+      or tempfile names.

    3. At final stage, you can retrieve statistics as how many (distinct)
       errors have been recorded, what was the duration of the whole test,
@@ -891,21 +998,29 @@ class Tracker(dict):
         self.driver_stats = {}

     def _csv_fname(self, errstr, prefix):
-        """Format name of file for this error string"""
+        """
+        Format name of file for this error string
+        """
         return '%s/%s.csv' % (prefix, self._eid(errstr))

     def _eid(self, errstr):
-        """Return EID for the error string (first 7 chars of SHA1)."""
+        """
+        Return EID for the error string (first 7 chars of SHA1).
+        """
         return hashlib.sha1(errstr).hexdigest()[:7]

     def _insert(self, errstr, argset):
-        """Insert the argset into DB."""
+        """
+        Insert the argset into DB.
+        """
         if errstr not in self._db:
             self._db[errstr] = []
         self._db[errstr].append(argset)

     def _format_error(self, errstr, max_aa=0):
-        """Format single error for output."""
+        """
+        Format single error for output.
+        """
         argsets_affected = self._db[errstr]
         num_aa = len(argsets_affected)

@@ -928,11 +1043,15 @@ class Tracker(dict):
     #

     def errors_found(self):
-        """Return number of non-distinct errors in db."""
+        """
+        Return number of non-distinct errors in db.
+        """
         return bool(self._db)

     def format_report(self, max_aa=0):
-        """Return complete report formatted as string."""
+        """
+        Return complete report formatted as string.
+        """
         error_list = "\n".join([self._format_error(e, max_aa=max_aa)
                                 for e in self._db])
         return ("Found %(total_errors)s (%(distinct_errors)s distinct) errors"
@@ -942,20 +1061,28 @@ class Tracker(dict):
                 + "\n\n" + error_list)

     def getstats(self):
-        """Return basic and driver stats
+        """
+        Return basic and driver stats

-        argsets_done - this should must be raised by outer code,
-                       once per each unique argset
-        tests_done - how many times Tracker.update() was called
-        distinct_errors - how many distinct errors (same `str(error)`)
+        Returns a dictionary with the following values:
+
+        'tests_done' - how many times Tracker.update() was called
+
+        'distinct_errors' - how many distinct errors (same `str(error)`)
                           were seen by Tracker.update()
-        total_errors - how many times `Tracker.update()` saw an
+
+        'total_errors' - how many times `Tracker.update()` saw an
                        error, i.e. how many argsets are in DB
-        time - how long since init (seconds)
+
+        'time' - how long since init (seconds)
         """

         def total_errors():
-            return reduce(lambda x, y: x + len(y), self._db.values(), 0)
+            return functools.reduce(
+                lambda x, y: x + len(y),
+                self._db.values(),
+                0,
+            )

         stats = {
             "argsets": self.argsets_done,
@@ -968,7 +1095,8 @@ class Tracker(dict):
         return stats

     def update(self, error, argset):
-        """Update tracker with test result.
+        """
+        Update tracker with test result.

         If `bool(error)` is true, it is considered error and argset
         is inserted to DB with `str(error)` as key. This allows for later
@@ -980,7 +1108,9 @@ class Tracker(dict):
             self._insert(errstr, argset)

     def write_stats_csv(self, fname):
-        """Write stats to a simple one row (plus header) CSV."""
+        """
+        Write stats to a simple one row (plus header) CSV.
+        """
         stats = self.getstats()
         colnames = sorted(stats.keys())
         with open(fname, 'a') as fh:
@@ -989,18 +1119,20 @@ class Tracker(dict):
             cw.writerow(stats)

     def write_args_csv(self, prefix=''):
-        """Write out a set of CSV files, one per distinctive error.
+        """
+        Write out a set of CSV files, one per distinctive error.

         Each CSV is named with error EID (first 7 chars of SHA1) and lists
         all argument sets affected by this error. This is supposed to make
         easier to further analyse impact and trigerring values of errors,
-        perhaps using a table processor software."""
+        perhaps using a table processor software.
+        """

         def get_all_colnames():
             cn = {}
-            for affected in self._db.itervalues():
+            for affected in self._db.values():
                 for argset in affected:
-                    cn.update(dict.fromkeys(argset.keys()))
+                    cn.update(dict.fromkeys(argset))
             return sorted(cn.keys())

         all_colnames = get_all_colnames()
@@ -1013,41 +1145,45 @@ class Tracker(dict):
                 cw.writerow(argset)


-def dataMatch(pattern, data, rmax=10, _r=0):
+def dataMatch(pattern, data):
     """Check if data structure matches a pattern data structure.

     Supports lists, dictionaries and scalars (int, float, string).

-    For scalars, simple `==` is used. Lists are converted to sets and
-    "to match" means "to have a matching subset (e.g. `[1, 2, 3, 4]`
-    matches `[3, 2]`). Both lists and dictionaries are matched recursively.
+    For scalars, simple `==` is used.
+
+    Lists are converted to sets and "to match" means "to have a matching
+    subset (e.g. `[1, 2, 3, 4]` matches `[3, 2]`).
+
+    Both lists and dictionaries are matched recursively.
     """

     def listMatch(pattern, data):
-        """Match list-like objects"""
+        """
+        Match list-like objects
+        """
         assert all([hasattr(o, 'append') for o in [pattern, data]])
         results = []
         for pv in pattern:
-            if any([dataMatch(pv, dv, _r=_r+1) for dv in data]):
+            if any([dataMatch(pv, dv) for dv in data]):
                 results.append(True)
             else:
                 results.append(False)
         return all(results)

     def dictMatch(pattern, data):
-        """Match dict-like objects"""
+        """
+        Match dict-like objects
+        """
         assert all([hasattr(o, 'iteritems') for o in [pattern, data]])
         results = []
         try:
-            for pk, pv in pattern.iteritems():
-                results.append(dataMatch(pv, data[pk], _r=_r+1))
+            for pk, pv in pattern.items():
+                results.append(dataMatch(pv, data[pk]))
         except KeyError:
             results.append(False)
         return all(results)

-    if _r == rmax:
-        raise RuntimeError("recursion limit hit")
-
     result = None
     if pattern == data:
         result = True
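The subset semantics described in the docstring work roughly like this (values invented; note that dict patterns still go through the `hasattr(o, 'iteritems')` check left unchanged above, so this sketch assumes that check is also ported or otherwise satisfied):

```python
case = {'rname': 'WordCountDriver',
        'result': {'stats': {'word_count': 2, 'chars': 8}},
        'tags': ['slow', 'unicode']}

dataMatch({'rname': 'WordCountDriver'}, case)          # True
dataMatch({'result': {'stats': {'chars': 8}}}, case)   # True, matched recursively
dataMatch({'tags': ['unicode']}, case)                 # True, subset of the list
dataMatch({'result': {'stats': {'chars': 9}}}, case)   # False
```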
@@ -1061,13 +1197,16 @@ def dataMatch(pattern, data, rmax=10, _r=0):


 def jsDump(data):
-    """A human-readable JSON dump."""
+    """
+    A human-readable JSON dump.
+    """
     return json.dumps(data, sort_keys=True, indent=4,
                       separators=(',', ': '))


 def jsDiff(dira, dirb, namea="A", nameb="B", chara="a", charb="b"):
-    """JSON-based human-readable diff of two data structures.
+    """
+    JSON-based human-readable diff of two data structures.

     '''BETA''' version.

@@ -1140,7 +1279,7 @@ def jsDiff(dira, dirb, namea="A", nameb="B", chara="a", charb="b"):
     def is_hdr_B(line):
         return line.startswith("+++")

-    class Level(object):
+    class Level:

         def __init__(self, hint):
             self.hint = hint
@@ -1154,7 +1293,7 @@ def jsDiff(dira, dirb, namea="A", nameb="B", chara="a", charb="b"):
             self.hinted = True
             return self.hint

-    class ContextTracker(object):
+    class ContextTracker:

         def __init__(self):
             self.trace = []
@@ -1212,7 +1351,7 @@ def jsDiff(dira, dirb, namea="A", nameb="B", chara="a", charb="b"):
            buffb.append(line)

        else:
-           raise AssertionError("difflib.unified_diff emited"
+           raise AssertionError("difflib.unified_diff emitted"
                                 " unknown format (%s chars):\n%s"
                                 % (len(line), line))

@@ -1227,34 +1366,32 @@ def jsDiff(dira, dirb, namea="A", nameb="B", chara="a", charb="b"):
     return "\n".join(compress([line for line in udiff]))


-class Cartman(object):
-    """Create argument sets from ranges (or ay iterators) of values.
+class Cartman:
+    """
+    Create argument sets from ranges (or any iterators) of values.

     This class is to enable easy definition and generation of dictionary
-    argument sets using Cartesian product. You only need to define:
-
-    * structure of argument set (can be more than just flat dict)
+    argument sets using Cartesian product.

-    * ranges, or arbitrary iterators of values on each "leaf" of the
-      argument set
+    To use the Cartman iterator, you need to define the structure of an
+    argument set. An argument set--typically a dictionary--is a set of values
+    that together constitute a test case. Within the argument set, values
+    will change from test case to test case, so for each changing value,
+    you will also need to define the range of values you want to test on.

-    Since there is expectation that any argument can have any kind of values
-    even another iterables, the pure logic "iterate it if you can"
-    is insufficient. Instead, definition is divided in two parts:
+    The Cartman initializer expects the following arguments:

-    * scheme, which is a "prototype" of a final argument set, except
-      that for each value that will change, a `Cartman.Iterable`
-      sentinel is used. For each leaf that is constant, `Cartman.Scalar`
-      is used
+    * *scheme*, which is a "prototype" of a final argument set, except
+      that values are replaced by either `Cartman.Iterable` if the
+      value changes from one test case to another, or `Cartman.Scalar`
+      if the value is constant.

-    * source, which has the same structure, except that where in scheme
-      is `Iterable`, an iterable object is expected, whereas in places
-      where `Scalar` is used, a value is assigned that does not change
-      during iteration.
+    * *source*, which has the same structure, except that where the scheme
+      has `Cartman.Iterable`, the source has an iterable. Where the scheme
+      has `Cartman.Scalar`, the source can have any value.

-    Finally, when such instance is used in loop, argument sets are generated
-    uising Cartesian product of each iterable found. This allows for
-    relatively easy definition of complex scenarios.
+    Finally, when a Cartman instance is used in a loop, it uses the Cartesian
+    product in order to generate argument sets.

     Consider this example:

@@ -1310,12 +1447,11 @@ class Cartman(object):
     optimization became possible based on what was used.
     """

-
     # TODO: support for arbitrary ordering (profile / nginx)
     # TODO: implement getstats and fmtstats
     # TODO: N-wise

-    class _BaseMark(object):
+    class _BaseMark:
         pass

     class Scalar(_BaseMark):
@@ -1324,13 +1460,9 @@ class Cartman(object):
     class Iterable(_BaseMark):
         pass

-    def __init__(self, source, scheme, recursion_limit=10, _r=0):
+    def __init__(self, source, scheme):
         self.source = source
         self.scheme = scheme
-        self.recursion_limit = recursion_limit
-        self._r = _r
-        if self._r > self.recursion_limit:
-            raise RuntimeError("recursion limit exceeded")

         # validate scheme + source and throw useful error
         scheme_ok = isinstance(self.scheme, collections.Mapping)
@@ -1366,16 +1498,14 @@ class Cartman(object):
         elif self._means_iterable(subscheme):
             return subsource
         else:   # try to use it as scheme
-            return iter(Cartman(subsource, subscheme, _r=self._r+1))
+            return iter(Cartman(subsource, subscheme))

     def __iter__(self):

         names = []
         iterables = []

-        keys = self.scheme.keys()
-
-        for key in keys:
+        for key in self.scheme:
             try:
                 iterables.append(self._get_iterable_for(key))
             except KeyError: