#!/usr/bin/env python
8 """
9 A driver for testing interactive python examples in text files and
10 docstrings. This doctest driver performs three functions:
11
12 - checking: Runs the interactive examples, and reports any examples
13 whose actual output does not match their expected output.
14
15 - debugging: Runs the interactive examples, and enters the debugger
16 whenever an example's actual output does not match its expected
17 output.
18
19 - updating: Runs the interactive examples, and replaces the expected
20 output with the actual output whenever they don't match. This is
21 used to update the output for new or out-of-date examples.
22
23 A number of other flags can be given; call the driver with the
24 `--help` option for a complete list.
25 """

import os, os.path, sys, unittest, pdb, bdb, re, tempfile, traceback
import textwrap
from doctest import *
from doctest import DocTestCase
from optparse import OptionParser, OptionGroup, Option
from StringIO import StringIO

__version__ = '0.1'
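
# Example invocations (illustrative; "doctest_driver.py" stands for
# whatever name this file is saved under):
#
#     python doctest_driver.py --check mydocs.txt
#     python doctest_driver.py --update --ellipsis mydocs.txt
#     python doctest_driver.py --debug mypkg/mymodule.py:MyClass.method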


class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.
    """
    def __init__(self, out):
        self.__out = out
        pdb.Pdb.__init__(self)

    def trace_dispatch(self, *args):
        # Redirect stdout to our output stream while the debugger is
        # interacting with the user, then restore the real stdout.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            sys.stdout = save_stdout

def _exception_traceback(exc_info):
    """
    Return a string containing a traceback message for the given
    exc_info tuple (as returned by sys.exc_info()).
    """
    excout = StringIO()
    exc_type, exc_val, exc_tb = exc_info
    traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
    return excout.getvalue()

class _SpoofOut(StringIO):
    def getvalue(self):
        result = StringIO.getvalue(self)
        # If anything at all was written, make sure the result ends in
        # a newline; there's no way for the expected output to
        # indicate that a trailing newline is missing.
        if result and not result.endswith("\n"):
            result += "\n"
        # Prevent softspace from screwing up the next test case, in
        # case an example used print with a trailing comma.
        if hasattr(self, "softspace"):
            del self.softspace
        return result

    def truncate(self, size=None):
        StringIO.truncate(self, size)
        if hasattr(self, "softspace"):
            del self.softspace


class MyDocTestParser(DocTestParser):

    PYLISTING_RE = re.compile(r'''
        (^\.\.[ ]*pylisting::[ ]?\S*\n    # directive
         (?:[ ]*\n|                       # blank line or
          [ ]+.*\n)*)                     # indented line
        ''', re.VERBOSE+re.MULTILINE)

    PYLISTING_EX = re.compile(r'''
        (?:^[^ ].*\n                      # non-blank line
         (?:[ ]*\n|                       # blank line or
          [ ]+.*\n)*)                     # indented line
        ''', re.VERBOSE+re.MULTILINE)

    DOCTEST_OPTION_RE = re.compile(r'''
        ^[ ]*:\w+:.*\n                    # :option:
        (.*\S.*)*                         # non-blank lines
        ''', re.VERBOSE+re.MULTILINE)
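
    # Illustrative only: PYLISTING_RE is written to match reST blocks
    # shaped like the following (the directive name comes from the
    # regex above; the :caption: line is a made-up example of the kind
    # of option line that DOCTEST_OPTION_RE strips out):
    #
    #     .. pylisting:: example-name
    #        :caption: an option line
    #
    #        print 'hello'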

    def parse(self, string, name='<string>'):
        output = []
        lineno_offset = 0

        for piecenum, piece in enumerate(self.PYLISTING_RE.split(string)):
            for example in DocTestParser.parse(self, piece, name):
                # Take any examples the base parser finds, adjusting
                # their line numbers to be document-relative.
                if isinstance(example, Example):
                    example.lineno += lineno_offset
                    output.append(example)

                # If this piece is a pylisting directive (the split
                # puts directives at odd indices), then convert its
                # contents into examples with no expected output.
                elif piecenum%2 == 1 and example.strip():
                    output.append(example[:example.find('\n')])

                    pysrc = example[example.find('\n'):]
                    pysrc = self.DOCTEST_OPTION_RE.sub('', pysrc)
                    pysrc = textwrap.dedent(pysrc)

                    for ex in self.PYLISTING_EX.findall(pysrc):
                        source = ex.strip()
                        if not source: continue
                        want = ''
                        exc_msg = None
                        indent = 4
                        lineno = lineno_offset
                        options = self._find_options(source, name, lineno)
                        output.append(Example(source, want, exc_msg,
                                              lineno, indent, options))
                else:
                    output.append(example)

            lineno_offset += piece.count('\n')

        return output

    def get_examples(self, string, name='<string>'):
        examples = []
        ignore = False

        for x in self.parse(string, name):
            if isinstance(x, Example):
                if not ignore:
                    examples.append(x)
                else:
                    print '.. doctest-ignore:: %s' % x.source.strip()[:50]
            else:
                # A doctest-ignore comment turns off checking for all
                # examples up to the next piece of non-blank text.
                if re.search(r'\.\.\s*doctest-ignore::?\s*$', x):
                    ignore = True
                elif x.strip():
                    ignore = False
        return examples
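
    # Illustrative only: in a text file, checking can be suppressed
    # for the examples that immediately follow a doctest-ignore
    # comment:
    #
    #     .. doctest-ignore::
    #         >>> random.random()
    #         0.31415926535897931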


class UpdateRunner(DocTestRunner):
    """
    A subclass of `DocTestRunner` that checks the output of each
    example, and replaces the expected output with the actual output
    for any examples that fail.

    `UpdateRunner` can be used:
      - To automatically fill in the expected output for new examples.
      - To correct examples whose output has become out-of-date.

    However, care must be taken not to update an example's expected
    output with an incorrect value.
    """
    def __init__(self, verbose=False, mark_updates=False):
        self._mark_updates = mark_updates
        DocTestRunner.__init__(self, verbose=verbose)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        self._new_want = {}
        (f,t) = DocTestRunner.run(self, test, compileflags, out, clear_globs)

        # Update the test's docstring, and the linenos of its
        # examples, by replacing the old expected outputs with the
        # new expected outputs.
        old_lines = test.docstring.split('\n')
        new_lines = []
        lineno = 0
        offset = 0

        for example in test.examples:
            # Copy the lines up through the start of the example's
            # output from old_lines to new_lines.
            got_start = example.lineno + example.source.count('\n')
            new_lines += old_lines[lineno:got_start]
            lineno = got_start

            # Sanity check: the example's expected output should
            # appear in old_lines where we expect it to appear.
            if example.want:
                assert (example.want.split('\n')[0] ==
                        old_lines[lineno][example.indent:]), \
                       'Line number mismatch at %d' % lineno

            # Skip over the old expected output.
            old_len = example.want.count('\n')
            lineno += old_len

            # Mark any updates we make.
            if self._mark_updates and example in self._new_want:
                new_lines.append(' '*example.indent + '... ' +
                                 '# [!!] OUTPUT AUTOMATICALLY UPDATED [!!]')

            # Add the new expected output.
            new_want = self._new_want.get(example, example.want)
            if new_want:
                new_want = '\n'.join([' '*example.indent+l
                                      for l in new_want[:-1].split('\n')])
                new_lines.append(new_want)

            # Update the example's want & lineno fields.
            example.want = new_want
            example.lineno += offset
            offset += example.want.count('\n') - old_len

        # Add any remaining lines, and store the updated docstring.
        new_lines += old_lines[lineno:]
        test.docstring = '\n'.join(new_lines)

        # Return failures & tries.
        return (f,t)

    def report_start(self, out, test, example):
        pass

    def report_success(self, out, test, example, got):
        pass

    def report_unexpected_exception(self, out, test, example, exc_info):
        replacement = _exception_traceback(exc_info)
        self._new_want[example] = replacement
        if self._verbose:
            self._report_replacement(out, test, example, replacement)

    def report_failure(self, out, test, example, got):
        self._new_want[example] = got
        if self._verbose:
            self._report_replacement(out, test, example, got)

    def _report_replacement(self, out, test, example, replacement):
        want = '\n'.join([' '+l for l in example.want.split('\n')[:-1]])
        repl = '\n'.join([' '+l for l in replacement.split('\n')[:-1]])
        if want and repl:
            diff = 'Replacing:\n%s\nWith:\n%s\n' % (want, repl)
        elif want:
            diff = 'Removing:\n%s\n' % want
        elif repl:
            diff = 'Adding:\n%s\n' % repl
        out(self._header(test, example) + diff)
    DIVIDER = '-'*70
    def _header(self, test, example):
        if test.filename is None:
            tag = ("On line #%s of %s" %
                   (example.lineno+1, test.name))
        elif test.lineno is None:
            tag = ("On line #%s of %s in %s" %
                   (example.lineno+1, test.name, test.filename))
        else:
            lineno = test.lineno+example.lineno+1
            tag = ("On line #%s of %s (%s)" %
                   (lineno, test.filename, test.name))
        source_lines = example.source.rstrip().split('\n')
        return (self.DIVIDER + '\n' + tag + '\n' +
                '    >>> %s\n' % source_lines[0] +
                ''.join(['    ... %s\n' % l for l in source_lines[1:]]))


def _indent(s, indent=4):
    """
    Add `indent` spaces to the beginning of every non-blank line
    in `s`, and return the result.
    """
    return re.sub('(?m)^(?!$)', indent*' ', s)
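
# For instance (hypothetical values):
#     _indent('x\ny\n')  ->  '    x\n    y\n'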

import keyword, token, tokenize

class Debugger:
    """
    Run the examples in a doctest, entering the python debugger
    whenever an example's actual output (or exception) does not
    match its expected output.
    """
    runner = DocTestRunner()

    def __init__(self, checker=None, set_trace=None):
        if checker is None:
            checker = OutputChecker()
        self.checker = checker
        if set_trace is None:
            set_trace = pdb.Pdb().set_trace
        self.set_trace = set_trace

    def _check_output(self, example):
        want = example.want
        optionflags = self._get_optionflags(example)
        got = sys.stdout.getvalue()
        sys.stdout.truncate(0)
        if not self.checker.check_output(want, got, optionflags):
            self.runner.report_failure(self.save_stdout.write,
                                       self.test, example, got)
            return False
        else:
            return True

    def _check_exception(self, example):
        want_exc_msg = example.exc_msg
        optionflags = self._get_optionflags(example)
        exc_info = sys.exc_info()
        got_exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
        if not self.checker.check_output(want_exc_msg, got_exc_msg,
                                         optionflags):
            got = _exception_traceback(exc_info)
            self.runner.report_failure(self.save_stdout.write,
                                       self.test, example, got)
            return False
        else:
            return True

    def _print_if_not_none(self, *args):
        if args == (None,):
            pass
        elif len(args) == 1:
            print repr(args[0])
        else:
            print repr(args)

    def _script_from_examples(self, s):
        # Convert the docstring into an executable script: example
        # source is copied through (instrumented so that the values of
        # bare expressions get printed); every example is followed by
        # a CHECK_OUTPUT or CHECK_EXCEPTION call; and all other text
        # becomes comments.
        output = []
        examplenum = 0
        for piece in MyDocTestParser().parse(s):
            if isinstance(piece, Example):
                source = self._simulate_compile_singlemode(piece.source)
                if piece.exc_msg is None:
                    output.append(source)
                    output.append(self._CHK_OUT % examplenum)
                else:
                    output.append('try:')
                    output.append(_indent(source))
                    output.append(_indent(self._CHK_OUT % examplenum))
                    output.append('except:')
                    output.append(_indent(self._CHK_EXC % examplenum))
                examplenum += 1
            else:
                output += ['# '+l for l in piece.split('\n')[:-1]]
        return '\n'.join(output)

    _CHK_OUT = 'if not CHECK_OUTPUT(__examples__[%d]): __set_trace__()'
    _CHK_EXC = 'if not CHECK_EXCEPTION(__examples__[%d]): __set_trace__()'
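
    # Illustrative only: for a docstring containing
    #
    #     Some text.
    #     >>> print 1+1
    #     2
    #
    # the generated debugging script looks roughly like:
    #
    #     # Some text.
    #     print 1+1
    #     if not CHECK_OUTPUT(__examples__[0]): __set_trace__()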

    def _simulate_compile_singlemode(self, s):
        # Calculate line offsets: lines[n] is the character position
        # at which line n starts (lines are numbered from 1, hence
        # the two placeholder entries).
        lines = [0, 0]
        pos = 0
        while 1:
            pos = s.find('\n', pos)+1
            if not pos: break
            lines.append(pos)
        lines.append(len(s))

        oldpos = 0
        parenlevel = 0
        deflevel = 0
        output = []
        stmt = []

        text = StringIO(s)
        tok_gen = tokenize.generate_tokens(text.readline)
        for toktype, tok, (srow,scol), (erow,ecol), line in tok_gen:
            # Copy the text between tokens verbatim, then the token.
            newpos = lines[srow] + scol
            stmt.append(s[oldpos:newpos])
            if tok != '':
                stmt.append(tok)
            oldpos = newpos + len(tok)

            # Track parenthesization depth.
            if tok in '([{':
                parenlevel += 1
            if tok in '}])':
                parenlevel -= 1

            # Track whether we're inside a def or class body.
            if tok in ('def', 'class') and deflevel == 0:
                deflevel = 1
            if deflevel and toktype == token.INDENT:
                deflevel += 1
            if deflevel and toktype == token.DEDENT:
                deflevel -= 1

            # At the end of each top-level statement, if the statement
            # is a bare expression, wrap it in a __print__ call so its
            # value gets displayed.
            if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
                             token.INDENT, token.ENDMARKER) or
                 tok==':') and parenlevel == 0):
                if deflevel == 0 and self._is_expr(stmt[1:-2]):
                    output.append(stmt[0])
                    output.append('__print__((')
                    output += stmt[1:-2]
                    output.append('))')
                    output += stmt[-2:]
                else:
                    output += stmt
                stmt = []
        return ''.join(output)

    def _is_expr(self, stmt):
        stmt = [t for t in stmt if t]
        if not stmt:
            return False

        # An assignment statement is not an expression.
        parenlevel = 0
        for tok in stmt:
            if tok in '([{': parenlevel += 1
            if tok in '}])': parenlevel -= 1
            if (parenlevel == 0 and
                tok in ('=', '+=', '-=', '*=', '/=', '%=', '&=', '|=',
                        '^=', '<<=', '>>=', '**=', '//=')):
                return False

        # A statement that starts with a keyword is not an expression.
        if stmt[0] in ("assert", "break", "class", "continue", "def",
                       "del", "elif", "else", "except", "exec",
                       "finally", "for", "from", "global", "if",
                       "import", "pass", "print", "raise", "return",
                       "try", "while", "yield"):
            return False
        return True
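
    # Illustrative behavior, given token lists like those built above:
    #     _is_expr(['f', '(', 'x', ')'])  ->  True   (bare expression)
    #     _is_expr(['x', '=', '1'])       ->  False  (assignment)
    #     _is_expr(['print', 'x'])        ->  False  (keyword statement)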

    def _get_optionflags(self, example):
        optionflags = 0
        for (flag, val) in example.options.items():
            if val:
                optionflags |= flag
            else:
                optionflags &= ~flag
        return optionflags

    def debug(self, test, pm=False):
        self.test = test

        # Save the real stdout.
        self.save_stdout = sys.stdout

        # Convert the source docstring to a script.
        script = self._script_from_examples(test.docstring)

        # Create a debugger that directs its output to the real
        # stdout.
        debugger = _OutputRedirectingPdb(sys.stdout)

        # Patch pdb.set_trace to use our debugger.
        save_set_trace = pdb.set_trace
        pdb.set_trace = debugger.set_trace

        # Write the script to a temporary file.  (A real file is used,
        # rather than exec'ing a string, so that the debugger can
        # display source lines.)
        srcfilename = tempfile.mktemp(".py", "doctestdebug_")
        f = open(srcfilename, 'w')
        f.write(script)
        f.close()

        # Set up the globals that the generated script expects.
        test.globs['CHECK_OUTPUT'] = self._check_output
        test.globs['CHECK_EXCEPTION'] = self._check_exception
        test.globs['__print__'] = self._print_if_not_none
        test.globs['__set_trace__'] = debugger.set_trace
        test.globs['__examples__'] = self.test.examples
        try:
            if pm is False:
                debugger.run("execfile(%r)" % srcfilename,
                             test.globs, test.globs)
            else:
                try:
                    sys.stdout = _SpoofOut()
                    try:
                        execfile(srcfilename, test.globs)
                    except bdb.BdbQuit:
                        return
                    except:
                        sys.stdout = self.save_stdout
                        exc_info = sys.exc_info()
                        exc_msg = traceback.format_exception_only(
                            exc_info[0], exc_info[1])[-1]
                        self.save_stdout.write(self.runner.DIVIDER+'\n')
                        self.save_stdout.write('Unexpected exception:\n' +
                                               _indent(exc_msg))
                        raise
                finally:
                    sys.stdout = self.save_stdout
        finally:
            # Restore the real pdb.set_trace, and clean up the
            # temporary script file.
            pdb.set_trace = save_set_trace
            os.remove(srcfilename)

    def post_mortem(self, debugger, t):
        debugger.reset()
        while t.tb_next is not None:
            t = t.tb_next
        debugger.interaction(t.tb_frame, t)
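
    # Typical use (illustrative): given a DocTest object `test` (e.g.
    # from find() below), run its examples under the debugger:
    #
    #     Debugger().debug(test, pm=False)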


def find(name):
    """
    Given a name, return a list of `DocTest` objects for it.  `name`
    may be the filename of a text file or python module, or a module
    name; a specific test can be selected by appending a colon and
    the test's name.
    """
530
531 if ':' in name:
532 (name, testname) = name.split(':')
533 else:
534 testname = None
535
536 if os.path.exists(name):
537 filename = os.path.normpath(os.path.abspath(name))
538 ext = os.path.splitext(filename)[-1]
539 if (ext[-3:] != '.py' and ext[-4:-1] != '.py'):
540
541 if testname is not None:
542 raise ValueError("test names can't be specified "
543 "for text files")
544 s = open(filename).read()
545 test = MyDocTestParser().get_doctest(s, {}, name, filename, 0)
546 return [test]
547 else:
548
549
550 basedir, modname = find_module_from_filename(filename)
551 orig_path = sys.path[:]
552 try:
553 sys.path.insert(0, basedir)
554 module = import_from_name(modname)
555 finally:
556 sys.path[:] = orig_path
557 else:
558 module = import_from_name(name)
559
560
561 tests = DocTestFinder().find(module)
562 if testname is not None:
563 testname = '%s.%s' % (module.__name__, testname)
564 tests = [t for t in tests if t.name.startswith(testname)]
565 if len(tests) == 0:
566 raise ValueError("test not found")
567 return tests

def import_from_name(name):
    try:
        return __import__(name, globals(), locals(), ['*'])
    except Exception, e:
        raise ValueError, str(e)
    except:
        raise ValueError, 'Error importing %r' % name

def find_module_from_filename(filename):
    """
    Given a filename, return a tuple `(basedir, module)`, where
    `module` is the module's name, and `basedir` is the directory it
    should be loaded from (this directory should be added to the
    path to import it).  Packages are handled correctly.
    """
    (basedir, file) = os.path.split(filename)
    (module_name, ext) = os.path.splitext(file)

    # If it's a package's __init__ file, then the package directory
    # itself names the module.
    if module_name == '__init__':
        (basedir, module_name) = os.path.split(basedir)

    # If the module is contained in a package, then find the package
    # root and prefix the package name onto the module name.
    if (os.path.exists(os.path.join(basedir, '__init__.py')) or
        os.path.exists(os.path.join(basedir, '__init__.pyc')) or
        os.path.exists(os.path.join(basedir, '__init__.pyw'))):
        package = []
        while os.path.exists(os.path.join(basedir, '__init__.py')):
            (basedir,dir) = os.path.split(basedir)
            if dir == '': break
            package.append(dir)
        package.reverse()
        module_name = '.'.join(package+[module_name])

    return (basedir, module_name)
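
# For instance, with a package laid out as /lib/pkg/__init__.py and
# /lib/pkg/mod.py (hypothetical paths):
#     find_module_from_filename('/lib/pkg/mod.py')       -> ('/lib', 'pkg.mod')
#     find_module_from_filename('/lib/pkg/__init__.py')  -> ('/lib', 'pkg')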


def run(names, optionflags, verbosity):
    suite = unittest.TestSuite()
    for name in names:
        try:
            for test in find(name):
                suite.addTest(DocTestCase(test, optionflags))
        except ValueError, e:
            print >>sys.stderr, ('%s: Error processing %s -- %s' %
                                 (sys.argv[0], name, e))
    unittest.TextTestRunner(verbosity=verbosity).run(suite)

def debug(names, optionflags, verbosity, pm=True):
    debugger = Debugger()
    for name in names:
        try:
            for test in find(name):
                debugger.debug(test, pm)
        except ValueError, e:
            print >>sys.stderr, ('%s: Error processing %s -- %s' %
                                 (sys.argv[0], name, e))

def update(names, optionflags, verbosity):
    runner = UpdateRunner(verbose=True)
    for name in names:
        try:
            # Find the test.  Updating is only supported for text
            # files (exactly one test, starting at line 0).
            tests = find(name)
            if len(tests) != 1 or tests[0].lineno != 0:
                raise ValueError('update can only be used with text files')
            test = tests[0]

            # Run the updater; it records the new expected outputs in
            # the test's docstring.
            (failures, tries) = runner.run(test)

            # Confirm the changes, then write them back.
            if failures == 0:
                print 'No updates needed!'
            else:
                print '*'*70
                print '%d examples updated.' % failures
                print '-'*70
                sys.stdout.write('Accept updates? [y/N] ')
                sys.stdout.flush()
                if sys.stdin.readline().lower().strip() in ('y', 'yes'):
                    # Keep a backup of the original contents.
                    backup = test.filename+'.bak'
                    print 'Renaming %s -> %s' % (name, backup)
                    os.rename(test.filename, backup)

                    print 'Writing updated version to %s' % test.filename
                    out = open(test.filename, 'w')
                    out.write(test.docstring)
                    out.close()
                else:
                    print 'Updates rejected!'
        except ValueError, e:
            print >>sys.stderr, ('%s: Error processing %s -- %s' %
                                 (sys.argv[0], name, e))


CHECK_OPT = Option("--check",
    action="store_const", dest="action", const="check",
    default="check",
    help="Verify the output of the doctest examples in the "
         "given files.")

UPDATE_OPT = Option("--update", "-u",
    action="store_const", dest="action", const="update",
    help="Update the expected output for new or out-of-date doctest "
         "examples in the given files.  In particular, find every "
         "example whose actual output does not match its expected "
         "output, and replace its expected output with its actual "
         "output.  You will be asked to verify the changes before "
         "they are written back to the file; be sure to check them "
         "over carefully, to ensure that you don't accidentally "
         "create broken test cases.")

DEBUG_OPT = Option("--debug",
    action="store_const", dest="action", const="debug",
    help="Verify the output of the doctest examples in the given "
         "files.  If any example fails, then enter the python "
         "debugger.")

VERBOSE_OPT = Option("-v", "--verbose",
    action="count", dest="verbosity", default=1,
    help="Increase verbosity.")

QUIET_OPT = Option("-q", "--quiet",
    action="store_const", dest="verbosity", const=0,
    help="Decrease verbosity.")

UDIFF_OPT = Option("--udiff", "-d",
    action="store_const", dest="udiff", const=1, default=0,
    help="Display test failures using unified diffs.")

CDIFF_OPT = Option("--cdiff",
    action="store_const", dest="cdiff", const=1, default=0,
    help="Display test failures using context diffs.")

NDIFF_OPT = Option("--ndiff",
    action="store_const", dest="ndiff", const=1, default=0,
    help="Display test failures using ndiffs.")

ELLIPSIS_OPT = Option("--ellipsis",
    action="store_const", dest="ellipsis", const=1, default=0,
    help="Allow \"...\" to be used for ellipsis in the "
         "expected output.")

NORMWS_OPT = Option("--normalize_whitespace",
    action="store_const", dest="normws", const=1, default=0,
    help="Ignore whitespace differences between "
         "the expected output and the actual output.")

def main():
    optparser = OptionParser(usage='%prog [options] NAME ...',
                             version="Edloper's Doctest Driver, "
                                     "version %s" % __version__)

    action_group = OptionGroup(optparser, 'Actions (default=check)')
    action_group.add_options([CHECK_OPT, UPDATE_OPT, DEBUG_OPT])
    optparser.add_option_group(action_group)

    reporting_group = OptionGroup(optparser, 'Reporting')
    reporting_group.add_options([VERBOSE_OPT, QUIET_OPT,
                                 UDIFF_OPT, CDIFF_OPT, NDIFF_OPT])
    optparser.add_option_group(reporting_group)

    compare_group = OptionGroup(optparser, 'Output Comparison')
    compare_group.add_options([ELLIPSIS_OPT, NORMWS_OPT])
    optparser.add_option_group(compare_group)

    # Parse the arguments, and build the doctest option flags.
    optionvals, names = optparser.parse_args()
    if len(names) == 0:
        optparser.error("No files specified")
    optionflags = (optionvals.udiff * REPORT_UDIFF |
                   optionvals.cdiff * REPORT_CDIFF |
                   optionvals.ndiff * REPORT_NDIFF |
                   optionvals.ellipsis * ELLIPSIS |
                   optionvals.normws * NORMALIZE_WHITESPACE)

    # Perform the requested action.
    if optionvals.action == 'check':
        run(names, optionflags, optionvals.verbosity)
    elif optionvals.action == 'update':
        update(names, optionflags, optionvals.verbosity)
    elif optionvals.action == 'debug':
        debug(names, optionflags, optionvals.verbosity)
    else:
        optparser.error('INTERNAL ERROR: Bad action %s' % optionvals.action)

if __name__ == '__main__': main()