if not isinstance(valname, str):
raise ValueError("DocTestFinder.find: __test__ keys "
if not (inspect.isroutine(val) or inspect.isclass(val) or
inspect.ismodule(val) or isinstance(val, str)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((inspect.isroutine(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, str):
            docstring = obj
        else:
            try:
                docstring = obj.__doc__
                if docstring is None:
                    docstring = ''
                elif not isinstance(docstring, str):
                    docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None

        # Return a DocTest for this object.
        filename = getattr(module, '__file__', module.__name__)
        if filename[-4:] == ".pyc":
            filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)
def _find_lineno(self, obj, source_lines):
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
        lineno = None

        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0

        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.__func__
if inspect.isfunction(obj): obj = obj.__code__
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            lineno = getattr(obj, 'co_firstlineno', None)-1
        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno + 1
            pat = re.compile(r'(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno

        # We couldn't find the line number.
        return None
######################################################################
######################################################################
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
    >>> tests.sort(key = lambda test: test.name)
    >>> for test in tests:
    ...     print(test.name, '->', runner.run(test))
_TestClass -> TestResults(failed=0, attempted=2)
_TestClass.__init__ -> TestResults(failed=0, attempted=2)
_TestClass.get -> TestResults(failed=0, attempted=2)
_TestClass.square -> TestResults(failed=0, attempted=1)
The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:
>>> runner.summarize(verbose=1)
    4 items passed all tests:
       2 tests in _TestClass
       2 tests in _TestClass.__init__
       2 tests in _TestClass.get
       1 tests in _TestClass.square
    7 tests in 4 items.
    7 passed and 0 failed.
    Test passed.
    TestResults(failed=0, attempted=7)
The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:

    >>> runner.tries
    7
    >>> runner.failures
    0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
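
    For instance, a checker that also accepts case-insensitive matches
    could be passed in like this (an illustrative sketch; `LenientChecker`
    is not part of doctest):

        class LenientChecker(OutputChecker):
            def check_output(self, want, got, optionflags):
                # Accept anything the default checker accepts, plus
                # case-insensitive matches (illustrative rule only).
                if OutputChecker.check_output(self, want, got, optionflags):
                    return True
                return want.lower() == got.lower()

        runner = DocTestRunner(checker=LenientChecker())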
The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `DocTestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
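
    For example (again only a sketch; `my_test` and `QuietRunner` are
    illustrative names):

        report_lines = []
        runner = DocTestRunner(verbose=False)
        runner.run(my_test, out=report_lines.append)   # capture report text

        class QuietRunner(DocTestRunner):
            def report_failure(self, out, test, example, got):
                # Emit a one-line note instead of the full diff.
                out('FAILED: %s (example %s)\n' % (test.name, example.lineno))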
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
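
        For example, a runner that tolerates minor output differences
        could be created like this (sketch):

            runner = DocTestRunner(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE)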
self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')
def report_success(self, out, test, example, got):
Report that the given example ran successfully. (Only
        displays a message if verbose=True)
        if self._verbose:
            out("ok\n")
def report_failure(self, out, test, example, got):
Report that the given example failed.
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
Report that the given example raised an unexpected exception.
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
    def _failure_header(self, test, example):
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)
    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
        # Keep track of the number of failures and tries.
        failures = 0
        tries = 0

        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
# Merge in the example's options.
self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag
            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
                continue

            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                exec(compile(example.source, filename, "single",
                             compileflags, 1), test.globs)
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
            # If the example executed without raising any exceptions,
            # then verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS

            # The example raised an exception: check if it was expected.
            else:
                exc_msg = traceback.format_exception_only(*exception[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exception)

                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM

                # We expected an exception: see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS

                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    if check(_strip_exception_details(example.exc_msg),
                             _strip_exception_details(exc_msg),
                             self.optionflags):
                        outcome = SUCCESS

            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exception)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)

            if failures and self.optionflags & FAIL_FAST:
                break
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return TestResults(failures, tries)
def __record_outcome(self, test, f, t):
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t
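        # For example, after two passes over the same test, self._name2ft
        # might map 'pkg.mod.func' to (0, 4): no failures over four tried
        # examples in total.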
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(keepends=True)
return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
        Run the examples in `test`, and display the results using the
        writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
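
        A typical call looks like this (sketch; `my_module` is an
        illustrative name):

            finder = DocTestFinder()
            runner = DocTestRunner(verbose=False)
            for test in finder.find(my_module):
                runner.run(test)
            runner.summarize()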
        self.test = test

        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)

        save_stdout = sys.stdout
        if out is None:
            encoding = save_stdout.encoding
            if encoding is None or encoding.lower() == 'utf-8':
                out = save_stdout.write
            else:
                # Use backslashreplace error handling on write
                def out(s):
                    s = str(s.encode(encoding, 'backslashreplace'), encoding)
                    save_stdout.write(s)
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_trace = sys.gettrace()
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
# Make sure sys.displayhook just prints the value to stdout
save_displayhook = sys.displayhook
sys.displayhook = sys.__displayhook__
        try:
            return self.__run(test, compileflags, out)
        finally:
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            sys.settrace(save_trace)
            linecache.getlines = self.save_linecache_getlines
            sys.displayhook = save_displayhook
            if clear_globs:
                test.globs.clear()
    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.