module_lines = []
for example in enabled_examples:
    # Create a unit-testable function for this example
    func_name = 'test_' + example.callname.replace('.', '_')
    body_lines = []
    for part in example._parts:
        body_part = part.format_src(linenos=False, want=False,
                                    prefix=False, colored=False,
                                    partnos=False)
        if part.want:
            want_text = '# doctest want:\n'
            want_text += utils.indent(part.want, '# ')
            body_part += '\n' + want_text
        body_lines.append(body_part)
    body = '\n'.join(body_lines)
    func_text = 'def {}():\n'.format(func_name) + utils.indent(body)
    module_lines.append(func_text)
module_text = '\n\n\n'.join(module_lines)
return module_text
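The block above assembles the text of a plain Python test module, one `test_*` function per enabled doctest example. As a rough, hedged illustration of how that text could be used, the sketch below writes it to a temporary file so a unittest/pytest-style runner could collect it; `module_text` and the file name are assumptions carried over from the block above, not part of xdoctest's public API.

# Hypothetical usage sketch: persist the generated module text and inspect it.
# Assumes `module_text` was produced by the loop above.
from os.path import join
from xdoctest import utils

with utils.TempDir() as temp:
    modpath = join(temp.dpath, 'test_generated_doctests.py')
    with open(modpath, 'w') as file:
        file.write(module_text)
    # The file now contains one `test_*` function per doctest example.
    with open(modpath, 'r') as file:
        print(file.read())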
fail_part_lines = []
after_parts_lines = []
temp = [before_part_lines, fail_part_lines, after_parts_lines]
tindex = 0
indent_text = ' ' * (5 + n_digits)
for partx, (part, part_text) in enumerate(zip(self._parts, textgen)):
    if part in self._skipped_parts:
        # temp[tindex] += [utils.indent(part_text, ' ' * 4)]
        # temp[tindex] += [utils.indent(' >>> # skipped', indent_text)]
        continue
    part_out = r1_strip_nl(self.logged_stdout.get(partx, ''))
    if part is self.failed_part:
        tindex += 1
    # Append the part source code
    temp[tindex] += [utils.indent(part_text, ' ' * 4)]
    # Append the part stdout (if it exists)
    if part_out:
        temp[tindex] += [utils.indent(part_out, indent_text)]
    if part is self.failed_part:
        tindex += 1
        # part_eval = self.logged_evals[partx]
        # if part_eval is not NOT_EVALED:
        #     temp[tindex] += [repr(part_eval)]

lines += [self._color(self._block_prefix + ' PART BREAKDOWN', 'white')]
if before_part_lines:
    lines += ['Passed Parts:']
    lines += before_part_lines

if fail_part_lines:
    lines += ['Failed Part:']
    lines += fail_part_lines
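The loop above routes each part's rendered text into one of three buckets (parts before the failure, the failing part, parts after it) by bumping an index into `temp` when the failed part is reached. Below is a self-contained sketch of that routing pattern using plain strings; the names `parts`, `failed`, and the bucket lists are illustrative, not xdoctest identifiers.

# Minimal sketch of the before/failed/after bucketing technique.
parts = ['a = 1', 'assert a == 2', 'print(a)']
failed = parts[1]

before, fail, after = [], [], []
buckets = [before, fail, after]
tindex = 0
for part in parts:
    if part is failed:
        tindex += 1          # switch to the "failed part" bucket
    buckets[tindex].append(part)
    if part is failed:
        tindex += 1          # everything afterwards goes to the last bucket

print(before)  # ['a = 1']
print(fail)    # ['assert a == 2']
print(after)   # ['print(a)']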
"""
TODO: run case is over-duplicated and should be separated into a test utils directory
"""
from xdoctest import utils
from xdoctest import runner
COLOR = 'yellow'
def cprint(msg, color=COLOR):
print(utils.color_text(str(msg), COLOR))
cprint('\n\n'
'\n '
'\n ======== '
'\n', COLOR)
cprint('CASE SOURCE:')
cprint('------------')
print(utils.indent(
utils.add_line_numbers(utils.highlight_code(source, 'python'))))
print('')
import hashlib
hasher = hashlib.sha1()
hasher.update(source.encode('utf8'))
hashid = hasher.hexdigest()[0:8]
with utils.TempDir() as temp:
dpath = temp.dpath
modpath = join(dpath, 'test_linenos_' + hashid + '.py')
with open(modpath, 'w') as file:
file.write(source)
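The fragment above writes the case source into a uniquely named temporary module but stops before running it. Mirroring the pattern used in the later snippet, a plausible continuation would hand the written path to `runner.doctest_module`; the `style` and `verbose` values below are illustrative assumptions.

# Hypothetical continuation: run the doctests in the module that was just
# written and show the captured report. Assumes `modpath` from the block above.
with utils.CaptureStdout() as cap:
    runner.doctest_module(modpath, 'all', argv=[''], style='freeform',
                          verbose=1)
print('CAPTURED:')
print(utils.indent(cap.text))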
"""
''')
temp = utils.TempDir(persist=True)
temp.ensure()
dpath = temp.dpath
modpath = join(dpath, 'demo_runner_syntax_error.py')
with open(modpath, 'w') as file:
    file.write(source)

with utils.CaptureStdout() as cap:
    runner.doctest_module(modpath, 'all', argv=[''], style='freeform',
                          verbose=1)

print('CAPTURED [[[[[[[[')
print(utils.indent(cap.text))
print(']]]]]]]] # CAPTURED')

if six.PY2:
    captext = utils.ensure_unicode(cap.text)
else:
    captext = cap.text

if True or not six.PY2:  # Why does this have issues on the dashboards?
    assert '1 run-time warnings' in captext
    assert '2 parse-time warnings' in captext

    # Assert summary line
    assert '3 warnings' in captext
    assert '2 failed' in captext
    assert '1 passed' in captext
for warn_idx, warn in enumerate(parse_warnlist, start=1):
    cprint('--- Parse Warning: {} / {} ---'.format(
        warn_idx, len(parse_warnlist)), 'yellow')
    _log(utils.indent(
        warnings.formatwarning(warn.message, warn.category,
                               warn.filename, warn.lineno)))

# report run-time warnings
if warned:
    cprint('\n=== Found {} run-time warnings ==='.format(len(warned)), 'yellow')
    for warn_idx, example in enumerate(warned, start=1):
        cprint('--- Runtime Warning: {} / {} ---'.format(warn_idx, len(warned)),
               'yellow')
        _log('example = {!r}'.format(example))
        for warn in example.warn_list:
            _log(utils.indent(
                warnings.formatwarning(warn.message, warn.category,
                                       warn.filename, warn.lineno)))

if failed and len(enabled_examples) > 1:
    # If there is more than one test being run, _log out all the
    # errors that occurred so they are consolidated in a single place.
    cprint('\n=== Found {} errors ==='.format(len(failed)), 'red')
    for fail_idx, example in enumerate(failed, start=1):
        cprint('--- Error: {} / {} ---'.format(fail_idx, len(failed)), 'red')
        _log(utils.indent('\n'.join(example.repr_failure())))

# Print command lines to re-run failed tests
if failed:
    cprint('\n=== Failed tests ===', 'red')
    for example in failed:
        _log(example.cmdline)
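This summary-reporting fragment assumes logging helpers such as `_log` and `cprint` that are defined elsewhere in the runner. A minimal stand-in sufficient to exercise the fragment is sketched below; these fallback definitions are assumptions, not xdoctest's actual wiring.

# Hypothetical stand-ins for the helpers assumed by the fragment above.
import warnings
from xdoctest import utils

def _log(msg):
    # The real runner routes this through its configured logging setup.
    print(msg)

def cprint(msg, color='white'):
    _log(utils.color_text(str(msg), color))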