# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_contexts(testdir, opts):
    """Run the contextful test file under --cov-context=test and check that
    the recorded contexts and per-context line data match expectations."""
    with open(os.path.join(os.path.dirname(__file__), "contextful.py")) as src:
        contextful_tests = src.read()
    script = testdir.makepyfile(contextful_tests)
    result = testdir.runpytest(
        '-v',
        '--cov=%s' % script.dirpath(),
        '--cov-context=test',
        script,
        *opts.split()
    )
    assert result.ret == 0
    result.stdout.fnmatch_lines(['test_contexts* 100%*'])

    # Inspect the raw coverage database produced by the run.
    data = coverage.CoverageData(".coverage")
    data.read()
    assert data.measured_contexts() == set(EXPECTED_CONTEXTS)

    measured = data.measured_files()
    assert len(measured) == 1
    test_context_path = next(iter(measured))
    assert test_context_path.lower() == os.path.abspath("test_contexts.py").lower()

    # The contextful.py source labels the lines each context should cover.
    line_data = find_labels(contextful_tests, r"[crst]\d+(?:-\d+)?")
    for context, label in EXPECTED_CONTEXTS.items():
        # The empty (global) context has no label to check against.
        if context == '':
            continue
        data.set_query_context(context)
        actual = data.lines(test_context_path)
        assert line_data[label] == actual, "Wrong lines for context {!r}".format(context)
def compute_coverage(branch):
    """Load the project's coverage data and the git diff against *branch*,
    grouping changed lines per file.

    Best-effort on the coverage side: a missing or unreadable .coverage file
    is reported to stderr and an empty CoverageData is used instead.
    """
    from collections import defaultdict

    coverage_data = CoverageData()
    try:
        with project_path.join('.coverage').open() as fp:
            coverage_data.read_file(fp)
    except Exception:
        # Deliberately broad: any failure to load just means "no data yet".
        print("No coverage data found", file=sys.stderr)

    git_proc = subprocess.Popen(['git', 'diff', '-U0', branch],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() drains both pipes (avoids a deadlock if stderr fills up)
    # and returns bytes, which must be decoded before string operations —
    # the original called .split() with a str argument on a bytes object,
    # a TypeError on Python 3.
    git_output, _ = git_proc.communicate()
    files = git_output.decode('utf-8', errors='replace').split("diff --git")

    file_data = defaultdict(list)
    for the_file in files:
        # Extract the a/<path> b/<path> pair from each diff header.
        # NOTE(review): the function appears truncated here in this chunk;
        # `filenames` is computed but the remaining processing is not visible.
        filenames = re.findall(r'a/(.*?) b/(.*)', the_file)
def _str2covdata(self, s):
    """Deserialize the string *s* into a coverage.CoverageData instance."""
    covdata = coverage.CoverageData()
    # StringIO is itself a context manager, so no explicit closing() wrapper
    # is needed; the stream is closed on exit either way.
    with io.StringIO(s) as stream:
        covdata.read_fileobj(stream)
    return covdata
def test_append_data(self):
    """Run `coverage run` twice with --append and verify the data accumulates
    into a single .coverage file covering all lines of b_or_c.py."""
    self.make_b_or_c_py()
    out = self.run_command("coverage run b_or_c.py b")
    self.assertEqual(out, 'done\n')
    self.assert_exists(".coverage")
    # No parallel-mode data files should exist, only the combined one.
    self.assert_file_count(".coverage.*", 0)
    out = self.run_command("coverage run --append b_or_c.py c")
    self.assertEqual(out, 'done\n')
    self.assert_exists(".coverage")
    self.assert_file_count(".coverage.*", 0)
    # Read the coverage file and see that b_or_c.py has all 8 lines
    # executed.
    data = coverage.CoverageData()
    data.read()
    self.assertEqual(line_counts(data)['b_or_c.py'], 8)
    # NOTE(review): the remainder of this method looks spliced in from a
    # different test — `.coverage.bad` is asserted to exist without any
    # visible setup creating it. Confirm against the original test file.
    # Combine the parallel coverage data files into .coverage .
    out = self.run_command("coverage combine")
    self.assert_exists(".coverage")
    self.assert_exists(".coverage.bad")
    # combine should warn about (and skip) the corrupt data file.
    warning_regex = (
        r"Coverage.py warning: Couldn't use data file '.*\.coverage\.bad': "
        r"file (is encrypted or )?is not a database"
    )
    self.assertRegex(out, warning_regex)
    # After combining, those two should be the only data files.
    self.assert_file_count(".coverage.*", 1)
    # Read the coverage file and see that b_or_c.py has all 8 lines
    # executed.
    data = coverage.CoverageData()
    data.read()
    self.assertEqual(line_counts(data)['b_or_c.py'], 8)
def test_merged_profiles_get_coveragepy_data():
    """An empty MergedProfiles still yields a coverage-data object of the
    type appropriate for the installed coverage version."""
    from covimerage import MergedProfiles

    merged = MergedProfiles([])
    cov_data = merged.get_coveragepy_data()
    try:
        from coverage.data import CoverageJsonData
    except ImportError:
        # Older/newer coverage without CoverageJsonData: expect the plain class.
        assert isinstance(cov_data, coverage.CoverageData)
    else:
        assert isinstance(cov_data, CoverageJsonData)
# NOTE(review): orphan fragment of a test method — its `def` line is not
# visible in this chunk; `self` refers to the enclosing test-case instance.
self.assert_doesnt_exist(".coverage")
# After running the forking program, there should be two
# .coverage.machine.123 files.
self.assertEqual(self.number_of_data_files(), 2)
# Combine the parallel coverage data files into .coverage .
self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
self.assertEqual(self.number_of_data_files(), 1)
# Read the coverage file and check fork.py's executed-line count.
# NOTE(review): the original comment said "all 7 lines" but the assertion
# expects 9 — confirm which is correct against the real test file.
data = coverage.CoverageData()
data.read_file(".coverage")
self.assertEqual(data.summary()['fork.py'], 9)
def save_coverage(tree, templates, output_dir, app_name, granularity):
    """Render the top-level coverage index: one table row per package group,
    plus an aggregated total across all groups.

    `templates` presumably maps template file names to renderers (.pt suggests
    Chameleon page templates) — TODO confirm against the caller.
    """
    groups = Utils2.get_groupped_classes(tree)
    init_row = templates['init_row.pt']
    init_table = templates['init_table.pt']
    index_template = templates['index.pt']
    rows = []
    # Running aggregate over all package groups.
    total_coverage_data = CoverageData()
    for g in groups:
        # Each group renders its own package index page and reports back
        # its name, page path, and coverage data for aggregation.
        (package, path, coverage_data) = save_package_indexhtml(g, templates, output_dir, app_name, granularity)
        coverage = coverage_data.get_formatted_coverage(granularity)
        row = init_row(elementlink=path, type='package', elementname=package,
                       coverage=coverage,
                       respath='', coverage_data=coverage_data,
                       is_instruction_level=Granularity.is_instruction(granularity),
                       progress_covered=coverage_data.covered(granularity),
                       progress_missed=coverage_data.missed(granularity))
        # Markup() marks the rendered row as safe HTML for the parent template.
        rows.append(Markup(row))
        total_coverage_data.add_data(coverage_data)
    total_coverage = total_coverage_data.get_formatted_coverage(granularity)
    # NOTE(review): this call is truncated in this chunk — the argument list
    # (and the closing parenthesis) continues beyond the visible source.
    table = init_table(rows=Markup("\n".join(rows)),
                       total_coverage=total_coverage,
                       total_coverage_data=total_coverage_data,
                       is_instruction_level=Granularity.is_instruction(granularity),
def save_package_indexhtml(class_group, templates, output_dir, app_name, granularity):
folder = class_group[0].folder.replace('\\', '/')
class_name_with_pkg = class_group[0].name
package_name = Utils2.get_standart_package_name(class_name_with_pkg)
init_table = templates['init_table.pt']
init_row = templates['init_row.pt']
index_template = templates['index.pt']
slash_num = class_name_with_pkg.count('/')
root_path = ''
for i in range(slash_num):
root_path += '../'
total_coverage_data = CoverageData()
rows = []
for cl in class_group:
elementlink = os.path.join(root_path, folder, cl.file_name + '.html').replace('\\', '/')
elementname = cl.file_name
coverage_data = CoverageData(
lines=cl.coverable(),
lines_missed=cl.not_covered(),
lines_covered=cl.covered(),
methods_covered=cl.mtds_covered(),
methods_missed=cl.mtds_not_covered(),
methods=cl.mtds_coverable()
)
coverage_data.update_coverage_for_single_class_from_methods()
coverage = coverage_data.get_coverage(granularity)
row = init_row(elementlink=elementlink, type='class', elementname=elementname,
coverage=coverage_data.format_coverage(coverage),