# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_00_notebook_run(similarity_notebooks):
    """Smoke-test similarity notebook 00: execute it and sanity-check
    the scraped query feature vector and the retrieval distances."""
    pm.execute_notebook(
        similarity_notebooks["00"], OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME
    )
    scraps = sb.read_notebook(OUTPUT_NOTEBOOK).scraps
    feature = scraps["query_feature"].data
    # The query image embedding is a 512-dim, non-negative feature vector.
    assert len(feature) == 512
    assert min(feature) >= 0
    # The query image itself is in the reference set, so the closest
    # distance must be (numerically) zero.
    closest = min(dist for _, dist in scraps["distances"].data)
    assert closest < 1e-3
def test_explain_regression_local():
    """Execute the explain-regression-local notebook and read back its scraps.

    Runs the notebook with papermill into an output copy, then loads the
    executed copy with scrapbook to surface its recorded scraps.
    """
    notebookname = "explain-regression-local"
    input_notebook = "notebooks/" + notebookname + ".ipynb"
    output_notebook = "./test/" + notebookname + ".output.ipynb"
    pm.execute_notebook(input_notebook, output_notebook)
    # BUG FIX: scraps must be read from the *executed output* notebook;
    # the original read `input_notebook`, which has never been run and
    # therefore carries no scrap data.
    nb = sb.read_notebook(output_notebook)
    nb.scraps  # a dict of all scraps by name
def test_ner_wikigold_bert(notebooks, tmp):
    """Run the BERT NER wikigold notebook and verify its scraped metrics.

    Executes the notebook with the cache redirected to the pytest tmp dir,
    then checks precision/recall/F1 scraps against expected values.
    """
    notebook_path = notebooks["ner_wikigold_bert"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters={
            "CACHE_DIR": tmp
        },
        kernel_name=KERNEL_NAME,
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    # BUG FIX: `assert pytest.approx(x, 0.90, abs=ABS_TOL)` is always
    # truthy (it asserts on the approx object itself), so the original
    # assertions could never fail. The correct idiom compares the value
    # against the approx wrapper: `value == pytest.approx(expected, ...)`.
    assert result["precision_1"] == pytest.approx(0.90, abs=ABS_TOL)
    assert result["recall_1"] == pytest.approx(0.90, abs=ABS_TOL)
    assert result["f1_1"] == pytest.approx(0.90, abs=ABS_TOL)
    assert result["precision_2"] == pytest.approx(0.80, abs=ABS_TOL)
    assert result["recall_2"] == pytest.approx(0.85, abs=ABS_TOL)
    assert result["f1_2"] == pytest.approx(0.85, abs=ABS_TOL)
def test_02_notebook_run(detection_notebooks, tiny_od_mask_data_path):
    """Run detection notebook 02 on the tiny mask dataset for one epoch
    and verify training losses and average-precision records are consistent."""
    pm.execute_notebook(
        detection_notebooks["02"],
        OUTPUT_NOTEBOOK,
        parameters=dict(
            PM_VERSION=pm.__version__,
            DATA_PATH=tiny_od_mask_data_path,
            EPOCHS=1,
        ),
        kernel_name=KERNEL_NAME,
    )
    scraps = sb.read_notebook(OUTPUT_NOTEBOOK).scraps
    assert len(scraps["training_losses"].data) > 0
    aps = scraps["training_average_precision"].data
    assert len(aps) > 0
    for entry in aps:
        assert isinstance(entry, dict)
    # All AP dicts should carry the same number of metric keys.
    distinct_sizes = set(len(entry) for entry in aps)
    assert len(distinct_sizes) == 1
def test_multibinpois(common_kwargs):
    """Execute the multiBinPois example notebook and check that the
    scraped 2d-success-point count clears its threshold."""
    params = {'validation_datadir': 'validation/data'}
    pm.execute_notebook(
        'docs/examples/notebooks/multiBinPois.ipynb',
        parameters=params,
        **common_kwargs
    )
    executed = sb.read_notebook(common_kwargs['output_path'])
    assert executed.scraps['number_2d_successpoints'].data > 200
# NOTE(review): this fragment's enclosing `def` line lies outside the visible
# chunk — presumably a test for notebook 11 (hyperparameter exploration);
# confirm against the full file before relying on this boundary.
notebook_path = classification_notebooks["11_exploring_hyperparameters"]
# Execute with a minimal parameter sweep (1 rep, 1 LR, 1 size, 1 epoch)
# so the grid collapses to a single element.
pm.execute_notebook(
    notebook_path,
    OUTPUT_NOTEBOOK,
    parameters=dict(
        PM_VERSION=pm.__version__,
        DATA=[tiny_ic_data_path],
        REPS=1,
        LEARNING_RATES=[1e-3],
        IM_SIZES=[50],
        EPOCHS=[1],
    ),
    kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
# One combination of (rep, lr, size, epoch) => exactly one sweep element.
assert nb_output.scraps["nr_elements"].data == 1
def test_12_notebook_run(detection_notebooks):
    """Run detection notebook 12 for three epochs and check the scraped
    validation accuracy and hard-example image scores."""
    pm.execute_notebook(
        detection_notebooks["12"],
        OUTPUT_NOTEBOOK,
        parameters=dict(PM_VERSION=pm.__version__, EPOCHS=3),
        kernel_name=KERNEL_NAME,
    )
    scraps = sb.read_notebook(OUTPUT_NOTEBOOK).scraps
    valid_accs = scraps["valid_accs"].data
    # Final validation accuracy should beat chance; only one value recorded.
    assert valid_accs[-1] > 0.5
    assert len(valid_accs) == 1
    assert len(scraps["hard_im_scores"].data) == 10
def test_deep_and_unified_understanding(notebooks):
    """Execute the deep-and-unified-understanding notebook and compare its
    scraped sigma vectors against known-good reference values."""
    pm.execute_notebook(
        notebooks["deep_and_unified_understanding"],
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME)
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    expected_sigma_numbers = [
        0.00317593, 0.00172284, 0.00634005, 0.00164305, 0.00317159,
    ]
    expected_sigma_bert = [
        0.1735696, 0.14028822, 0.14590865, 0.2263149, 0.20640415,
        0.21249843, 0.18685372, 0.14112663, 0.25824168, 0.22399105,
        0.2393731, 0.12868434, 0.27386534, 0.35876372,
    ]
    # Looser tolerance (decimal=1) for the BERT sigmas, which vary more
    # between runs than the numeric-feature sigmas (decimal=3).
    np.testing.assert_array_almost_equal(
        result["sigma_numbers"], expected_sigma_numbers, decimal=3
    )
    np.testing.assert_array_almost_equal(
        result["sigma_bert"], expected_sigma_bert, decimal=1
    )
def test_02_notebook_run(classification_notebooks):
    """Run the multilabel classification notebook and verify its scraped
    training and evaluation metrics."""
    pm.execute_notebook(
        classification_notebooks["02_multilabel_classification"],
        OUTPUT_NOTEBOOK,
        parameters=dict(PM_VERSION=pm.__version__),
        kernel_name=KERNEL_NAME,
    )
    scraps = sb.read_notebook(OUTPUT_NOTEBOOK).scraps
    accuracies = scraps["training_accuracies"].data
    # Ten epochs recorded, ending above 70% training accuracy.
    assert len(accuracies) == 10
    assert accuracies[-1] > 0.70
    # Hamming-loss-based accuracy and zero-one-loss-based accuracy.
    assert scraps["acc_hl"].data > 0.70
    assert scraps["acc_zol"].data > 0.4
# NOTE(review): fragment of a larger generator function (a dagstermill solid
# compute body). The enclosing `def`, the `try:` matching the `except` below,
# and the body of the final `if` all lie outside this chunk — do not treat
# this span as a complete definition.
# Register the dagstermill engine so papermill executes the notebook
# through dagster's nbconvert-based engine.
papermill_engines.register('dagstermill', DagstermillNBConvertEngine)
papermill.execute_notebook(
    intermediate_path, temp_path, engine_name='dagstermill', log_output=True
)
except Exception as exc:
# On failure, still materialize the (partial) output notebook so the
# operator can inspect it, then re-raise the original error.
yield Materialization(
label='output_notebook',
description='Location of output notebook on the filesystem',
metadata_entries=[EventMetadataEntry.fspath(temp_path)],
)
raise exc
# deferred import for perf
import scrapbook
output_nb = scrapbook.read_notebook(temp_path)
system_compute_context.log.debug(
'Notebook execution complete for {name}. Data is {data}'.format(
name=name, data=output_nb.scraps
)
)
# Success path: materialize the executed notebook ...
yield Materialization(
label='output_notebook',
description='Location of output notebook on the filesystem',
metadata_entries=[EventMetadataEntry.fspath(temp_path)],
)
# ... then map declared solid outputs to scraps recorded in the notebook.
for (output_name, output_def) in system_compute_context.solid_def.output_dict.items():
data_dict = output_nb.scraps.data_dict
if output_name in data_dict: