# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_02_notebook_run(detection_notebooks, tiny_od_mask_data_path):
    """Execute detection notebook '02' on the tiny mask dataset for one epoch
    and verify the scraps it records.

    Asserts that training losses were logged, that every per-epoch average
    precision record is a dict, and that all records report the same number
    of metrics.
    """
    notebook_path = detection_notebooks["02"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters=dict(
            PM_VERSION=pm.__version__,
            DATA_PATH=tiny_od_mask_data_path,
            EPOCHS=1,
        ),
        kernel_name=KERNEL_NAME,
    )
    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
    assert len(nb_output.scraps["training_losses"].data) > 0
    training_aps = nb_output.scraps["training_average_precision"].data
    assert len(training_aps) > 0
    # Each epoch's AP record must be a dict; avoid reusing the loop variable
    # name inside the comprehension below (the original shadowed `d`).
    for record in training_aps:
        assert isinstance(record, dict)
    # All epochs must report the same set size of metrics.
    assert len({len(record) for record in training_aps}) == 1
def test_od_20_notebook_run(
    detection_notebooks,
    subscription_id,
    resource_group,
    workspace_name,
    workspace_region,
):
    """Smoke-test the Kubernetes deployment notebook by executing it end to end
    with the provided Azure workspace coordinates. No scrap assertions; a clean
    run is the success criterion.
    """
    pm.execute_notebook(
        detection_notebooks["20_deployment_on_kubernetes"],
        OUTPUT_NOTEBOOK,
        parameters={
            "PM_VERSION": pm.__version__,
            "subscription_id": subscription_id,
            "resource_group": resource_group,
            "workspace_name": workspace_name,
            "workspace_region": workspace_region,
        },
        kernel_name=KERNEL_NAME,
    )
def test_10_notebook_run(classification_notebooks, tiny_ic_data_path):
    """Run the image-annotation notebook against the tiny 'can' image folder
    and check that it counted exactly 6 images.
    """
    annotation_nb = classification_notebooks["10_image_annotation"]
    image_dir = os.path.join(tiny_ic_data_path, "can")
    pm.execute_notebook(
        annotation_nb,
        OUTPUT_NOTEBOOK,
        parameters=dict(PM_VERSION=pm.__version__, IM_DIR=image_dir),
        kernel_name=KERNEL_NAME,
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK)
    assert result.scraps["num_images"].data == 6
def test_01_notebook_run(classification_notebooks):
    """Execute the training-introduction notebook and verify the recorded
    accuracies: 10 epochs logged, and both the final training accuracy and
    the validation accuracy exceed 0.70.
    """
    pm.execute_notebook(
        classification_notebooks["01_training_introduction"],
        OUTPUT_NOTEBOOK,
        parameters=dict(PM_VERSION=pm.__version__),
        kernel_name=KERNEL_NAME,
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK)
    train_accs = result.scraps["training_accuracies"].data
    assert len(train_accs) == 10
    assert train_accs[-1] > 0.70
    assert result.scraps["validation_accuracy"].data > 0.70
def test_12_notebook_run(detection_notebooks):
    """Execute detection notebook '12' for 3 epochs and verify its scraps.

    Checks that exactly one validation accuracy was recorded, that it exceeds
    0.5, and that 10 hard-image scores were produced.
    """
    notebook_path = detection_notebooks["12"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters=dict(PM_VERSION=pm.__version__, EPOCHS=3),
        kernel_name=KERNEL_NAME,
    )
    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
    valid_accs = nb_output.scraps["valid_accs"].data
    # Assert the length BEFORE indexing [-1]: if the list is empty the test
    # fails with a clear assertion instead of an IndexError (original order
    # indexed first).
    assert len(valid_accs) == 1
    assert valid_accs[-1] > 0.5
    assert len(nb_output.scraps["hard_im_scores"].data) == 10
def test_11_notebook_run(similarity_notebooks, tiny_ic_data_path):
    """Run similarity notebook '11' with a reduced configuration (single tiny
    dataset, one repetition, small image sizes) to keep runtime short, then
    verify that at least one retrieval rank is <= 30.
    """
    reduced_params = dict(
        PM_VERSION=pm.__version__,
        # Speed up testing since otherwise would take ~12 minutes on V100
        DATA_PATHS=[tiny_ic_data_path],
        REPS=1,
        IM_SIZES=[60, 100],
    )
    pm.execute_notebook(
        similarity_notebooks["11"],
        OUTPUT_NOTEBOOK,
        parameters=reduced_params,
        kernel_name=KERNEL_NAME,
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK)
    assert min(result.scraps["ranks"].data) <= 30
# NOTE(review): this function shares its name with the classification
# test above; these look like fragments of two different test modules
# pasted together — confirm they live in separate files.
def test_01_notebook_run(similarity_notebooks, tiny_ic_data_path):
    """Smoke-test similarity notebook '01' with a minimal configuration
    (1 head epoch, 1 body epoch, 50px images). A clean run is the success
    criterion; no scraps are asserted.
    """
    pm.execute_notebook(
        similarity_notebooks["01"],
        OUTPUT_NOTEBOOK,
        parameters=dict(
            PM_VERSION=pm.__version__,
            DATA_PATH=tiny_ic_data_path,
            EPOCHS_HEAD=1,
            EPOCHS_BODY=1,
            IM_SIZE=50,
        ),
        kernel_name=KERNEL_NAME,
    )
# ---------------------------------------------------------------------------
# Sphinx configuration values for the papermill documentation build.
# NOTE(review): the project/version/language section below appears TWICE
# (an exact duplicate) — almost certainly a copy/paste or file-concatenation
# artifact; confirm and deduplicate at the source.
# ---------------------------------------------------------------------------
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'papermill'
copyright = '2018, nteract team'
author = 'nteract team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import papermill
# The short X.Y version.
version = '.'.join(papermill.__version__.split('.')[0:2])
# The full version, including alpha/beta/rc tags.
release = papermill.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'UPDATE.md']
# ---- duplicate of the section above (see NOTE at top) ---------------------
# General information about the project.
project = 'papermill'
copyright = '2018, nteract team'
author = 'nteract team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import papermill
# The short X.Y version.
version = '.'.join(papermill.__version__.split('.')[0:2])
# The full version, including alpha/beta/rc tags.
release = papermill.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'UPDATE.md']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
"""Returns a notebook object with papermill metadata loaded from the specified path.
Args:
notebook_path (str): Path to the notebook file.
Returns:
nbformat.NotebookNode
"""
nb = nbformat.reads(papermill_io.read(notebook_path), as_version=4)
if not hasattr(nb.metadata, 'papermill'):
nb.metadata['papermill'] = {
'parameters': dict(),
'environment_variables': dict(),
'version': __version__,
}
for cell in nb.cells:
if not hasattr(cell.metadata, 'tags'):
cell.metadata['tags'] = [] # Create tags attr if one doesn't exist.
if not hasattr(cell.metadata, 'papermill'):
cell.metadata['papermill'] = dict()
return nb