How to use scrapbook - 10 common examples

To help you get started, we've selected a few scrapbook examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github microsoft / computervision-recipes / tests / unit / similarity / test_similarity_notebooks.py View on Github external
def test_00_notebook_run(similarity_notebooks):
    """Execute the '00' similarity notebook and sanity-check its scraps."""
    pm.execute_notebook(
        similarity_notebooks["00"], OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME
    )

    executed = sb.read_notebook(OUTPUT_NOTEBOOK)
    query_feature = executed.scraps["query_feature"].data
    # The notebook embeds the query image into a 512-dimensional,
    # non-negative feature vector.
    assert len(query_feature) == 512
    assert min(query_feature) >= 0
    # At least one reference image should be essentially identical to the
    # query, i.e. its distance is (near) zero.
    closest = min(dist for _, dist in executed.scraps["distances"].data)
    assert closest < 1e-3
github interpretml / interpret-community / test / test_notebooks.py View on Github external
def test_explain_regression_local():
    """Execute the explain-regression-local notebook and read back its scraps.

    The notebook is executed from ``input_notebook`` into
    ``output_notebook``; scraps are only present in the executed output.
    """
    notebookname = "explain-regression-local"
    input_notebook = "notebooks/" + notebookname + ".ipynb"
    output_notebook = "./test/" + notebookname + ".output.ipynb"

    pm.execute_notebook(input_notebook, output_notebook)

    # BUG FIX: read the executed OUTPUT notebook, not the pristine input.
    # Scraps are recorded during execution, so the input notebook has none
    # and the original `sb.read_notebook(input_notebook)` inspected nothing.
    nb = sb.read_notebook(output_notebook)
    nb.scraps  # dict of all scraps by name
github microsoft / nlp-recipes / tests / integration / test_notebooks_named_entity_recognition.py View on Github external
def test_ner_wikigold_bert(notebooks, tmp):
    """Run the wikigold BERT NER notebook and validate the reported metrics.

    BUG FIX: the original asserted ``pytest.approx(result[...], 0.90, abs=...)``
    directly. ``pytest.approx`` returns an always-truthy comparison object
    (and 0.90 was silently passed as the ``rel`` tolerance), so those
    assertions could never fail. The correct idiom is
    ``assert actual == pytest.approx(expected, abs=tol)``.
    """
    notebook_path = notebooks["ner_wikigold_bert"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters={
            "CACHE_DIR": tmp
        },
        kernel_name=KERNEL_NAME,
    )
    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict
    assert result["precision_1"] == pytest.approx(0.90, abs=ABS_TOL)
    assert result["recall_1"] == pytest.approx(0.90, abs=ABS_TOL)
    assert result["f1_1"] == pytest.approx(0.90, abs=ABS_TOL)

    assert result["precision_2"] == pytest.approx(0.80, abs=ABS_TOL)
    assert result["recall_2"] == pytest.approx(0.85, abs=ABS_TOL)
    assert result["f1_2"] == pytest.approx(0.85, abs=ABS_TOL)
github microsoft / computervision-recipes / tests / unit / detection / test_detection_notebooks.py View on Github external
def test_02_notebook_run(detection_notebooks, tiny_od_mask_data_path):
    """Run the '02' detection notebook for one epoch and check its outputs."""
    pm.execute_notebook(
        detection_notebooks["02"],
        OUTPUT_NOTEBOOK,
        parameters=dict(
            PM_VERSION=pm.__version__,
            DATA_PATH=tiny_od_mask_data_path,
            EPOCHS=1,
        ),
        kernel_name=KERNEL_NAME,
    )
    executed = sb.read_notebook(OUTPUT_NOTEBOOK)
    assert len(executed.scraps["training_losses"].data) > 0

    average_precisions = executed.scraps["training_average_precision"].data
    # At least one AP record, each a dict of per-metric values.
    assert len(average_precisions) > 0
    assert all(isinstance(record, dict) for record in average_precisions)
    # Every AP record reports the same number of metrics.
    assert len({len(record) for record in average_precisions}) == 1
github scikit-hep / pyhf / tests / test_notebooks.py View on Github external
def test_multibinpois(common_kwargs):
    """Execute the multiBinPois example notebook and check the success count."""
    notebook = 'docs/examples/notebooks/multiBinPois.ipynb'
    pm.execute_notebook(
        notebook,
        parameters={'validation_datadir': 'validation/data'},
        **common_kwargs
    )
    executed = sb.read_notebook(common_kwargs['output_path'])
    # The 2D scan must find a healthy number of successful points.
    assert executed.scraps['number_2d_successpoints'].data > 200
github microsoft / computervision-recipes / tests / unit / classification / test_classification_notebooks.py View on Github external
# NOTE(review): the enclosing `def test_...(...)` line appears to have been
# lost when this snippet was extracted — the indented statements below are a
# function body without its header. Restore the original signature (it takes
# at least `classification_notebooks` and `tiny_ic_data_path` fixtures —
# TODO confirm against the source repo) before reusing this code.
notebook_path = classification_notebooks["11_exploring_hyperparameters"]
    # Run the hyperparameter-exploration notebook with a minimal sweep:
    # one repetition, one learning rate, one image size, one epoch.
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters=dict(
            PM_VERSION=pm.__version__,
            DATA=[tiny_ic_data_path],
            REPS=1,
            LEARNING_RATES=[1e-3],
            IM_SIZES=[50],
            EPOCHS=[1],
        ),
        kernel_name=KERNEL_NAME,
    )

    # A 1x1x1x1 sweep yields exactly one recorded element.
    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
    assert nb_output.scraps["nr_elements"].data == 1
github microsoft / computervision-recipes / tests / integration / detection / test_integration_detection.py View on Github external
def test_12_notebook_run(detection_notebooks):
    """Run the '12' detection notebook for three epochs and check results."""
    pm.execute_notebook(
        detection_notebooks["12"],
        OUTPUT_NOTEBOOK,
        parameters=dict(PM_VERSION=pm.__version__, EPOCHS=3),
        kernel_name=KERNEL_NAME,
    )

    executed = sb.read_notebook(OUTPUT_NOTEBOOK)
    valid_accuracies = executed.scraps["valid_accs"].data
    # Final validation accuracy must beat chance; exactly one value is
    # recorded, along with ten hard-example scores.
    assert valid_accuracies[-1] > 0.5
    assert len(valid_accuracies) == 1
    assert len(executed.scraps["hard_im_scores"].data) == 10
github microsoft / nlp-recipes / tests / integration / test_notebooks_interpretability.py View on Github external
def test_deep_and_unified_understanding(notebooks):
    """Execute the interpretability notebook and compare sigma outputs."""
    pm.execute_notebook(
        notebooks["deep_and_unified_understanding"],
        OUTPUT_NOTEBOOK,
        kernel_name=KERNEL_NAME)

    result = sb.read_notebook(OUTPUT_NOTEBOOK).scraps.data_dict

    # Reference sigma values captured from a known-good run.
    expected_sigma_numbers = [
        0.00317593, 0.00172284, 0.00634005, 0.00164305, 0.00317159,
    ]
    expected_sigma_bert = [
        0.1735696, 0.14028822, 0.14590865, 0.2263149, 0.20640415,
        0.21249843, 0.18685372, 0.14112663, 0.25824168, 0.22399105,
        0.2393731, 0.12868434, 0.27386534, 0.35876372,
    ]

    np.testing.assert_array_almost_equal(
        result["sigma_numbers"], expected_sigma_numbers, decimal=3
    )
    np.testing.assert_array_almost_equal(
        result["sigma_bert"], expected_sigma_bert, decimal=1
    )
github microsoft / computervision-recipes / tests / integration / classification / test_integration_classification_notebooks.py View on Github external
def test_02_notebook_run(classification_notebooks):
    """Run the multilabel classification notebook and validate accuracies."""
    pm.execute_notebook(
        classification_notebooks["02_multilabel_classification"],
        OUTPUT_NOTEBOOK,
        parameters=dict(PM_VERSION=pm.__version__),
        kernel_name=KERNEL_NAME,
    )

    executed = sb.read_notebook(OUTPUT_NOTEBOOK)
    training_accuracies = executed.scraps["training_accuracies"].data
    # Ten epochs of training, ending reasonably well trained.
    assert len(training_accuracies) == 10
    assert training_accuracies[-1] > 0.70
    # Hamming-loss-based and zero-one-loss-based accuracies.
    assert executed.scraps["acc_hl"].data > 0.70
    assert executed.scraps["acc_zol"].data > 0.4
github nteract / scrapbook / scrapbook / models.py View on Github external
def _extract_papermill_output_data(self, sig, payload):
        """Decode a papermill-recorded output payload into a single Scrap.

        Only signatures beginning with ``RECORD_PAYLOAD_PREFIX`` are handled;
        for any other ``sig`` the method falls through and implicitly returns
        None. ``payload`` is assumed to be a mapping whose first (and only
        meaningful) key names the recorded data — TODO confirm with callers.
        """
        if sig.startswith(RECORD_PAYLOAD_PREFIX):
            # Fetch '+json' and strip the leading '+'
            encoder = sig.split(RECORD_PAYLOAD_PREFIX, 1)[1][1:]
            # First key is the only named payload
            for name, data in payload.items():
                return encoder_registry.decode(Scrap(name, data, encoder))