How to use the fastai.gen_doc.doctest.this_tests function in fastai

To help you get started, we’ve selected a few examples from the fastai test suite, based on popular ways this_tests is used in public projects.
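Every example below follows the same pattern: call this_tests(...) at the top of the test body, passing the API object (function, method, or class) that the test covers, so fastai's test registry can link the documentation of that API back to the tests exercising it. Here is a minimal sketch of that pattern, meant to run inside fastai's own test suite; the test name and the fastai.core import path for chunks are assumptions made for illustration.

from fastai.gen_doc.doctest import this_tests
from fastai.core import chunks  # the API covered in this sketch (import path assumed)

def test_chunks_registers_coverage():
    # Register that this test covers `chunks`; fastai's test registry uses
    # this mapping when generating the "Tests" links in the documentation.
    this_tests(chunks)
    assert [c for c in chunks([0, 1, 2, 3], 2)] == [[0, 1], [2, 3]]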


github: fastai/fastai/tests/test_vision_transform.py
def test_crop_without_size():
    this_tests(crop)
    path = untar_data(URLs.MNIST_TINY)/'train'/'3'
    files = get_image_files(path)
    img = open_image(path/files[0])
    tfms = get_transforms()
    img = img.apply_tfms(tfms[0])

github: fastai/fastai/tests/test_torch_core.py
def test_set_bn_eval():
    this_tests(set_bn_eval)
    m = simple_cnn(b, bn=True)  # `b` is defined at module level in the original test file
    requires_grad(m,False)
    set_bn_eval(m)
    assert m[0][2].training == False, "Batch norm layer not properly set to eval mode"

github: fastai/fastai/tests/test_core.py
def test_chunks():
    this_tests(chunks)
    ls = [0,1,2,3]
    assert([a for a in chunks(ls, 2)] == [[0,1],[2,3]])
    assert([a for a in chunks(ls, 4)] == [[0,1,2,3]])
    assert([a for a in chunks(ls, 1)] == [[0],[1],[2],[3]])

github: fastai/fastai/tests/test_utils_mem.py
def test_gpu_mem_trace():

    gpu_prepare_clean_slate()

    mtrace = GPUMemTrace()
    this_tests(mtrace.__class__)

    ### 1. more allocated, less released, then all released, w/o counter reset
    # expecting used=~10, peaked=~15
    x1 = gpu_mem_allocate_mbs(10)
    x2 = gpu_mem_allocate_mbs(15)
    del x2
    yield_to_thread() # hack: ensure peak thread gets a chance to measure the peak
    check_mtrace(used_exp=10, peaked_exp=15, mtrace=mtrace, abs_tol=2, ctx="rel some")

    # check `report`'s format including the right numbers
    ctx = "whoah"
    with CaptureStdout() as cs: mtrace.report(ctx)
    used, peaked = parse_mtrace_repr(cs.out, ctx)
    check_mem(used_exp=10,   peaked_exp=15,
              used_rcv=used, peaked_rcv=peaked, abs_tol=2, ctx="trace `report`")

github: fastai/fastai/tests/test_vision_data.py
def test_verify_images(path):
    this_tests(verify_images)
    tmp_path = path/'tmp'
    os.makedirs(tmp_path, exist_ok=True)
    verify_images(path/'train'/'3', dest=tmp_path, max_size=27, max_workers=4)
    images = list(tmp_path.iterdir())
    assert len(images) == 346
    img = PIL.Image.open(images[0])
    assert img.height == 27 and img.width == 27
    shutil.rmtree(tmp_path)

github: fastai/fastai/tests/test_datasets.py
def test_creates_config():
    this_tests(Config)
    DEFAULT_CONFIG_PATH = 'config_test/test.yml'

    try:
        config_path = _expand_path(DEFAULT_CONFIG_PATH)
        clean_test_config(config_path)
        assert not config_path.exists(), "config path should not exist"
        config = Config.get(config_path)
        assert config_path.exists(), "Config.get should create config if it doesn't exist"
    finally:
        clean_test_config(config_path)
        assert not config_path.exists(), "config path should not exist"

github: fastai/fastai/tests/test_core.py
def test_partition_functionality():
    this_tests(partition)

    def test_partition(a, sz, ex):
        result = partition(a, sz)
        assert len(result) == len(ex)
        assert all([a == b for a, b in zip(result, ex)])

    a = [1,2,3,4,5]

    sz = 2
    ex = [[1,2],[3,4],[5]]
    test_partition(a, sz, ex)

    sz = 3
    ex = [[1,2,3],[4,5]]
    test_partition(a, sz, ex)

github: fastai/fastai/tests/test_basic_train.py
def test_freeze():
    learn = fake_learner(layer_group_count=3)
    this_tests(learn.freeze)
    learn.freeze()
    for i, param in enumerate(learn.model.parameters()):
        # freeze() leaves only the last layer group trainable; the params of the first 2 groups should be frozen
        if i >= 4: assert param.requires_grad == True
        else:      assert param.requires_grad == False

github: fastai/fastai/tests/test_callbacks_csv_logger.py
def test_logger():
    learn = fake_learner()
    learn.metrics = [accuracy, error_rate]
    learn.callback_fns.append(callbacks.CSVLogger)
    this_tests(callbacks.CSVLogger)
    with CaptureStdout() as cs: learn.fit_one_cycle(3)
    csv_df = learn.csv_logger.read_logged_file()
    stdout_df = convert_into_dataframe(cs.out)
    pd.testing.assert_frame_equal(csv_df, stdout_df, check_exact=False, check_less_precise=2)
    recorder_df = create_metrics_dataframe(learn)
    # XXX: there is a bug in pandas:
    # https://github.com/pandas-dev/pandas/issues/25068#issuecomment-460014120
    # which quite often fails on CI.
    # once it's resolved, the setting can go back to check_less_precise=True (or better, =3);
    # until then =2 works, but the check is less precise.
    csv_df_notime = csv_df.drop(['time'], axis=1)
    pd.testing.assert_frame_equal(csv_df_notime, recorder_df, check_exact=False, check_less_precise=2)

github: fastai/fastai/tests/test_core.py
def test_listy():
    this_tests(is_listy)
    assert is_listy([1,1,3,3,5])      == True
    assert is_listy((1,1,3,3,5))      == True
    assert is_listy([1,"2",3,3,5])    == True
    assert is_listy((1,"2",3,3,5))    == True
    assert is_listy(1)                == False
    assert is_listy("2")              == False
    assert is_listy({1, 2})           == False
    assert is_listy(set([1,1,3,3,5])) == False