How to use fastai - 10 common examples

To help you get started, we've selected ten fastai examples based on popular ways the library is used in public projects.

github microsoft / computervision-recipes / utils_cv / classification / model.py
import fastai
from fastai.basic_data import DatasetType
from fastai.callback import CallbackHandler
# `_loss_func2activ` is a private helper defined earlier in this module.


def get_preds(learn, dl, with_loss=False, n_batch=None, pbar=None):
    """Return predictions (and optionally per-sample losses) for the samples in `dl`.

    Args:
        learn: Learner object that will be used for prediction
        dl: DataLoader the model will use to load samples
        with_loss: If True, also return the loss on each prediction
        n_batch: Number of batches to predict. If not specified, predictions are run for n batches,
            where n = sample size // BATCH_SIZE
        pbar: ProgressBar object
    """

    # Note: In fastai, for DatasetType.Train only the output of complete minibatches is computed, i.e. if one has
    # 101 images and uses a minibatch size of 16, then len(feats) is 96 and not 101. For DatasetType.Valid this is
    # not the case, and len(feats) is 101 as expected. A workaround is to use DatasetType.Fix instead when
    # referring to the training set. See e.g. this issue:
    # https://forums.fast.ai/t/get-preds-returning-less-results-than-length-of-original-dataset/34148
    if dl == DatasetType.Train:
        dl = DatasetType.Fix

    lf = learn.loss_func if with_loss else None
    return fastai.basic_train.get_preds(
        learn.model,
        dl,
        cb_handler=CallbackHandler(learn.callbacks),
        activ=_loss_func2activ(learn.loss_func),
        loss_func=lf,
        n_batch=n_batch,
        pbar=pbar,
    )
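
A minimal sketch of how this helper might be called; the `data` ImageDataBunch and the model choice are assumptions, not part of the original snippet:

from fastai.vision import cnn_learner, models

learn = cnn_learner(data, models.resnet18)  # `data` is an ImageDataBunch built elsewhere
preds, y = get_preds(learn, learn.data.valid_dl)                          # predictions and targets
preds, y, losses = get_preds(learn, learn.data.valid_dl, with_loss=True)  # plus per-sample losses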

github optuna / optuna / tests / integration_tests / test_fastai.py
from functools import partial

import torch.nn as nn
from fastai.basic_train import Learner
from fastai.metrics import accuracy
from optuna.integration import FastAIPruningCallback


def objective(trial):
    # type: (optuna.trial.Trial) -> float

    # `data_bunch` is a fastai DataBunch created by the enclosing test.
    model = nn.Sequential(nn.Linear(20, 1), nn.Sigmoid())
    learn = Learner(data_bunch, model, metrics=[accuracy], callback_fns=[
        partial(FastAIPruningCallback, trial=trial, monitor='valid_loss')
    ])

    learn.fit(1)

    return 1.0
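
An objective like this is then handed to an Optuna study; a minimal sketch (the pruner and trial count are assumptions):

import optuna

study = optuna.create_study(direction='minimize', pruner=optuna.pruners.MedianPruner())
study.optimize(objective, n_trials=10)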

github microsoft / computervision-recipes / tests / unit / classification / test_classification_model.py
    def test_callback(learn):
        tmr = TrainMetricsRecorder(learn)
        learn.callbacks.append(tmr)
        learn.unfreeze()
        learn.fit(epochs, lr)  # `epochs`, `lr`, `model`, and `tiny_ic_data` come from the enclosing test's fixtures
        return tmr

    # multiple metrics
    learn = cnn_learner(tiny_ic_data, model, metrics=[accuracy, error_rate])
    cb = test_callback(learn)
    assert len(cb.train_metrics) == len(cb.valid_metrics) == epochs
    assert (
        len(cb.train_metrics[0]) == len(cb.valid_metrics[0]) == 2
    )  # we used 2 metrics

    # no metrics
    learn = cnn_learner(tiny_ic_data, model)
    cb = test_callback(learn)
    assert len(cb.train_metrics) == len(cb.valid_metrics) == 0  # no metrics

    # no validation set
    learn = cnn_learner(tiny_ic_data, model, metrics=accuracy)
    learn.data.valid_dl = None
    cb = test_callback(learn)
    assert len(cb.train_metrics) == epochs
    assert len(cb.train_metrics[0]) == 1  # we used 1 metric
    assert len(cb.valid_metrics) == 0  # no validation
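
Outside the test harness, the recorder is attached the same way. A minimal sketch, assuming the `utils_cv` import path from the same repo and an ImageDataBunch called `data`:

from fastai.metrics import accuracy
from fastai.vision import cnn_learner, models
from utils_cv.classification.model import TrainMetricsRecorder

learn = cnn_learner(data, models.resnet18, metrics=[accuracy])
learn.callbacks.append(TrainMetricsRecorder(learn))
learn.fit_one_cycle(3, 1e-3)  # training and validation metrics are recorded each epoch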

github fastai / fastai / tests / test_vision_transform.py
def test_crop_without_size():
    this_tests(crop)
    path = untar_data(URLs.MNIST_TINY)/'train'/'3'
    files = get_image_files(path)
    img = open_image(path/files[0])
    tfms = get_transforms()
    img = img.apply_tfms(tfms[0])
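
For contrast, a sketch of `crop` with an explicit size (same MNIST_TINY setup as above; the size and center values are arbitrary):

tfm = crop(size=24, row_pct=0.5, col_pct=0.5)    # center-crop to 24x24
img = open_image(path/files[0]).apply_tfms(tfm)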

github fastai / fastai / tests / test_torch_core.py
def test_set_bn_eval():
    this_tests(set_bn_eval)
    m = simple_cnn(b, bn=True)  # `b` (a tuple of channel sizes) is defined at module level in the test file
    requires_grad(m,False)
    set_bn_eval(m)
    assert m[0][2].training == False, "Batch norm layer not properly set to eval mode"
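
The usual motivation is fine-tuning: freeze a model's parameters and also stop its BatchNorm layers from updating in training mode. A minimal sketch (the architecture here is arbitrary):

from fastai.layers import simple_cnn
from fastai.torch_core import requires_grad, set_bn_eval

body = simple_cnn((3, 16, 16, 2), bn=True)   # small example CNN
requires_grad(body, False)                   # freeze all parameters
set_bn_eval(body)                            # BatchNorm layers switch to eval mode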

github fastai / fastai / tests / test_core.py
def test_chunks():
    this_tests(chunks)
    ls = [0,1,2,3]
    assert([a for a in chunks(ls, 2)] == [[0,1],[2,3]])
    assert([a for a in chunks(ls, 4)] == [[0,1,2,3]])
    assert([a for a in chunks(ls, 1)] == [[0],[1],[2],[3]])

github fastai / fastai / tests / test_utils_mem.py
def test_gpu_mem_trace():

    gpu_prepare_clean_slate()

    mtrace = GPUMemTrace()
    this_tests(mtrace.__class__)

    ### 1. more allocated, less released, then all released, w/o counter reset
    # expecting used=~10, peaked=~15
    x1 = gpu_mem_allocate_mbs(10)
    x2 = gpu_mem_allocate_mbs(15)
    del x2
    yield_to_thread() # hack: ensure peak thread gets a chance to measure the peak
    check_mtrace(used_exp=10, peaked_exp=15, mtrace=mtrace, abs_tol=2, ctx="rel some")

    # check `report`'s format including the right numbers
    ctx = "whoah"
    with CaptureStdout() as cs: mtrace.report(ctx)
    used, peaked = parse_mtrace_repr(cs.out, ctx)
    check_mem(used_exp=10,   peaked_exp=15,
              used_rcv=used, peaked_rcv=peaked, abs_tol=2, ctx="trace `report`")
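
In everyday use GPUMemTrace is most convenient as a context manager. A minimal sketch (requires a CUDA GPU; `learn` stands for any fastai Learner built elsewhere):

from fastai.utils.mem import GPUMemTrace

with GPUMemTrace(ctx='one epoch'):
    learn.fit_one_cycle(1)
# on exit, a report of used/peaked GPU memory for the block is printed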

github fastai / fastai / tests / test_vision_data.py
def test_verify_images(path):
    this_tests(verify_images)
    tmp_path = path/'tmp'
    os.makedirs(tmp_path, exist_ok=True)
    verify_images(path/'train'/'3', dest=tmp_path, max_size=27, max_workers=4)
    images = list(tmp_path.iterdir())
    assert len(images) == 346
    img = PIL.Image.open(images[0])
    assert img.height == 27 and img.width == 27
    shutil.rmtree(tmp_path)
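
A common non-test use is cleaning up a freshly downloaded dataset in place; a minimal sketch (the class folder names are placeholders):

from fastai.vision import verify_images

for folder in ['cats', 'dogs']:                       # hypothetical class folders under `path`
    verify_images(path/folder, delete=True, max_size=500)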

github fastai / fastai / tests / test_datasets.py
def test_creates_config():
    this_tests(Config)
    DEFAULT_CONFIG_PATH = 'config_test/test.yml'

    config_path = _expand_path(DEFAULT_CONFIG_PATH)  # resolve outside `try` so `finally` can always reference it
    try:
        clean_test_config(config_path)
        assert not config_path.exists(), "config path should not exist"
        config = Config.get(config_path)
        assert config_path.exists(), "Config.get should create config if it doesn't exist"
    finally:
        clean_test_config(config_path)
        assert not config_path.exists(), "config path should not exist"

github fastai / fastai / tests / test_core.py
def test_partition_functionality():
    this_tests(partition)

    def test_partition(a, sz, ex):
        result = partition(a, sz)
        assert len(result) == len(ex)
        assert all([a == b for a, b in zip(result, ex)])

    a = [1,2,3,4,5]

    sz = 2
    ex = [[1,2],[3,4],[5]]
    test_partition(a, sz, ex)

    sz = 3
    ex = [[1,2,3],[4,5]]
    test_partition(a, sz, ex)