How to use the learn2learn.vision module in learn2learn

To help you get started, we’ve selected a few learn2learn.vision examples, based on popular ways it is used in public projects.
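
All of the snippets below follow the same pattern: load a dataset from l2l.vision.datasets, wrap it in l2l.data.MetaDataset, and sample few-shot tasks from it with task transforms. Here is a minimal, self-contained sketch of that pipeline; the root path and hyperparameters are illustrative, not taken from any project below.

import learn2learn as l2l
from torchvision import transforms as T

# Download Omniglot and wrap it so classes can be indexed for task sampling.
omniglot = l2l.vision.datasets.FullOmniglot(root='./data',
                                            transform=T.ToTensor(),
                                            download=True)
dataset = l2l.data.MetaDataset(omniglot)

ways, shots = 5, 1
task_transforms = [
    l2l.data.transforms.NWays(dataset, ways),        # pick `ways` classes per task
    l2l.data.transforms.KShots(dataset, 2 * shots),  # adaptation + evaluation shots
    l2l.data.transforms.LoadData(dataset),           # materialize the sampled images
    l2l.data.transforms.RemapLabels(dataset),        # relabel classes to 0..ways-1
]
tasks = l2l.data.TaskDataset(dataset,
                             task_transforms=task_transforms,
                             num_tasks=1000)
X, y = tasks.sample()  # one few-shot task: images and remapped labels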


learnables/learn2learn: tests/integration/maml_miniimagenet_test_notravis.py (view on GitHub)
def main(
        ways=5,
        shots=5,
        meta_lr=0.003,
        fast_lr=0.5,
        meta_batch_size=32,
        adaptation_steps=1,
        num_iterations=60000,
        cuda=False,
        seed=42,
):
    random.seed(seed)
    np.random.seed(seed)
    th.manual_seed(seed)
    device = th.device('cpu')
    if cuda and th.cuda.device_count():
        th.cuda.manual_seed(seed)
        device = th.device('cuda')

    # Create Datasets
    train_dataset = l2l.vision.datasets.MiniImagenet(root='./data', mode='train')
    valid_dataset = l2l.vision.datasets.MiniImagenet(root='./data', mode='validation')
    test_dataset = l2l.vision.datasets.MiniImagenet(root='./data', mode='test')
    train_dataset = l2l.data.MetaDataset(train_dataset)
    valid_dataset = l2l.data.MetaDataset(valid_dataset)
    test_dataset = l2l.data.MetaDataset(test_dataset)

    train_transforms = [
        l2l.data.transforms.NWays(train_dataset, ways),
        l2l.data.transforms.KShots(train_dataset, 2*shots),
        l2l.data.transforms.LoadData(train_dataset),
        l2l.data.transforms.RemapLabels(train_dataset),
        l2l.data.transforms.ConsecutiveLabels(train_dataset),
    ]
    train_tasks = l2l.data.TaskDataset(train_dataset,
                                       task_transforms=train_transforms,
                                       num_tasks=20000)
    valid_transforms = [
        l2l.data.transforms.NWays(valid_dataset, ways),
        l2l.data.transforms.KShots(valid_dataset, 2*shots),
        l2l.data.transforms.LoadData(valid_dataset),
        l2l.data.transforms.RemapLabels(valid_dataset),
        l2l.data.transforms.ConsecutiveLabels(valid_dataset),
    ]
    valid_tasks = l2l.data.TaskDataset(valid_dataset,
                                       task_transforms=valid_transforms,
                                       num_tasks=600)

    test_transforms = [
        l2l.data.transforms.NWays(test_dataset, ways),
        l2l.data.transforms.KShots(test_dataset, 2*shots),
        l2l.data.transforms.LoadData(test_dataset),
        l2l.data.transforms.RemapLabels(test_dataset),
        l2l.data.transforms.ConsecutiveLabels(test_dataset),
    ]
    test_tasks = l2l.data.TaskDataset(test_dataset,
                                      task_transforms=test_transforms,
                                      num_tasks=600)

    # Create model
    model = l2l.vision.models.MiniImagenetCNN(ways)
    model.to(device)
    maml = l2l.algorithms.MAML(model, lr=fast_lr, first_order=False)
    opt = optim.Adam(maml.parameters(), meta_lr)
    loss = nn.CrossEntropyLoss(reduction='mean')  # size_average is deprecated; reduction alone suffices

    for iteration in range(num_iterations):
        opt.zero_grad()
        meta_train_error = 0.0
        meta_train_accuracy = 0.0
        meta_valid_error = 0.0
        meta_valid_accuracy = 0.0
        meta_test_error = 0.0
        meta_test_accuracy = 0.0
        for task in range(meta_batch_size):
            # Compute meta-training loss
            learner = maml.clone()
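
This excerpt cuts off right after cloning the model. For reference, a minimal sketch of the inner-loop adaptation that typically follows the clone; the interleaved support/query split below is an illustrative assumption, not code from the file above.

            # Continuation sketch (not from the file above): sample a task
            # and split it into adaptation (support) and evaluation (query)
            # shots; this interleaved split scheme is an assumption.
            batch = train_tasks.sample()
            data, labels = batch
            data, labels = data.to(device), labels.to(device)
            adaptation_indices = th.zeros(data.size(0), dtype=th.bool)
            adaptation_indices[th.arange(shots * ways) * 2] = True
            evaluation_indices = ~adaptation_indices

            # Take a few differentiable SGD steps on the clone.
            for step in range(adaptation_steps):
                adaptation_error = loss(learner(data[adaptation_indices]),
                                        labels[adaptation_indices])
                learner.adapt(adaptation_error)

            # Evaluate on the held-out shots; backward() accumulates the
            # meta-gradients into maml.parameters().
            evaluation_error = loss(learner(data[evaluation_indices]),
                                    labels[evaluation_indices])
            evaluation_error.backward()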
learnables/learn2learn: examples/vision/maml_miniimagenet.py (view on GitHub)
def main(
        ways=5,
        shots=5,
        meta_lr=0.003,
        fast_lr=0.5,
        meta_batch_size=32,
        adaptation_steps=1,
        num_iterations=60000,
        cuda=True,
        seed=42,
):
    random.seed(seed)
    np.random.seed(seed)
    th.manual_seed(seed)
    device = th.device('cpu')
    if cuda and th.cuda.device_count():
        th.cuda.manual_seed(seed)
        device = th.device('cuda')

    # Create Datasets
    train_dataset = l2l.vision.datasets.MiniImagenet(root='./data', mode='train')
    valid_dataset = l2l.vision.datasets.MiniImagenet(root='./data', mode='validation')
    test_dataset = l2l.vision.datasets.MiniImagenet(root='./data', mode='test')
    train_dataset = l2l.data.MetaDataset(train_dataset)
    valid_dataset = l2l.data.MetaDataset(valid_dataset)
    test_dataset = l2l.data.MetaDataset(test_dataset)

    train_transforms = [
        NWays(train_dataset, ways),
        KShots(train_dataset, 2*shots),
        LoadData(train_dataset),
        RemapLabels(train_dataset),
        ConsecutiveLabels(train_dataset),
    ]
    train_tasks = l2l.data.TaskDataset(train_dataset,
                                       task_transforms=train_transforms,
                                       num_tasks=20000)

    valid_transforms = [
        NWays(valid_dataset, ways),
        KShots(valid_dataset, 2*shots),
        LoadData(valid_dataset),
        RemapLabels(valid_dataset),
        ConsecutiveLabels(valid_dataset),
    ]
    valid_tasks = l2l.data.TaskDataset(valid_dataset,
                                       task_transforms=valid_transforms,
                                       num_tasks=600)
learnables/learn2learn: examples/vision/protonet_omniglot.py (view on GitHub)
    n_epochs = 20000
    num_input_channels = 1
    drop_lr_every = 20

    assert torch.cuda.is_available()
    device = torch.device('cuda')
    torch.backends.cudnn.benchmark = True

    param_str = f'omniglot_nt={args.n_train}_kt={args.k_train}_qt={args.q_train}_' \
        f'nv={args.n_test}_kv={args.k_test}_qv={args.q_test}'

    filepath = f'./data/{param_str}.pth'

    omniglot = FullOmniglot(root='./data',
                            transform=transforms.Compose([
                                l2l.vision.transforms.RandomDiscreteRotation(
                                    [0.0, 90.0, 180.0, 270.0]),
                                transforms.Resize(28, interpolation=LANCZOS),
                                transforms.ToTensor(),
                                lambda x: 1.0 - x,
                            ]),
                            download=True)
    omniglot = l2l.data.MetaDataset(omniglot)
    classes = list(range(1623))
    random.shuffle(classes)
    train_generator = l2l.data.TaskGenerator(dataset=omniglot,
                                             ways=args.k_train,
                                             classes=classes[:1100],
                                             tasks=20000)
    valid_generator = l2l.data.TaskGenerator(dataset=omniglot,
                                             ways=args.k_test,
                                             classes=classes[1100:1200],
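
(Note: TaskGenerator is learn2learn's older task-sampling API; recent releases replace it with the MetaDataset, task-transform, and TaskDataset pipeline shown in the other snippets on this page.)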
learnables/learn2learn: examples/vision/protonet_miniimagenet.py (view on GitHub)
    args = parser.parse_args()
    print(args)

    device = torch.device('cpu')
    if args.gpu:
        print("Using gpu")
        torch.cuda.manual_seed(43)
        device = torch.device('cuda')

    model = Convnet()
    model.to(device)

    path_data = '/datadrive/few-shot/miniimagenetdata'
    train_dataset = l2l.vision.datasets.MiniImagenet(
        root=path_data, mode='train')
    valid_dataset = l2l.vision.datasets.MiniImagenet(
        root=path_data, mode='validation')
    test_dataset = l2l.vision.datasets.MiniImagenet(
        root=path_data, mode='test')

    train_sampler = l2l.data.NShotKWayTaskSampler(
        train_dataset.y, 100, args.train_way, args.shot, args.train_query)

    train_loader = DataLoader(dataset=train_dataset, batch_sampler=train_sampler,
                              num_workers=8, pin_memory=True)

    val_sampler = l2l.data.NShotKWayTaskSampler(
        valid_dataset.y, 400, args.test_way, args.shot, args.train_query)  # note: reuses the training query count for validation

    val_loader = DataLoader(dataset=valid_dataset, batch_sampler=val_sampler,
                            num_workers=8, pin_memory=True)
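
What these loaders do not show is the prototypical computation itself. Below is a minimal sketch of that step; the function name, and the assumption that support embeddings arrive grouped by class, are mine rather than the example's.

import torch

def prototypical_logits(support, query, way, shot):
    # support: (way * shot, d) embeddings grouped by class;
    # query: (n_query, d) embeddings; both produced by the Convnet.
    prototypes = support.view(way, shot, -1).mean(dim=1)  # class means, (way, d)
    return -torch.cdist(query, prototypes) ** 2           # nearer prototype => larger logit

Cross-entropy between these logits and the query labels then gives the episode loss.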
learnables/learn2learn: examples/vision/maml_omniglot.py (view on GitHub)
    train_transforms = [
        l2l.data.transforms.FilterLabels(dataset, classes[:1100]),
        l2l.data.transforms.NWays(dataset, ways),
        l2l.data.transforms.KShots(dataset, 2*shots),
        l2l.data.transforms.LoadData(dataset),
        l2l.data.transforms.RemapLabels(dataset),
        l2l.data.transforms.ConsecutiveLabels(dataset),
        l2l.vision.transforms.RandomClassRotation(dataset, [0.0, 90.0, 180.0, 270.0])
    ]
    train_tasks = l2l.data.TaskDataset(dataset,
                                       task_transforms=train_transforms,
                                       num_tasks=20000)

    valid_transforms = [
        l2l.data.transforms.FilterLabels(dataset, classes[1100:1200]),
        l2l.data.transforms.NWays(dataset, ways),
        l2l.data.transforms.KShots(dataset, 2*shots),
        l2l.data.transforms.LoadData(dataset),
        l2l.data.transforms.RemapLabels(dataset),
        l2l.data.transforms.ConsecutiveLabels(dataset),
        l2l.vision.transforms.RandomClassRotation(dataset, [0.0, 90.0, 180.0, 270.0])
    ]
    valid_tasks = l2l.data.TaskDataset(dataset,
                                       task_transforms=valid_transforms,
                                       num_tasks=1024)

    test_transforms = [
        l2l.data.transforms.FilterLabels(dataset, classes[1200:]),
        l2l.data.transforms.NWays(dataset, ways),
        l2l.data.transforms.KShots(dataset, 2*shots),
        l2l.data.transforms.LoadData(dataset),
        l2l.data.transforms.RemapLabels(dataset),
        l2l.data.transforms.ConsecutiveLabels(dataset),
        l2l.vision.transforms.RandomClassRotation(dataset, [0.0, 90.0, 180.0, 270.0])
    ]
    test_tasks = l2l.data.TaskDataset(dataset,
                                      task_transforms=test_transforms,
                                      num_tasks=1024)
learnables/learn2learn: examples/vision/maml_omniglot.py (view on GitHub)
def main(
        ways=5,
        shots=1,
        meta_lr=0.003,
        fast_lr=0.5,
        meta_batch_size=32,
        adaptation_steps=1,
        num_iterations=60000,
        cuda=True,
        seed=42,
):
    random.seed(seed)
    np.random.seed(seed)
    th.manual_seed(seed)
    device = th.device('cpu')
    if cuda:
        th.cuda.manual_seed(seed)
        device = th.device('cuda')

    omniglot = l2l.vision.datasets.FullOmniglot(root='./data',
                                                transform=transforms.Compose([
                                                    transforms.Resize(28, interpolation=LANCZOS),
                                                    transforms.ToTensor(),
                                                    lambda x: 1.0 - x,
                                                ]),
                                                download=True)
    dataset = l2l.data.MetaDataset(omniglot)
    classes = list(range(1623))
    random.shuffle(classes)

    train_transforms = [
        l2l.data.transforms.FilterLabels(dataset, classes[:1100]),
        l2l.data.transforms.NWays(dataset, ways),
        l2l.data.transforms.KShots(dataset, 2*shots),
        l2l.data.transforms.LoadData(dataset),
        l2l.data.transforms.RemapLabels(dataset),
        l2l.data.transforms.ConsecutiveLabels(dataset),
        l2l.vision.transforms.RandomClassRotation(dataset, [0.0, 90.0, 180.0, 270.0])
    ]