How to use gluoncv - common examples

To help you get started, we’ve selected a few gluoncv examples based on popular ways it is used in public projects. Each snippet below is an excerpt from a public repository; imports and variables assumed from the surrounding file are noted in comments.


From sufeidechabei/gluon-mobilenet-yolov3 / train_yolo3_mobilenet.py (view on GitHub):
# Excerpt from the middle of the training setup. Assumed imports from the
# rest of the file: import mxnet as mx; from mxnet import gluon; and the
# gluoncv scheduler (likely `from gluoncv.utils import LRScheduler`).
if args.no_wd:
    # exempt batch-norm scale/offset parameters and biases from weight decay
    for k, v in net.collect_params('.*beta|.*gamma|.*bias').items():
        v.wd_mult = 0.0

if args.label_smooth:
    net._target_generator._label_smooth = True

# epochs at which the learning rate is stepped down
if args.lr_decay_period > 0:
    lr_decay_epoch = list(range(args.lr_decay_period, args.epochs,
                                args.lr_decay_period))
else:
    lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')]
lr_scheduler = LRScheduler(mode=args.lr_mode,
                           baselr=args.lr,
                           niters=args.num_samples // args.batch_size,
                           nepochs=args.epochs,
                           step=lr_decay_epoch,
                           step_factor=args.lr_decay, power=2,
                           warmup_epochs=args.warmup_epochs)

trainer = gluon.Trainer(
    net.collect_params(), 'sgd',
    {'wd': args.wd, 'momentum': args.momentum, 'lr_scheduler': lr_scheduler},
    kvstore='local')

# losses used when computing YOLOv3 training targets
sigmoid_ce = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
l1_loss = gluon.loss.L1Loss()
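
The excerpt stops before the training loop itself. A minimal sketch of how such a loop typically drives the trainer and scheduler (the loss computation is collapsed into a hypothetical helper; none of this is verbatim from the repo):

# Illustrative training loop; forward_and_compute_losses is a hypothetical helper.
from mxnet import autograd

for epoch in range(args.epochs):
    for batch in train_data:  # train_data: a gluon.data.DataLoader built elsewhere
        with autograd.record():
            losses = forward_and_compute_losses(net, batch, sigmoid_ce, l1_loss)
        autograd.backward(losses)
        trainer.step(args.batch_size)  # the scheduler sees one more iteration per update
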
From dmlc/gluon-cv / scripts/detection/yolo/train_yolo3.py (view on GitHub):
# Excerpt from the train() function. Assumed imports from the rest of the file:
# from mxnet import gluon; from gluoncv.utils import LRScheduler, LRSequential.
net.collect_params().reset_ctx(ctx)
if args.no_wd:
    # exempt batch-norm scale/offset parameters and biases from weight decay
    for k, v in net.collect_params('.*beta|.*gamma|.*bias').items():
        v.wd_mult = 0.0

if args.label_smooth:
    net._target_generator._label_smooth = True

if args.lr_decay_period > 0:
    lr_decay_epoch = list(range(args.lr_decay_period, args.epochs, args.lr_decay_period))
else:
    lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')]
# shift decay points left because the warmup stage consumes the first epochs
lr_decay_epoch = [e - args.warmup_epochs for e in lr_decay_epoch]
num_batches = args.num_samples // args.batch_size
lr_scheduler = LRSequential([
    LRScheduler('linear', base_lr=0, target_lr=args.lr,
                nepochs=args.warmup_epochs, iters_per_epoch=num_batches),
    LRScheduler(args.lr_mode, base_lr=args.lr,
                nepochs=args.epochs - args.warmup_epochs,
                iters_per_epoch=num_batches,
                step_epoch=lr_decay_epoch,
                step_factor=args.lr_decay, power=2),
])

trainer = gluon.Trainer(
    net.collect_params(), 'sgd',
    {'wd': args.wd, 'momentum': args.momentum, 'lr_scheduler': lr_scheduler},
    kvstore='local')

# losses used when computing YOLOv3 training targets
sigmoid_ce = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
l1_loss = gluon.loss.L1Loss()
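
LRSequential chains schedulers end to end: the first stage ramps the rate linearly from 0 to args.lr over the warmup epochs, then the second stage applies the chosen decay mode for the remaining epochs, which is why the decay epochs are shifted by args.warmup_epochs above. A standalone way to sanity-check a composed schedule (the values here are placeholders; MXNet scheduler objects are callable with the global iteration index):

# Sketch: print the learning rate at a few iterations of a toy schedule.
from gluoncv.utils import LRScheduler, LRSequential

sched = LRSequential([
    LRScheduler('linear', base_lr=0, target_lr=0.01, nepochs=2, iters_per_epoch=100),
    LRScheduler('cosine', base_lr=0.01, target_lr=0, nepochs=8, iters_per_epoch=100),
])
for it in (0, 100, 200, 500, 999):
    print(it, sched(it))  # warms up over the first 200 iterations, then decays
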
From dmlc/gluon-cv / scripts/classification/imagenet/train_imagenet_nasnet.py (view on GitHub):
# Excerpt from the setup code. Assumed imports from the rest of the file:
# import mxnet as mx; from gluoncv.utils import LRScheduler, LRSequential.
# batch_size, num_training_samples, and classes are defined earlier in the script.
num_gpus = opt.num_gpus
batch_size *= max(1, num_gpus)
context = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()]
num_workers = opt.num_workers

lr_decay = opt.lr_decay
lr_decay_period = opt.lr_decay_period
if opt.lr_decay_period > 0:
    lr_decay_epoch = list(range(lr_decay_period, opt.num_epochs, lr_decay_period))
else:
    lr_decay_epoch = [int(i) for i in opt.lr_decay_epoch.split(',')]
lr_decay_epoch = [e - opt.warmup_epochs for e in lr_decay_epoch]
num_batches = num_training_samples // batch_size

# linear warmup followed by the decay mode chosen on the command line
lr_scheduler = LRSequential([
    LRScheduler('linear', base_lr=0, target_lr=opt.lr,
                nepochs=opt.warmup_epochs, iters_per_epoch=num_batches),
    LRScheduler(opt.lr_mode, base_lr=opt.lr, target_lr=0,
                nepochs=opt.num_epochs - opt.warmup_epochs,
                iters_per_epoch=num_batches,
                step_epoch=lr_decay_epoch,
                step_factor=lr_decay, power=2)
])

model_name = opt.model

# extra keyword arguments for model construction, depending on the architecture
kwargs = {'ctx': context, 'pretrained': opt.use_pretrained, 'classes': classes}
if model_name.startswith('vgg'):
    kwargs['batch_norm'] = opt.batch_norm
elif model_name.startswith('resnext'):
    kwargs['use_se'] = opt.use_se
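
The excerpt ends just before the model is built. The usual next step is to hand model_name and kwargs to the gluoncv model zoo; roughly (not verbatim from the script):

# Sketch of the typical continuation.
from gluoncv.model_zoo import get_model

net = get_model(model_name, **kwargs)
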
From mnikitin/EfficientNet / train_imagenet/train.py (view on GitHub):
# Excerpt from the setup code. Assumed imports: import mxnet as mx;
# from gluoncv.utils import LRScheduler, LRSequential. num_gpus, batch_size,
# num_training_samples, and classes are defined earlier; get_efficientnet and
# get_efficientnet_lite come from this repository's model definitions.
context = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()]
num_workers = opt.num_workers

lr_decay = opt.lr_decay
lr_decay_period = opt.lr_decay_period
if opt.lr_decay_period > 0:
    lr_decay_epoch = list(range(lr_decay_period, opt.num_epochs, lr_decay_period))
else:
    lr_decay_epoch = [int(i) for i in opt.lr_decay_epoch.split(',')]
lr_decay_epoch = [e - opt.warmup_epochs for e in lr_decay_epoch]
num_batches = num_training_samples // batch_size

lr_scheduler = LRSequential([
    LRScheduler('linear', base_lr=0, target_lr=opt.lr,
                nepochs=opt.warmup_epochs, iters_per_epoch=num_batches),
    LRScheduler(opt.lr_mode, base_lr=opt.lr, target_lr=0,
                nepochs=opt.num_epochs - opt.warmup_epochs,
                iters_per_epoch=num_batches,
                step_epoch=lr_decay_epoch,
                step_factor=lr_decay, power=2)
])

optimizer = 'nag'
optimizer_params = {'wd': opt.wd, 'momentum': opt.momentum, 'lr_scheduler': lr_scheduler}
if opt.dtype != 'float32':
    # keep a float32 master copy of the weights when training in float16
    optimizer_params['multi_precision'] = True

model_name = opt.model
if 'lite' in model_name:
    net, input_size = get_efficientnet_lite(model_name, num_classes=classes)
else:
    net, input_size = get_efficientnet(model_name, num_classes=classes)
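
With the network, optimizer name, and optimizer_params in hand, the standard continuation is to initialize the parameters and create the Trainer; a sketch (the initializer choice is illustrative, and it assumes from mxnet import gluon):

# Sketch of the typical continuation.
net.cast(opt.dtype)
net.initialize(mx.init.MSRAPrelu(), ctx=context)
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
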
From researchmm/DBTNet / code/train_imagenet_dbt.py (view on GitHub):
# Module-level setup. Assumed imports from the rest of the file:
# import mxnet as mx; from gluoncv.utils import LRScheduler.
batch_size = opt.batch_size

num_gpus = opt.num_gpus
batch_size *= max(1, num_gpus)
context = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()]
num_workers = opt.num_workers

lr_decay = opt.lr_decay
lr_decay_period = opt.lr_decay_period
if opt.lr_decay_period > 0:
    lr_decay_epoch = list(range(lr_decay_period, opt.num_epochs, lr_decay_period))
else:
    lr_decay_epoch = [int(i) for i in opt.lr_decay_epoch.split(',')]
num_batches = num_training_samples // batch_size
# older single-scheduler API: warmup is a constructor argument here
lr_scheduler = LRScheduler(mode=opt.lr_mode, baselr=opt.lr,
                           niters=num_batches, nepochs=opt.num_epochs,
                           step=lr_decay_epoch, step_factor=opt.lr_decay, power=2,
                           warmup_epochs=opt.warmup_epochs)

model_name = opt.model

kwargs = {'ctx': context, 'pretrained': opt.use_pretrained, 'classes': classes}
if model_name.startswith('vgg'):
    kwargs['batch_norm'] = opt.batch_norm
elif model_name.startswith('resnext'):
    kwargs['use_se'] = opt.use_se

if opt.last_gamma:
    # initialize the last batch-norm gamma of each residual block to zero
    kwargs['last_gamma'] = True

optimizer = 'nag'
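
Note that this script uses the older gluoncv scheduler form, where warmup_epochs is passed directly to LRScheduler. Newer releases typically express the same warmup-then-decay schedule with LRSequential; a roughly equivalent sketch (not from the repo):

# Roughly equivalent schedule in the newer two-stage API (sketch only).
lr_scheduler = LRSequential([
    LRScheduler('linear', base_lr=0, target_lr=opt.lr,
                nepochs=opt.warmup_epochs, iters_per_epoch=num_batches),
    LRScheduler(opt.lr_mode, base_lr=opt.lr, target_lr=0,
                nepochs=opt.num_epochs - opt.warmup_epochs,
                iters_per_epoch=num_batches,
                step_epoch=[e - opt.warmup_epochs for e in lr_decay_epoch],
                step_factor=opt.lr_decay, power=2),
])
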
From dmlc/gluon-cv / scripts/instance/mask_rcnn/train_mask_rcnn.py (view on GitHub):
# Assumed imports from the rest of the file: from gluoncv import data as gdata;
# the COCO instance metric from gluoncv.utils.metrics; horovod.mxnet as hvd;
# MPI from mpi4py (optional, may be None).
def get_dataset(dataset, args):
    if dataset.lower() == 'coco':
        train_dataset = gdata.COCOInstance(splits='instances_train2017')
        val_dataset = gdata.COCOInstance(splits='instances_val2017', skip_empty=False)
        starting_id = 0
        if args.horovod and MPI:
            # shard validation across workers as evenly as possible
            length = len(val_dataset)
            shard_len = length // hvd.size()
            rest = length % hvd.size()
            # compute the start index for this worker's partition
            starting_id = shard_len * hvd.rank() + min(hvd.rank(), rest)
        val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval',
                                        use_ext=args.use_ext, starting_id=starting_id)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.horovod and MPI:
        val_dataset = val_dataset.shard(hvd.size(), hvd.rank())
    return train_dataset, val_dataset, val_metric
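
A typical call site for this helper, for orientation (args comes from the script's argument parser; not verbatim from the repo):

# Illustrative usage of get_dataset.
train_dataset, val_dataset, val_metric = get_dataset('coco', args)
print(len(train_dataset), 'train images;', len(val_dataset), 'val images')
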
From THUFutureLab/gluon-face / examples/mnist/train_mnist_lgmloss.py (view on GitHub):
# Excerpt from the training script. Assumed imports: import time; import mxnet
# as mx; from mxnet import gluon; MNIST from mxnet.gluon.data.vision; LGMLoss
# from this repository (gluonfr); LRScheduler from gluoncv.utils; mtc presumably
# aliases mxnet.metric. train_set, transform_val, ctx, lr, epochs, momentum, wd,
# and plot_period are defined earlier in the file.
val_set = MNIST(train=False, transform=transform_val)
val_data = gluon.data.DataLoader(val_set, batch_size, shuffle=False, num_workers=4)

net = MnistNet(embedding_size=2, need_cls_layer=False)
net.initialize(init=mx.init.MSRAPrelu(), ctx=ctx)
net.hybridize()

# large-margin Gaussian mixture loss: 10 classes, 2-D embeddings
loss = LGMLoss(10, 2, 0.2, 0.1, 0.06)
loss.initialize(ctx=ctx)
loss.hybridize()

num_batches = len(train_set) // batch_size
# the LGM loss has trainable parameters of its own, so train them jointly
train_params = net.collect_params()
train_params.update(loss.params)

# older scheduler API: cosine decay with a 10-epoch warmup
lr_scheduler = LRScheduler("cosine", lr, niters=num_batches, nepochs=epochs,
                           targetlr=1e-8, warmup_epochs=10, warmup_lr=0.001)
trainer = gluon.Trainer(train_params, 'nag',
                        {'lr_scheduler': lr_scheduler, 'momentum': momentum, 'wd': wd})

metric = mtc.Accuracy()
num_batch = len(train_data)

for epoch in range(epochs):

    plot = (epoch % plot_period) == 0

    train_loss = 0
    metric.reset()
    tic = time.time()
    ebs, lbs = [], []

    for i, batch in enumerate(train_data):
        # (loop body truncated in the original excerpt)
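
The excerpt cuts off at the start of the batch loop. In Gluon scripts of this shape the body usually looks roughly like the following sketch (the exact outputs of LGMLoss and the plotting code are omitted; the loss call is illustrative):

# Illustrative loop body; assumes from mxnet import autograd and that ctx is a list.
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
with autograd.record():
    losses = [loss(net(x), y) for x, y in zip(data, labels)]  # illustrative call
for l in losses:
    l.backward()
trainer.step(batch_size)
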
From JJXiangJiaoJun/gluon_PyramidBox / train_end2end.py (view on GitHub):
# Assumed imports from the rest of the file: from mxnet import gluon;
# from gluoncv.utils import LRScheduler, LRSequential.
def train(net, train_samples, train_data, val_data, eval_metric, ctx, args):
    """Training pipeline"""
    net.collect_params().reset_ctx(ctx)
    # training_patterns = '.*vgg'
    # net.collect_params(training_patterns).setattr('lr_mult', 0.1)

    num_batches = train_samples // args.batch_size

    if args.start_epoch == 0:
        # fresh run: linear warmup, then cosine decay to zero
        lr_scheduler = LRSequential([
            LRScheduler('linear', base_lr=0, target_lr=args.lr,
                        nepochs=args.warmup_epochs, iters_per_epoch=num_batches),
            LRScheduler('cosine', base_lr=args.lr, target_lr=0,
                        nepochs=args.epochs - args.warmup_epochs,
                        iters_per_epoch=num_batches)])
    else:
        # resumed run: skip warmup and spread cosine decay over the remaining epochs
        offset = args.start_epoch
        lr_scheduler = LRSequential([
            LRScheduler('cosine', base_lr=args.lr, target_lr=0,
                        nepochs=args.epochs - offset,
                        iters_per_epoch=num_batches)
        ])

    opt_params = {'learning_rate': args.lr, 'momentum': args.momentum, 'wd': args.wd,
                  'lr_scheduler': lr_scheduler}

    trainer = gluon.Trainer(
        net.collect_params(),
        'nag',
        opt_params)  # the excerpt is truncated here; opt_params is the natural third argument
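
From here the function would typically iterate over epochs starting at args.start_epoch, stepping the trainer once per batch; a skeletal sketch (not from the repo):

# Skeletal epoch loop, for orientation only.
for epoch in range(args.start_epoch, args.epochs):
    for i, batch in enumerate(train_data):
        # forward pass, loss computation, and backward pass go here
        trainer.step(args.batch_size)
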
From Guanghan/mxnet-centernet / unit_test/test_custom_dataloader.py (view on GitHub):
# Assumed imports: import mxnet as mx; from mxnet import gluon; Tuple and
# Stack from gluoncv.data.batchify; CenterCOCODataset from this repository.
def test_load():
    from opts import opts
    opt = opts().init()

    batch_size = 16
    # stack image, heatmaps, scale, offset, indices, mask into per-field batches
    batchify_fn = Tuple(Stack(), Stack(), Stack(), Stack(), Stack(), Stack())
    num_workers = 2

    train_dataset = CenterCOCODataset(opt, split='train')
    train_loader = gluon.data.DataLoader(
        train_dataset, batch_size, True, batchify_fn=batchify_fn,
        last_batch='rollover', num_workers=num_workers)
    ctx = [mx.gpu(int(i)) for i in opt.gpus_str.split(',') if i.strip()]
    ctx = ctx if ctx else [mx.cpu()]

    for i, batch in enumerate(train_loader):
        print("{} Batch".format(i))
        print("image batch shape: ", batch[0].shape)
        print("heatmap batch shape", batch[1].shape)
        print("scale batch shape", batch[2].shape)
        print("offset batch shape", batch[3].shape)
        print("indices batch shape", batch[4].shape)
        print("mask batch shape", batch[5].shape)