How to use the gluoncv.utils.LRScheduler function in gluoncv

To help you get started, we’ve selected a few gluoncv.utils.LRScheduler examples, based on popular ways it is used in public projects.

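Before working through the snippets, here is a minimal, self-contained sketch of the pattern most of them share: compose a linear warmup with a decaying schedule via LRSequential, then hand the result to gluon.Trainer through the lr_scheduler optimizer option. The network and all hyperparameters below are made up for illustration.

from mxnet import gluon
from gluoncv.utils import LRScheduler, LRSequential

# Toy network and hypothetical training setup.
net = gluon.nn.Dense(10)
net.initialize()
num_batches, epochs, warmup_epochs, base_lr = 500, 120, 5, 0.1

lr_scheduler = LRSequential([
    # Linear warmup from 0 to base_lr over the first warmup_epochs.
    LRScheduler('linear', base_lr=0, target_lr=base_lr,
                nepochs=warmup_epochs, iters_per_epoch=num_batches),
    # Cosine decay from base_lr down to 0 for the remaining epochs.
    LRScheduler('cosine', base_lr=base_lr, target_lr=0,
                nepochs=epochs - warmup_epochs,
                iters_per_epoch=num_batches),
])

# The composed scheduler plugs into gluon.Trainer like any built-in
# mxnet.lr_scheduler.LRScheduler.
trainer = gluon.Trainer(net.collect_params(), 'sgd',
                        {'momentum': 0.9, 'wd': 1e-4,
                         'lr_scheduler': lr_scheduler})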

github sufeidechabei / gluon-mobilenet-yolov3 / train_yolo3_mobilenet.py
    if args.no_wd:
        for k, v in net.collect_params('.*beta|.*gamma|.*bias').items():
            v.wd_mult = 0.0

    if args.label_smooth:
        net._target_generator._label_smooth = True

    if args.lr_decay_period > 0:
        lr_decay_epoch = list(range(args.lr_decay_period, args.epochs, args.lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')]
    lr_scheduler = LRScheduler(mode=args.lr_mode,
                               baselr=args.lr,
                               niters=args.num_samples // args.batch_size,
                               nepochs=args.epochs,
                               step=lr_decay_epoch,
                               step_factor=args.lr_decay, power=2,
                               warmup_epochs=args.warmup_epochs)

    trainer = gluon.Trainer(
        net.collect_params(), 'sgd',
        {'wd': args.wd, 'momentum': args.momentum, 'lr_scheduler': lr_scheduler},
        kvstore='local')

    # targets
    sigmoid_ce = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
    l1_loss = gluon.loss.L1Loss()
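Both branches of the if/else above produce the same kind of object: a list of epoch indices at which the step decay fires. A quick standalone trace with hypothetical values shows what each branch yields:

# Hypothetical values for illustration only.
epochs = 200

# Fixed period: decay every lr_decay_period epochs.
lr_decay_period = 40
print(list(range(lr_decay_period, epochs, lr_decay_period)))
# -> [40, 80, 120, 160]

# Explicit schedule: a comma-separated string of epoch indices.
lr_decay_epoch = '160,180'
print([int(i) for i in lr_decay_epoch.split(',')])
# -> [160, 180]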
github dmlc / gluon-cv / scripts / detection / yolo / train_yolo3.py
    net.collect_params().reset_ctx(ctx)
    if args.no_wd:
        for k, v in net.collect_params('.*beta|.*gamma|.*bias').items():
            v.wd_mult = 0.0

    if args.label_smooth:
        net._target_generator._label_smooth = True

    if args.lr_decay_period > 0:
        lr_decay_epoch = list(range(args.lr_decay_period, args.epochs, args.lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')]
    lr_decay_epoch = [e - args.warmup_epochs for e in lr_decay_epoch]
    num_batches = args.num_samples // args.batch_size
    lr_scheduler = LRSequential([
        LRScheduler('linear', base_lr=0, target_lr=args.lr,
                    nepochs=args.warmup_epochs, iters_per_epoch=num_batches),
        LRScheduler(args.lr_mode, base_lr=args.lr,
                    nepochs=args.epochs - args.warmup_epochs,
                    iters_per_epoch=num_batches,
                    step_epoch=lr_decay_epoch,
                    step_factor=args.lr_decay, power=2),
    ])

    trainer = gluon.Trainer(
        net.collect_params(), 'sgd',
        {'wd': args.wd, 'momentum': args.momentum, 'lr_scheduler': lr_scheduler},
        kvstore='local')

    # targets
    sigmoid_ce = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
    l1_loss = gluon.loss.L1Loss()
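Note the line that shifts every decay epoch down by args.warmup_epochs: the second scheduler in the LRSequential only starts counting once the linear warmup block ends, so its epoch axis is offset by the warmup length. A standalone sketch with hypothetical numbers builds the same two-stage schedule and probes it directly; these schedulers subclass mxnet.lr_scheduler.LRScheduler, so they can be called with a global iteration index.

from gluoncv.utils import LRScheduler, LRSequential

# Hypothetical sizes: 100 iterations/epoch, 10 epochs, 2 of them warmup.
num_batches, epochs, warmup_epochs = 100, 10, 2
lr_scheduler = LRSequential([
    LRScheduler('linear', base_lr=0, target_lr=0.1,
                nepochs=warmup_epochs, iters_per_epoch=num_batches),
    LRScheduler('step', base_lr=0.1,
                nepochs=epochs - warmup_epochs,
                iters_per_epoch=num_batches,
                step_epoch=[4, 6],   # global epochs 6 and 8, warmup-shifted
                step_factor=0.1),
])

# Learning rate at the start, mid-warmup, and around each decay step.
for it in (0, num_batches, 3 * num_batches, 7 * num_batches, 9 * num_batches):
    print(it, lr_scheduler(it))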
github dmlc / gluon-cv / scripts / classification / imagenet / train_imagenet_nasnet.py
    num_gpus = opt.num_gpus
    batch_size *= max(1, num_gpus)
    context = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()]
    num_workers = opt.num_workers

    lr_decay = opt.lr_decay
    lr_decay_period = opt.lr_decay_period
    if opt.lr_decay_period > 0:
        lr_decay_epoch = list(range(lr_decay_period, opt.num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in opt.lr_decay_epoch.split(',')]
    lr_decay_epoch = [e - opt.warmup_epochs for e in lr_decay_epoch]
    num_batches = num_training_samples // batch_size

    lr_scheduler = LRSequential([
        LRScheduler('linear', base_lr=0, target_lr=opt.lr,
                    nepochs=opt.warmup_epochs, iters_per_epoch=num_batches),
        LRScheduler(opt.lr_mode, base_lr=opt.lr, target_lr=0,
                    nepochs=opt.num_epochs - opt.warmup_epochs,
                    iters_per_epoch=num_batches,
                    step_epoch=lr_decay_epoch,
                    step_factor=lr_decay, power=2)
    ])

    model_name = opt.model

    kwargs = {'ctx': context, 'pretrained': opt.use_pretrained, 'classes': classes}
    if model_name.startswith('vgg'):
        kwargs['batch_norm'] = opt.batch_norm
    elif model_name.startswith('resnext'):
        kwargs['use_se'] = opt.use_se
github mnikitin / EfficientNet / train_imagenet / train.py
    context = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()]
    num_workers = opt.num_workers

    lr_decay = opt.lr_decay
    lr_decay_period = opt.lr_decay_period
    if opt.lr_decay_period > 0:
        lr_decay_epoch = list(range(lr_decay_period, opt.num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in opt.lr_decay_epoch.split(',')]
    lr_decay_epoch = [e - opt.warmup_epochs for e in lr_decay_epoch]
    num_batches = num_training_samples // batch_size

    lr_scheduler = LRSequential([
        LRScheduler('linear', base_lr=0, target_lr=opt.lr,
                    nepochs=opt.warmup_epochs, iters_per_epoch=num_batches),
        LRScheduler(opt.lr_mode, base_lr=opt.lr, target_lr=0,
                    nepochs=opt.num_epochs - opt.warmup_epochs,
                    iters_per_epoch=num_batches,
                    step_epoch=lr_decay_epoch,
                    step_factor=lr_decay, power=2)
    ])

    optimizer = 'nag'
    optimizer_params = {'wd': opt.wd, 'momentum': opt.momentum, 'lr_scheduler': lr_scheduler}
    if opt.dtype != 'float32':
        optimizer_params['multi_precision'] = True

    model_name = opt.model
    if 'lite' in model_name:
        net, input_size = get_efficientnet_lite(model_name, num_classes=classes)
    else:
        net, input_size = get_efficientnet(model_name, num_classes=classes)
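One detail worth copying from this snippet is the multi_precision flag: when the model is cast to float16, it tells the optimizer to keep a float32 master copy of the weights, which avoids round-off when updates are small. A self-contained sketch of the same hookup, using a toy network and made-up hyperparameters:

from mxnet import gluon
from gluoncv.utils import LRScheduler

net = gluon.nn.Dense(10)
net.initialize()
net.cast('float16')          # train in half precision

lr_scheduler = LRScheduler('cosine', base_lr=0.1, target_lr=0,
                           nepochs=10, iters_per_epoch=100)

optimizer_params = {'wd': 1e-4, 'momentum': 0.9,
                    'lr_scheduler': lr_scheduler}
# Keep float32 master weights whenever the model itself is not float32.
optimizer_params['multi_precision'] = True

trainer = gluon.Trainer(net.collect_params(), 'nag', optimizer_params)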
github researchmm / DBTNet / code / train_imagenet_dbt.py
batch_size = opt.batch_size

num_gpus = opt.num_gpus
batch_size *= max(1, num_gpus)
context = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()]
num_workers = opt.num_workers


lr_decay = opt.lr_decay
lr_decay_period = opt.lr_decay_period
if opt.lr_decay_period > 0:
    lr_decay_epoch = list(range(lr_decay_period, opt.num_epochs, lr_decay_period))
else:
    lr_decay_epoch = [int(i) for i in opt.lr_decay_epoch.split(',')]
num_batches = num_training_samples // batch_size
lr_scheduler = LRScheduler(mode=opt.lr_mode, baselr=opt.lr,
                           niters=num_batches, nepochs=opt.num_epochs,
                           step=lr_decay_epoch, step_factor=opt.lr_decay, power=2,
                           warmup_epochs=opt.warmup_epochs)

model_name = opt.model

kwargs = {'ctx': context, 'pretrained': opt.use_pretrained, 'classes': classes}
if model_name.startswith('vgg'):
    kwargs['batch_norm'] = opt.batch_norm
elif model_name.startswith('resnext'):
    kwargs['use_se'] = opt.use_se

if opt.last_gamma:
    kwargs['last_gamma'] = True

optimizer = 'nag'
github THUFutureLab / gluon-face / examples / mnist / train_mnist_lgmloss.py
    val_set = MNIST(train=False, transform=transform_val)
    val_data = gluon.data.DataLoader(val_set, batch_size, shuffle=False, num_workers=4)

    net = MnistNet(embedding_size=2, need_cls_layer=False)
    net.initialize(init=mx.init.MSRAPrelu(), ctx=ctx)
    net.hybridize()

    loss = LGMLoss(10, 2, 0.2, 0.1, 0.06)
    loss.initialize(ctx=ctx)
    loss.hybridize()

    num_batches = len(train_set) // batch_size
    train_params = net.collect_params()
    train_params.update(loss.params)

    lr_scheduler = LRScheduler("cosine", lr,  niters=num_batches, nepochs=epochs, targetlr=1e-8,
                               warmup_epochs=10, warmup_lr=0.001)
    trainer = gluon.Trainer(train_params, 'nag', {'lr_scheduler': lr_scheduler, 'momentum': momentum, 'wd': wd})

    metric = mtc.Accuracy()
    num_batch = len(train_data)

    for epoch in range(epochs):

        plot = True if (epoch % plot_period) == 0 else False

        train_loss = 0
        metric.reset()
        tic = time.time()
        ebs, lbs = [], []

        for i, batch in enumerate(train_data):
github JJXiangJiaoJun / gluon_PyramidBox / train_end2end.py
def train(net, train_samples, train_data, val_data, eval_metric, ctx, args):
    """Training pipline"""
    net.collect_params().reset_ctx(ctx)
    # training_patterns = '.*vgg'
    # net.collect_params(training_patterns).setattr('lr_mult', 0.1)

    num_batches = train_samples // args.batch_size

    if args.start_epoch == 0:
        lr_scheduler = LRSequential([
            LRScheduler('linear', base_lr=0, target_lr=args.lr,
                        nepochs=args.warmup_epochs, iters_per_epoch=num_batches),
            LRScheduler('cosine', base_lr=args.lr, target_lr=0,
                        nepochs=args.epochs - args.warmup_epochs,
                        iters_per_epoch=num_batches)])
    else:
        offset = args.start_epoch
        lr_scheduler = LRSequential([
            LRScheduler('cosine', base_lr=args.lr, target_lr=0,
                        nepochs=args.epochs - offset,
                        iters_per_epoch=num_batches)
        ])

    opt_params = {'learning_rate': args.lr, 'momentum': args.momentum, 'wd': args.wd,
                  'lr_scheduler': lr_scheduler}

    trainer = gluon.Trainer(
        net.collect_params(),
        'nag', opt_params)
github awslabs / autogluon / autogluon / utils / learning_rate.py
        self._lr_decay_period = lr_decay_period
        self._warmup_epochs = warmup_epochs
        self._warmup_lr = warmup_lr
        self._num_epochs = num_epochs
        self._lr = lr
        self._lr_mode = lr_mode
        if lr_decay_period > 0:
            lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
        else:
            lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(',')]
        self._lr_decay_epoch = [e - warmup_epochs for e in lr_decay_epoch]

        self._lr_scheduler = LRSequential([
            LRScheduler('linear', base_lr=self._warmup_lr, target_lr=lr,
                        nepochs=warmup_epochs, iters_per_epoch=self._num_batches),
            LRScheduler(lr_mode, base_lr=lr, target_lr=0,
                        nepochs=num_epochs - warmup_epochs,
                        iters_per_epoch=self._num_batches,
                        step_epoch=self._lr_decay_epoch,
                        step_factor=lr_decay, power=2)
        ])
github dmlc / gluon-cv / scripts / action-recognition / train_recognizer.py
    net.cast(opt.dtype)
    net.collect_params().reset_ctx(context)
    logger.info(net)

    if opt.resume_params != '':
        net.load_parameters(opt.resume_params, ctx=context)
        print('Continue training from model %s.' % (opt.resume_params))

    if opt.kvstore is not None:
        train_data, val_data, batch_fn = get_data_loader(opt, batch_size, num_workers, logger, kv)
    else:
        train_data, val_data, batch_fn = get_data_loader(opt, batch_size, num_workers, logger)

    num_batches = len(train_data)
    lr_scheduler = LRSequential([
        LRScheduler('linear', base_lr=opt.warmup_lr, target_lr=opt.lr,
                    nepochs=opt.warmup_epochs, iters_per_epoch=num_batches),
        LRScheduler(opt.lr_mode, base_lr=opt.lr, target_lr=0,
                    nepochs=opt.num_epochs - opt.warmup_epochs,
                    iters_per_epoch=num_batches,
                    step_epoch=lr_decay_epoch,
                    step_factor=lr_decay, power=2)
    ])
    optimizer_params['lr_scheduler'] = lr_scheduler

    train_metric = mx.metric.Accuracy()
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)

    def test(ctx, val_data, kvstore=None):
        acc_top1.reset()
        acc_top5.reset()