How to use the nncf.helpers.load_state function in nncf

To help you get started, we’ve selected a few nncf examples that show how load_state is used in public projects.

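Before the repository snippets, here is a minimal sketch of the call pattern they share. The helper name restore_weights and the checkpoint path are made up for illustration, and the import path is assumed from the page title; the load_state(model, state_dict, is_resume=...) call itself mirrors the examples below.

import torch
from nncf.helpers import load_state  # import path assumed from the page title

def restore_weights(model, checkpoint_path):
    # Hypothetical helper, for illustration only.
    # Load on CPU so the checkpoint can be read regardless of the training device.
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    # Some checkpoints store the weights under a 'state_dict' key, others are a
    # bare state dict (the object_detection example below uses the same fallback).
    state_dict = checkpoint.get('state_dict', checkpoint)
    # The snippets pass is_resume=True only when resuming a full training run;
    # plain pre-trained weights are loaded with is_resume left False.
    load_state(model, state_dict, is_resume=False)
    return model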

From GitHub: opencv/openvino_training_extensions/pytorch_toolkit/nncf/examples/segmentation/utils/checkpoint.py
    - optimizer (``torch.optim``): The stored optimizer state is copied to this
      optimizer instance.
    - compression_scheduler: The compression scheduler into which the saved
      scheduler state is loaded.

    Returns:
    The ``model``, ``optimizer``, epoch, mean IoU and ``compression_scheduler``, loaded from the
    checkpoint.

    """
    assert os.path.isfile(
        model_path), "The model file \"{0}\" doesn't exist.".format(model_path)

    # Load the stored model parameters to the model instance
    checkpoint = torch.load(model_path, map_location=device_name)
    load_state(model, checkpoint['state_dict'], is_resume=True)
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint['optimizer'])
    epoch = checkpoint['epoch']
    miou = checkpoint['miou']

    if "scheduler" in checkpoint and compression_scheduler is not None:
        compression_scheduler.load_state_dict(checkpoint['scheduler'])

    return model, optimizer, epoch, miou, compression_scheduler
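
For reference, the segmentation main.py snippet further down this page invokes the helper above roughly as follows; only the model is kept and the remaining return values are discarded.

# Restore the model weights and the compression scheduler state from a checkpoint;
# the optimizer, epoch and mean IoU are also returned but ignored here.
model, _, _, _, _ = load_checkpoint(model, resuming_checkpoint, config.device,
                                    compression_scheduler=compression_algo.scheduler)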

From GitHub: opencv/openvino_training_extensions/pytorch_toolkit/nncf/examples/classification/main.py
def resume_from_checkpoint(resuming_checkpoint, model, config, optimizer, compression_algo):
    best_acc1 = 0
    if osp.isfile(resuming_checkpoint):
        print("=> loading checkpoint '{}'".format(resuming_checkpoint))
        checkpoint = torch.load(resuming_checkpoint, map_location='cpu')
        load_state(model, checkpoint['state_dict'], is_resume=True)
        if config.mode.lower() == 'train' and config.to_onnx is None:
            config.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            compression_algo.scheduler.load_state_dict(checkpoint['scheduler'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch: {}, best_acc1: {:.3f})"
                  .format(resuming_checkpoint, checkpoint['epoch'], best_acc1))
        else:
            print("=> loaded checkpoint '{}'".format(resuming_checkpoint))
    else:
        raise FileNotFoundError("no checkpoint found at '{}'".format(resuming_checkpoint))
    return model, config, optimizer, compression_algo, best_acc1

From GitHub: opencv/openvino_training_extensions/pytorch_toolkit/nncf/examples/classification/main.py
    if config.seed is not None:
        manual_seed(config.seed)
        cudnn.deterministic = True
        cudnn.benchmark = False

    # create model
    model_name = config['model']
    weights = config.get('weights')
    model = load_model(model_name,
                       pretrained=config.get('pretrained', True) if weights is None else False,
                       num_classes=config.get('num_classes', 1000),
                       model_params=config.get('model_params'))
    compression_algo, model = create_compressed_model(model, config)
    if weights:
        load_state(model, torch.load(weights, map_location='cpu'))
    model, _ = prepare_model_for_execution(model, config)
    if config.distributed:
        compression_algo.distributed()

    is_inception = 'inception' in model_name

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(config.device)

    params_to_optimize = get_parameter_groups(model, config)
    optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)

    resuming_checkpoint = config.resuming_checkpoint
    best_acc1 = 0
    # optionally resume from a checkpoint

From GitHub: opencv/openvino_training_extensions/pytorch_toolkit/nncf/examples/object_detection/main.py
    ###########################

    test_data_loader, train_data_loader = create_dataloaders(config)

    ###########################
    # Load checkpoint
    ###########################

    resuming_checkpoint = config.resuming_checkpoint
    if resuming_checkpoint:
        print('Resuming training, loading {}...'.format(resuming_checkpoint))
        checkpoint = torch.load(resuming_checkpoint, map_location='cpu')
        # use the checkpoint itself in case only a state dict was saved,
        # i.e. the checkpoint was created with `torch.save(module.state_dict())`
        state_dict = checkpoint.get('state_dict', checkpoint)
        load_state(net, state_dict, is_resume=True)
        if config.mode.lower() == 'train' and config.to_onnx is None:
            compression_algo.scheduler.load_state_dict(checkpoint['scheduler'])
            optimizer.load_state_dict(checkpoint.get('optimizer', optimizer.state_dict()))
            config.start_iter = checkpoint.get('iter', 0) + 1

    if config.to_onnx:
        compression_algo.export_model(config.to_onnx)
        print("Saved to {}".format(config.to_onnx))
        return

    if config.mode.lower() == 'test':
        with torch.no_grad():
            print_statistics(compression_algo.statistics())
            net.eval()
            test_net(net, config.device, test_data_loader, distributed=config.distributed)
            return

From GitHub: opencv/openvino_training_extensions/pytorch_toolkit/nncf/examples/object_detection/models/ssd_vgg.py
def build_ssd_vgg(cfg, size, num_classes, config):
    ssd_vgg = SSD_VGG(cfg, size, num_classes, batch_norm=config.get('batchnorm', False))
    print('Initializing weights...')

    # ssd_vgg.apply(weights_init)

    if config.basenet:
        print('Loading base network...')
        basenet_weights = torch.load(config.basenet)
        new_weights = {}
        for wn, wv in basenet_weights.items():
            wn = wn.replace('features.', '')
            new_weights[wn] = wv

        load_state(ssd_vgg.basenet, new_weights, is_resume=False)
    return ssd_vgg
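
The loop above strips the 'features.' prefix (presumably coming from a torchvision-style VGG checkpoint) so the keys line up with the layer names of ssd_vgg.basenet, and then loads them with is_resume=False since only the base network is expected to match. The same remapping can be written more compactly; the sketch below is equivalent to the loop.

# Strip the 'features.' prefix so the checkpoint keys match ssd_vgg.basenet's layer names
new_weights = {wn.replace('features.', ''): wv for wn, wv in basenet_weights.items()}
load_state(ssd_vgg.basenet, new_weights, is_resume=False)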

From GitHub: opencv/openvino_training_extensions/pytorch_toolkit/nncf/examples/object_detection/main.py
def create_model(config):
    ssd_net = build_ssd(config.model, config.ssd_params, config.input_sample_size[-1], config.num_classes, config)
    ssd_net.to(config.device)
    compression_algo = create_compression_algorithm(ssd_net, config)
    ssd_net = compression_algo.model
    weights = config.get('weights')
    if weights:
        sd = torch.load(weights, map_location='cpu')
        load_state(ssd_net, sd)
    ssd_net.train()
    model, _ = prepare_model_for_execution(ssd_net, config)
    return compression_algo, model

From GitHub: opencv/openvino_training_extensions/pytorch_toolkit/nncf/examples/segmentation/main.py
    print(config)

    config.device = get_device(config)
    dataset = get_dataset(config.dataset)
    color_encoding = dataset.color_encoding
    num_classes = len(color_encoding)

    weights = config.get('weights')
    model = load_model(config.model,
                       pretrained=config.get('pretrained', True) if weights is None else False,
                       num_classes=num_classes,
                       model_params=config.get('model_params', {}))
    compression_algo, model = create_compressed_model(model, config)
    if weights:
        sd = torch.load(weights, map_location='cpu')
        load_state(model, sd)

    model, model_without_dp = prepare_model_for_execution(model, config)

    if config.distributed:
        compression_algo.distributed()

    resuming_checkpoint = config.resuming_checkpoint

    if resuming_checkpoint is not None:
        if not config.pretrained:
            # Load the previously saved model state
            model, _, _, _, _ = \
                load_checkpoint(model, resuming_checkpoint, config.device,
                                compression_scheduler=compression_algo.scheduler)

    if config.to_onnx is not None:

From GitHub: opencv/openvino_training_extensions/pytorch_toolkit/nncf/tools/debug/common.py
def load_torch_model(config, cuda=False):
    weights = config.get('weights')
    model = load_model(config.model,
                       pretrained=config.get('pretrained', True) if weights is None else False,
                       num_classes=config.get('num_classes', 1000),
                       model_params=config.get('model_params', {}))
    compression_algo, model = create_compressed_model(model, config)
    if weights:
        sd = torch.load(weights, map_location='cpu')
        load_state(model, sd)
    if cuda:
        model = model.cuda()
        model = torch.nn.DataParallel(model)
    print_statistics(compression_algo.statistics())
    return model
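
A pattern shared by the snippets that build the model themselves: load_state is always called after create_compressed_model (or create_compression_algorithm) has wrapped the network, and before the model is handed to prepare_model_for_execution or torch.nn.DataParallel. These examples consistently load weights into the already-compressed model; whether other orderings also work is not something they demonstrate.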