How to use memcnn - common examples

To help you get started, we’ve selected a few memcnn examples, based on popular ways it is used in public projects.


github tychovdo / RevGAN / models / networks3d.py
def __init__(self, dim, use_bias, use_naive=False):
    super(ThickBlock3d, self).__init__()
    # split the channels: each coupling sub-network operates on dim // 2 channels
    F = self.build_conv_block(dim // 2, True)
    G = self.build_conv_block(dim // 2, True)
    if use_naive:
        # keep_input=True retains the block input on the forward pass,
        # trading memcnn's memory savings for a plain computation path
        self.rev_block = ReversibleBlock(F, G, 'additive', keep_input=True,
                                         implementation_fwd=2, implementation_bwd=2)
    else:
        self.rev_block = ReversibleBlock(F, G, 'additive')
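
The pattern above splits the channel dimension in two and gives each half its own sub-network. Below is a minimal, self-contained sketch of the same construction, assuming the (since deprecated) memcnn.ReversibleBlock API these snippets rely on; HalfBlock is a hypothetical stand-in for build_conv_block:

import torch
import torch.nn as nn
import memcnn

class HalfBlock(nn.Module):
    """Toy sub-network for one half of the channels (illustrative only)."""
    def __init__(self, channels):
        super(HalfBlock, self).__init__()
        self.conv = nn.Conv3d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        return torch.relu(self.conv(x))

dim = 16                                  # total channels; each half gets dim // 2
F = HalfBlock(dim // 2)
G = HalfBlock(dim // 2)
rev_block = memcnn.ReversibleBlock(F, G, 'additive')

x = torch.randn(2, dim, 8, 8, 8)          # NCDHW input for the 3D case
y = rev_block(x)                          # same shape as x; note that with the
                                          # default keep_input=False, memcnn may
                                          # free x's storage to save memory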
github tychovdo / RevGAN / models / networks.py
def __init__(self, dim, padding_type, norm_layer, use_naive, use_bias, coupling):
    super(ReversibleResnetBlock, self).__init__()
    # each coupling sub-network sees half of the channels
    F = self.build_conv_block(dim // 2, padding_type, norm_layer, use_bias)
    G = self.build_conv_block(dim // 2, padding_type, norm_layer, use_bias)

    if use_naive:
        self.rev_block = ReversibleBlock(F, G, coupling, keep_input=True,
                                         implementation_fwd=2, implementation_bwd=2)
    else:
        self.rev_block = ReversibleBlock(F, G, coupling)
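
In both RevGAN blocks, the use_naive branch passes keep_input=True together with implementation_fwd=2 and implementation_bwd=2. In memcnn, keep_input=True makes the block retain its input on the forward pass instead of discarding it and reconstructing it during the backward pass, so the naive branch gives up the memory savings in exchange for a simpler computation path; the implementation_* flags select among memcnn's forward/backward implementations.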
github silvandeleemput / memcnn / memcnn / models / resnet.py
def __init__(self, inplanes, planes, stride=1, downsample=None, noactivation=False):
    super(RevBasicBlock, self).__init__()
    if downsample is None and stride == 1:
        # reversible only when input and output shapes match
        # (stride 1, no downsampling)
        gm = BasicBlockSub(inplanes // 2, planes // 2, stride, noactivation)
        fm = BasicBlockSub(inplanes // 2, planes // 2, stride, noactivation)
        self.revblock = ReversibleBlock(gm, fm)
    else:
        # fall back to a regular (non-reversible) basic block
        self.basicblock_sub = BasicBlockSub(inplanes, planes, stride, noactivation)
    self.downsample = downsample
    self.stride = stride
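
RevBasicBlock only wraps its two sub-blocks in a ReversibleBlock when input and output shapes match, since the coupling must be invertible. Written out by hand, the additive coupling used in all of these examples computes the following; this is an illustrative sketch of the arithmetic, not memcnn's internal code:

import torch

def additive_forward(x, F, G):
    x1, x2 = torch.chunk(x, 2, dim=1)     # split channels into two halves
    y1 = x1 + F(x2)
    y2 = x2 + G(y1)
    return torch.cat([y1, y2], dim=1)

def additive_inverse(y, F, G):
    # exact inverse: the input is recomputed from the output alone,
    # which is what lets memcnn free activations during training
    y1, y2 = torch.chunk(y, 2, dim=1)
    x2 = y2 - G(y1)
    x1 = y1 - F(x2)
    return torch.cat([x1, x2], dim=1)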
github tychovdo / RevGAN / models / networks3d.py
def __init__(self, dim, use_bias, norm_layer, use_naive):
    super(RevBlock3d, self).__init__()
    self.F = self.build_conv_block(dim // 2, True, norm_layer)
    self.G = self.build_conv_block(dim // 2, True, norm_layer)
    if use_naive:
        # note: the original snippet passed the bare names F and G here,
        # which do not refer to the sub-networks built above;
        # self.F and self.G are what was intended
        self.rev_block = ReversibleBlock(self.F, self.G, 'additive', keep_input=True,
                                         implementation_fwd=2, implementation_bwd=2)
    else:
        self.rev_block = ReversibleBlock(self.F, self.G, 'additive')
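
ReversibleBlock has since been deprecated in memcnn in favour of an explicit coupling wrapped in an InvertibleModuleWrapper. A rough modern equivalent of the use_naive branch above, following the memcnn README; F_half and G_half stand for the two half-channel sub-networks:

import memcnn

coupling = memcnn.AdditiveCoupling(Fm=F_half, Gm=G_half)
rev_block = memcnn.InvertibleModuleWrapper(
    fn=coupling,
    keep_input=True,           # mirrors keep_input=True in the snippet above
    keep_input_inverse=True,
)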
github silvandeleemput / memcnn / memcnn / trainers / classification.py
"""train loop"""

device = torch.device('cpu' if not use_cuda else 'cuda')
model, optimizer = manager.model, manager.optimizer

logger.info('Model parameters: {}'.format(get_model_parameters_count(model)))

if use_cuda:
    # record how much GPU memory the model weights occupy before training
    model_mem_allocation = torch.cuda.memory_allocated(device)
    logger.info('Model memory allocation: {}'.format(model_mem_allocation))
else:
    model_mem_allocation = None

writer = SummaryWriter(manager.log_dir)
data_time = AverageMeter()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
act_mem_activations = AverageMeter()

criterion = loss
# ensure train_loader enumerates up to max_iterations
max_iterations = train_loader.sampler.nsamples // train_loader.batch_size
# shorten the sampler so a resumed run stops at the same total iteration count
train_loader.sampler.nsamples = train_loader.sampler.nsamples - start_iter
end = time.time()
for ind, (x, label) in enumerate(train_loader):
    iteration = ind + 1 + start_iter

    if iteration > max_iterations:
        logger.info('maximum number of iterations reached: {}/{}'.format(iteration, max_iterations))
        break

    # step-wise learning-rate decay: scale lr by 0.1 at fixed iterations
    if iteration == 40000 or iteration == 60000:
        for param_group in optimizer.param_groups:
            param_group['lr'] *= 0.1
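
The manual schedule at the end (scale the learning rate by 0.1 at iterations 40000 and 60000) can also be expressed with PyTorch's built-in scheduler; a hypothetical refactor, not the memcnn trainer's actual code:

from torch.optim.lr_scheduler import MultiStepLR

scheduler = MultiStepLR(optimizer, milestones=[40000, 60000], gamma=0.1)

for ind, (x, label) in enumerate(train_loader):
    ...
    optimizer.step()
    scheduler.step()           # step once per iteration to match the loop above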