assert template.size() == (prod(map), 4)
template = torch.cat([template, template[:, 1:]], dim=1)
assert template.size() == (prod(map), 7)
self.lc = [5, 6] # learnable columns
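# Assumed layout (inferred from in_rank=3 and the out_size below): after the cat,
# each row holds the 4 output coordinates (glimpse, channel, h, w) followed by a
# copy of columns 1:4 as the 3 input coordinates; columns 5 and 6 (the input h
# and w) are the ones learned through backprop, the rest stay fixed.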
super().__init__(
in_rank=3, out_size=(glimpses, co, ho, wo),
temp_indices=template,
learn_cols=self.lc,
chunk_size=1,
gadditional=gadditional, radditional=radditional, region=region,
bias_type=util.Bias.NONE)
self.num_glimpses = glimpses
modules = prep(ci, hi, wi, pool, coord) + [
    nn.ReLU(),
    nn.Linear(HIDLIN, (2 * 3) * glimpses),  # one 2x3 affine matrix per glimpse
    util.Reshape((glimpses, 2, 3))
]
self.preprocess = nn.Sequential(*modules)
self.register_buffer('grid', util.interpolation_grid((k, k)))
self.register_buffer('identity', torch.FloatTensor([0.4, 0, 0, 0, 0.4, 0]).view(2, 3))  # scaled-down (0.4x) identity affine transform
self.register_buffer('corners', torch.FloatTensor([-2, 2, 2, 2, 2, -2, -2, -2]).view(4, 2))  # the four (x, y) corners of a square at ±2
# One sigma per glimpse
self.sigmas = Parameter(torch.randn(glimpses))
# All values 1, no bias. Glimpses extract only pixel information.
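# --- Illustrative sketch (not part of the original class): how a predicted
# (2, 3) affine matrix turns a fixed grid into glimpse sampling coordinates.
# It uses torch's built-in affine_grid/grid_sample in place of the custom
# util.interpolation_grid, purely to show the mechanism.
import torch
import torch.nn.functional as F

def extract_glimpse(image, theta, k):
    """image: (b, c, h, w); theta: (b, 2, 3) affine parameters per image."""
    grid = F.affine_grid(theta, size=(image.size(0), image.size(1), k, k), align_corners=False)
    return F.grid_sample(image, grid, align_corners=False)  # (b, c, k, k) glimpse

# A 0.4-scaled identity, like the 'identity' buffer above, samples a centered
# crop at 0.4x zoom:
img = torch.randn(1, 1, 28, 28)
theta = torch.tensor([[[0.4, 0.0, 0.0], [0.0, 0.4, 0.0]]])
assert extract_glimpse(img, theta, k=12).shape == (1, 1, 12, 12)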
def __init__(self, in_size, out_size, k, gadditional=0, sigma_scale=0.2, fix_values=False, has_bias=False,
min_sigma=0.0, range=None, radditional=None, subsample=None):
super().__init__(in_rank=len(in_size), gadditional=gadditional, out_shape=out_size,
bias_type=Bias.DENSE if has_bias else Bias.NONE,
range=range,
radditional=radditional, subsample=subsample)
self.k = k
self.in_size = in_size
self.out_size = out_size
self.sigma_scale = sigma_scale
self.fix_values = fix_values
self.has_bias = has_bias
self.min_sigma = min_sigma
self.w_rank = len(in_size) + len(out_size)
p = torch.randn(k, self.w_rank + 2)
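# Illustrative split of p (an assumed reconstruction, not the original code):
# the first w_rank columns are the continuous index-tuple means, followed by
# one sigma column and one value column.
means = torch.sigmoid(p[:, :self.w_rank])                          # (k, w_rank), squashed to [0, 1]
sigmas = torch.sigmoid(p[:, self.w_rank]) * self.sigma_scale + self.min_sigma
values = p[:, self.w_rank + 1]
if self.fix_values:
    values = torch.ones_like(values)  # all values 1: only the sparse structure is learned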
pixel_indices = template[:, 2:].clone()
template = torch.cat([template, template[:, 1:]], dim=1)
assert template.size() == (prod(map), 7)
self.lc = [5, 6] # learnable columns
super().__init__(
in_rank=3, out_size=(glimpses, co, ho, wo),
temp_indices=template,
learn_cols=self.lc,
chunk_size=1,
gadditional=gadditional, radditional=radditional, region=region,
bias_type=util.Bias.NONE)
# scale to [0,1] in each dim
pixel_indices = pixel_indices.float() / torch.FloatTensor([[k, k]]).expand_as(pixel_indices)
self.register_buffer('pixel_indices', pixel_indices)
modules = prep(ci, hi, wi) + [
    nn.ReLU(),
    nn.Linear(HIDLIN, 4 * glimpses),  # four bounding-box values per glimpse
    util.Reshape((glimpses, 4))
]
self.preprocess = nn.Sequential(*modules)
self.register_buffer('bbox_offset', torch.FloatTensor([-1, 1, -1, 1]))
# -- added to the bounding box, to make sure there's a training signal
# from the initial weights (i.e. in case all outputs are close to zero)
# One sigma per glimpse
self.sigmas = Parameter(torch.randn(glimpses))
# All values 1, no bias. Glimpses extract only pixel information.
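# --- Illustrative sketch (assumed, not the original code): how four predicted
# bounding-box values plus 'bbox_offset' become a k x k grid of sample points.
# Adding (-1, 1, -1, 1) makes the untrained box span all of [-1, 1], so there
# is a training signal even when the network's outputs start near zero.
import torch

def bbox_to_grid(bbox, k):
    """bbox: (b, 4) as (x_min, x_max, y_min, y_max) in [-1, 1] coordinates."""
    xmin, xmax, ymin, ymax = bbox.unbind(dim=1)
    t = torch.linspace(0.0, 1.0, k)
    xs = xmin[:, None] + (xmax - xmin)[:, None] * t[None, :]  # (b, k)
    ys = ymin[:, None] + (ymax - ymin)[:, None] * t[None, :]  # (b, k)
    return xs, ys

raw = torch.zeros(2, 4)                          # untrained net output, near zero
xs, ys = bbox_to_grid(raw + torch.tensor([-1.0, 1.0, -1.0, 1.0]), k=8)
assert xs.shape == (2, 8)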
bfvalues = values.view(1, -1).squeeze(0)
bfx = x_flat.view(1, -1).squeeze(0)
spm = sparsemult(self.use_cuda)
bfy = spm(vindices, bfvalues, bfsize, bfx)
y_flat = bfy.unsqueeze(0).view(batchsize, -1)
y_shape = [batchsize]
y_shape.extend(self.out_size)
y = y_flat.view(y_shape) # reshape the flat output back into (batch, *out_size)
### Handle the bias
if self.bias_type == Bias.DENSE:
y = y + bias
if self.bias_type == Bias.SPARSE:
    raise NotImplementedError('Sparse bias not implemented yet.')
return y
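# --- What the sparsemult(...) call above computes, sketched with torch's own
# sparse ops. The real sparsemult is a custom autograd Function; this assumed
# equivalent is for illustration only.
import torch

def sparse_mult_sketch(indices, values, size, x_flat):
    """indices: (2, nnz) LongTensor; values: (nnz,); size: (rows, cols);
    x_flat: (cols,) flattened, batch-concatenated input. Returns (rows,)."""
    w = torch.sparse_coo_tensor(indices, values, size)
    return torch.sparse.mm(w, x_flat.unsqueeze(1)).squeeze(1)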
:param fix_values: If true, the values of the nonzero entries are fixed to 1, so that only the sparse structure is learned.
:param has_bias: Whether to add a (dense) bias to the output.
:param min_sigma: Minimum value of sigma, added after scaling.
:param gadditional: Number of additional index tuples to sample globally, uniformly over the whole index space.
:param region: Size of the local region around each index tuple from which additional tuples are sampled.
:param radditional: Number of additional index tuples to sample from the local region.
:param clamp:
:param template: LongTensor template for the matrix of index tuples. Learnable columns are updated through backprop;
the other values are taken from the template.
:param learn_cols: tuple of integers. Learnable columns of the template.
"""
super().__init__(in_rank=len(in_size),
out_size=out_size,
bias_type=Bias.DENSE if has_bias else Bias.NONE,
gadditional=gadditional,
radditional=radditional,
region=region,
temp_indices=template,
learn_cols=learn_cols,
chunk_size=chunk_size)
self.k = k
self.in_size = in_size
self.out_size = out_size
self.sigma_scale = sigma_scale
self.fix_values = fix_values
self.has_bias = has_bias
self.min_sigma = min_sigma
self.rank = len(in_size) + len(out_size)
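# --- Illustrative sketch (assumed reconstruction): how the template and
# learn_cols combine fixed and learned index coordinates. Each template row is
# one index tuple; only the learn_cols columns are overwritten with the
# hypernetwork's continuous means, the rest stay fixed.
import torch

template = torch.tensor([[0, 0, 3, 3], [0, 1, 3, 3]])  # (n, rank), dummy values
learn_cols = [2, 3]
learned = torch.tensor([[1.4, 2.2], [0.3, 1.9]])       # (n, len(learn_cols)) means

indices = template.float()
indices[:, learn_cols] = learned                        # fixed columns untouched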
def __init__(self, in_rank, out_size,
temp_indices=None,
learn_cols=None,
chunk_size=None, gadditional=0, radditional=0, region=None,
bias_type=Bias.DENSE, sparse_input=False, subsample=None):
"""
:param in_rank: Rank (number of dimensions) of the input tensor.
:param out_size: Shape of the output tensor.
:param temp_indices: The template for the matrix of index tuples.
:param learn_cols: Which columns of the template are 'free' (to be learned). The rest are fixed.
:param chunk_size: Size of the 'chunk' of indices that is updated in this pass. This can be used to train a
large sparse layer (i.e. one with many index tuples) in multiple forward/backward passes.
:param gadditional: Number of additional index tuples to sample globally, uniformly over the whole index space.
:param radditional: Number of additional index tuples to sample from a local region around each index tuple.
:param region: Size of the local region from which the radditional tuples are sampled.
:param bias_type: Type of bias: Bias.NONE, Bias.DENSE or Bias.SPARSE.
:param sparse_input: Whether the input tensor is sparse.
:param subsample:
"""
:param input: The input tensor.
:param mrange: Specifies a subrange of index tuples to compute the gradient over. This is helpful for gradient
accumulation methods. This doesn't work together with templating.
:param seed:
:param kwargs:
:return:
"""
assert mrange is None or not self.templated, "Templating and gradient accumulation do not work together"
### Compute and unpack output of hypernetwork
bias = None
if self.bias_type == Bias.NONE:
means, sigmas, values = self.hyper(input, **kwargs)
elif self.bias_type == Bias.DENSE:
means, sigmas, values, bias = self.hyper(input, **kwargs)
elif self.bias_type == Bias.SPARSE:
    raise NotImplementedError('Sparse bias not supported yet.')
else:
    raise ValueError('bias type {} not recognized.'.format(self.bias_type))
b, n, r = means.size()
dv = 'cuda' if self.is_cuda() else 'cpu'
# We divide the list of index tuples into 'chunks'. Each chunk represents a kind of context:
# - duplicate integer index tuples within the chunk are removed
# - proportions are normalized over all index tuples within the chunk
# This is useful in the templated setting. If no chunk size is requested, we just add a singleton dimension.
k = self.chunk_size if self.chunk_size is not None else n # chunk size
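# --- A sketch of the chunking described above (assumed, simplified): reshape
# the n index tuples into chunks of size k and normalize the proportions
# within each chunk (here with a softmax), so each chunk is its own context.
import torch

b, n, k_ = 2, 8, 4                     # batch, index tuples, chunk size
props = torch.randn(b, n)              # unnormalized proportion per tuple
props = torch.softmax(props.view(b, n // k_, k_), dim=2).view(b, n)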