# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
return [self._function_template.format(**params)]
def config_cpp(self):
    """Render the config template with 2D spatial dims and weight sparsity."""
    in_var = self.get_input_variable()
    out_var = self.get_output_variable()
    params = self._default_config_params()
    # Map symbolic dimension names (height, width, channels/filters) from
    # the input/output variables into the template parameters.
    params.update({
        'in_height': in_var.dim_names[0],
        'in_width': in_var.dim_names[1],
        'n_chan': in_var.dim_names[2],
        'out_height': out_var.dim_names[0],
        'out_width': out_var.dim_names[1],
        'n_filt': out_var.dim_names[2],
        'nzeros': self.get_weights('weight').nzeros,
    })
    return self._config_template.format(**params)
class Pooling1D(Layer):
def initialize(self):
    """Declare the pooled 1D output variable and record the pooling operator."""
    out_shape = [self.attributes['n_out'], self.attributes['n_filt']]
    out_dims = ['N_OUTPUTS_{}'.format(self.index), 'N_FILT_{}'.format(self.index)]
    self.add_output_variable(out_shape, out_dims)
    # 'MaxPooling1D' -> 'Max', 'AveragePooling1D' -> 'Average'
    pool_op = self.get_attr('class_name').split('Pooling')[0]
    self.set_attr('pool_op', pool_op)
def function_cpp(self):
    """Emit the single C++ call statement for this layer."""
    call_params = self._default_function_params()
    return [self._function_template.format(**call_params)]
def config_cpp(self):
    # NOTE(review): no return statement here, so this yields None -- it
    # appears to be a truncated duplicate of the complete config_cpp
    # variant that formats and returns self._config_template. Confirm.
    params = self._default_config_params()
    params['n_in'] = self.get_input_variable().size_cpp()
    params['n_out'] = self.get_output_variable().size_cpp()
self.add_output_variable(shape, dims)
self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0])
def function_cpp(self):
    """Return the list of C++ call statements for this layer."""
    fmt_args = self._default_function_params()
    return [self._function_template.format(**fmt_args)]
def config_cpp(self):
    """Fill the config template with flattened input/output sizes."""
    cfg = self._default_config_params()
    cfg['n_in'] = self.get_input_variable().size_cpp()
    cfg['n_out'] = self.get_output_variable().size_cpp()
    return self._config_template.format(**cfg)
class Pooling2D(Layer):
def initialize(self):
    """Declare the pooled 2D output variable and record the pooling operator."""
    idx = self.index
    out_shape = [self.attributes['out_height'], self.attributes['out_width'], self.attributes['n_filt']]
    out_dims = ['OUT_HEIGHT_{}'.format(idx), 'OUT_WIDTH_{}'.format(idx), 'N_FILT_{}'.format(idx)]
    self.add_output_variable(out_shape, out_dims)
    # Class name is e.g. 'MaxPooling2D'; keep only the operator prefix.
    self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0])
def function_cpp(self):
    """Produce the layer's call line from the default parameter set."""
    args = self._default_function_params()
    rendered = self._function_template.format(**args)
    return [rendered]
def config_cpp(self):
    # NOTE(review): truncated -- stops after 'out_height' and never formats
    # or returns the config template. Also the first input dim is keyed
    # 'n_in' rather than 'in_height'; verify against the config template.
    params = self._default_config_params()
    params['n_in'] = self.get_input_variable().dim_names[0]
    params['in_width'] = self.get_input_variable().dim_names[1]
    params['out_height'] = self.get_output_variable().dim_names[0]
self.add_weights_variable(name='bias', var_name='b{index}', data=bias)
def function_cpp(self):
    """Emit the batchnorm call, wiring in the scale/bias weight names."""
    args = self._default_function_params()
    args['scale'] = self.get_weights('scale').name
    args['bias'] = self.get_weights('bias').name
    return [self._function_template.format(**args)]
def config_cpp(self):
    """Fill the config template; n_in is the flattened input size."""
    cfg = self._default_config_params()
    cfg['n_in'] = self.get_input_variable().size_cpp()
    return self._config_template.format(**cfg)
class Merge(Layer):
def initialize(self):
    """Element-wise merge: output mirrors the (identical) input shapes."""
    assert len(self.inputs) == 2
    first = self.get_input_variable(self.inputs[0])
    second = self.get_input_variable(self.inputs[1])
    # Both inputs must agree in shape for an element-wise merge.
    assert first.shape == second.shape
    self.add_output_variable(first.shape, first.dim_names)
def function_cpp(self):
    # NOTE(review): truncated -- builds the params dict but never formats
    # or returns the function template; a complete variant would end with
    # something like: return [self._function_template.format(**params)].
    params = {}
    params['merge'] = self.get_attr('op').lower()
    params['config'] = 'config{}'.format(self.index)
    params['input1_t'] = self.get_input_variable(self.inputs[0]).type.name
    params['input2_t'] = self.get_input_variable(self.inputs[1]).type.name
    params['output_t'] = self.get_output_variable().type.name
def function_cpp(self):
    """Render the default call template for this layer as a one-item list."""
    template_args = self._default_function_params()
    return [self._function_template.format(**template_args)]
def config_cpp(self):
    """Fill the config template from input/output dimension names."""
    inp = self.get_input_variable()
    out = self.get_output_variable()
    params = self._default_config_params()
    # NOTE(review): the first input dim is stored under 'n_in' while the
    # second uses 'in_width' -- verify the template expects this key.
    params['n_in'] = inp.dim_names[0]
    params['in_width'] = inp.dim_names[1]
    params['out_height'] = out.dim_names[0]
    params['out_width'] = out.dim_names[1]
    params['n_filt'] = out.dim_names[2]
    return self._config_template.format(**params)
class Activation(Layer):
def initialize(self):
    """Activation is shape-preserving: copy shape/dims from the input."""
    source = self.get_input_variable()
    self.add_output_variable(source.shape, source.dim_names)
def function_cpp(self):
    """Emit the activation call with a per-layer config struct name."""
    act = self.get_attr('activation')
    params = self._default_function_params()
    params['activation'] = act
    # Config name is '<activation>_config<index>', e.g. 'relu_config3'.
    params['config'] = '{}_config{}'.format(act, self.index)
    return [self._function_template.format(**params)]
def config_cpp(self):
    # NOTE(review): truncated duplicate -- sets 'type' but never formats
    # or returns the config template, so it implicitly returns None.
    params = self._default_config_params()
    params['type'] = self.get_attr('activation')
# parameters.h
def config_cpp(self):
    """Abstract hook: subclasses must render their own config template."""
    raise NotImplementedError
def get_numbers_cpp(self):
    """Return one '#define NAME value' line per output dimension."""
    defines = ('#define {} {}\n'.format(name, size)
               for name, size in self.get_output_variable().get_shape())
    return ''.join(defines)
def precision_cpp(self):
    """Typedef this layer's output precision as layer<index>_t."""
    out_var = self.get_output_variable()
    return 'typedef {} layer{}_t;'.format(out_var.precision, self.index)
class Input(Layer):
    """Graph input layer: declares the input variable, emits no HLS code."""

    def initialize(self):
        shape = self.attributes['input_shape']
        # Drop a leading batch dimension if present (None in Keras shapes).
        if shape[0] is None:
            shape = shape[1:]
        dims = ['N_INPUT_{}_{}'.format(d, self.index) for d in range(1, len(shape) + 1)]
        self.add_output_variable(shape, dims, var_name=self.name, type_name='input_t')

    def function_cpp(self):
        # Inputs produce no function call.
        return None

    def config_cpp(self):
        # Inputs need no config section.
        return None
class Dense(Layer):
    def initialize(self):
        # NOTE(review): this definition is truncated here (duplicated
        # fragment); the complete Dense.initialize appears later in the file.
        shape = [self.attributes['n_out']]
class Input(Layer):
    """Model input: registers the input variable; no function/config output."""

    def initialize(self):
        in_shape = self.attributes['input_shape']
        if in_shape[0] is None:
            # Strip the batch axis (None) from Keras-style shapes.
            in_shape = in_shape[1:]
        in_dims = ['N_INPUT_{}_{}'.format(pos, self.index)
                   for pos in range(1, len(in_shape) + 1)]
        self.add_output_variable(in_shape, in_dims, var_name=self.name, type_name='input_t')

    def function_cpp(self):
        return None  # nothing to call for an input

    def config_cpp(self):
        return None  # nothing to configure for an input
class Dense(Layer):
def initialize(self):
    """Declare the dense output and pick an implementation strategy."""
    out_shape = [self.attributes['n_out']]
    out_dims = ['N_LAYER_{}'.format(self.index)]
    quantize = self.get_attr('quantize', default=0)
    compression = self.model.config.get_compression(self)
    if not self.model.config.is_resource_strategy(self):
        self.set_attr('strategy', 'latency')
    else:
        # Resource strategy: warn on ReuseFactor 1, then pick the variant.
        if self.model.config.get_reuse_factor(self) == 1:
            print('WARNING: Using ReuseFactor 1 with "Resource" strategy. This may not work.')
        self.set_attr('strategy', 'compressed' if compression else 'large')
    self.add_output_variable(out_shape, out_dims)
    self.add_weights(quantize=quantize, compression=compression)
return act # ELU activation
class PReLU(Activation):
    """PReLU activation with a learned per-channel alpha parameter."""

    def initialize(self):
        super(PReLU, self).initialize()
        # Register the learned slope parameter alongside the output.
        self.add_weights_variable(name='alpha', var_name='a{index}')

    def function_cpp(self):
        act_name = self.get_attr('activation')
        params = self._default_function_params()
        params['activation'] = act_name.lower()
        params['param'] = self.get_weights('alpha').name
        params['config'] = '{}_config{}'.format(act_name, self.index)
        return [self._function_template.format(**params)]
class BatchNormalization(Layer):
def initialize(self):
inp = self.get_input_variable()
shape = inp.shape
dims = inp.dim_names
self.add_output_variable(shape, dims)
gamma = self.model.get_weights_data(self.name, 'gamma')
beta = self.model.get_weights_data(self.name, 'beta')
mean = self.model.get_weights_data(self.name, 'moving_mean')
var = self.model.get_weights_data(self.name, 'moving_variance')
scale = gamma / np.sqrt(var + self.get_attr('epsilon'))
bias = beta - gamma * mean / np.sqrt(var + self.get_attr('epsilon'))
self.add_weights_variable(name='scale', var_name='s{index}', data=scale)
self.add_weights_variable(name='bias', var_name='b{index}', data=bias)
return [self._function_template.format(**params)]
def config_cpp(self):
    """Fill the 1D conv config template (dims, dilation, sparsity)."""
    inp = self.get_input_variable()
    params = self._default_config_params()
    params['n_in'] = inp.dim_names[0]
    params['n_chan'] = inp.dim_names[1]
    params['filt_width'] = self.get_attr('y_filt')
    # Dilation defaults to 1 when the attribute is absent.
    params['dilation'] = self.get_attr('dilation', 1)
    params['n_filt'] = 'N_FILT_{}'.format(self.index)
    params['n_out'] = 'Y_OUTPUTS_{}'.format(self.index)
    params['nzeros'] = self.get_weights('weight').nzeros
    return self._config_template.format(**params)
class Conv2D(Layer):
def initialize(self):
    """Declare the 2D conv output variable plus kernel and bias weights."""
    idx = self.index
    out_shape = [self.attributes['out_height'], self.attributes['out_width'], self.attributes['n_filt']]
    out_dims = ['OUT_HEIGHT_{}'.format(idx), 'OUT_WIDTH_{}'.format(idx), 'N_FILT_{}'.format(idx)]
    self.add_output_variable(out_shape, out_dims)
    self.add_weights()
    self.add_bias()
def function_cpp(self):
    """Emit the conv call, wiring in weight and bias variable names."""
    weights = self.get_weights('weight')
    biases = self.get_weights('bias')
    params = self._default_function_params()
    params['w'] = weights.name
    params['b'] = biases.name
    return [self._function_template.format(**params)]
def config_cpp(self):
    # NOTE(review): despite the name, this returns the *function* template
    # wrapped in a list and sets call-style keys (w, b, strategy) -- it
    # looks like a garbled duplicate of a function_cpp body; confirm
    # against the canonical layer definitions before relying on it.
    params = self._default_config_params()
    params['strategy'] = self.get_attr('strategy')
    params['w'] = self.get_weights('weight').name
    params['b'] = self.get_weights('bias').name
    return [self._function_template.format(**params)]
def config_cpp(self):
    """Fill the config template with I/O sizes and weight sparsity counts."""
    cfg = self._default_config_params()
    cfg['n_in'] = self.get_input_variable().size_cpp()
    cfg['n_out'] = self.get_output_variable().size_cpp()
    weights = self.get_weights('weight')
    cfg['nzeros'] = weights.nzeros
    cfg['nonzeros'] = weights.nonzeros
    return self._config_template.format(**cfg)
class Conv1D(Layer):
def initialize(self):
    """Declare the 1D conv output variable plus kernel and bias weights."""
    out_shape = [self.attributes['y_out'], self.attributes['n_filt']]
    out_dims = ['Y_OUTPUTS_{}'.format(self.index), 'N_FILT_{}'.format(self.index)]
    self.add_output_variable(out_shape, out_dims)
    self.add_weights()
    self.add_bias()
def function_cpp(self):
    """Build the conv call line with the weight and bias variable names."""
    params = self._default_function_params()
    for key, weight_name in (('w', 'weight'), ('b', 'bias')):
        params[key] = self.get_weights(weight_name).name
    return [self._function_template.format(**params)]
def config_cpp(self):
params = self._default_config_params()