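# Tail of an LSTM block cell conversion: the arguments below close a
# recurrent-layer call (presumably builder.add_unilstm) whose name, weight,
# and bias arguments precede this fragment.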
    hidden_size=hidden_size,
    input_size=input_size,
    input_names=[
        node.name + '_in_expand',
        node.name + '_h_prev_expand',
        node.name + '_c_prev_expand'
    ],
    output_names=[
        node.name + '_lstm_out',
        node.name + '_lstm_h',
        node.name + '_lstm_c',
    ],
    forget_bias=has_forget_bias,
    output_all=True,
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
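# Remove the two trailing singleton axes that were added when the LSTM
# inputs were expanded.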
layer = builder.add_squeeze(
    name=node.name + '_out',
    input_name=node.name + '_lstm_out',
    output_name=node.name + '_out',
    axes=[-1, -2]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
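# Core ML's lstm_h output carries only the final step's hidden state, so the
# full per-step sequence output is copied to a temporary hidden-state blob.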
layer = builder.add_copy(
    name=node.name + '_temp_h',
    input_name=node.name + '_lstm_out',
    output_name=node.name + '_temp_h'
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
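# Core ML tensors are floating point, so an integer cast is approximated by
# a rounding layer rather than a true type conversion.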
def _convert_cast(self, node):
    assert len(node.inputs) == 1
    input_nodes, input_names, input_types = self._get_input_tensors(node)
    layer = self._get_builder().add_round(
        name=node.name,
        input_name=input_names[0],
        output_name=node.name)
    shapes.propagate_single_layer(layer, self.tensor_shapes)
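# Logical NOT maps onto Core ML's generic logical layer with mode='NOT'.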
def _convert_unary_logical_not(self, node):
    assert len(node.inputs) == 1
    input_nodes, input_names, input_types = self._get_input_tensors(node)
    layer = self._get_builder().add_logical(
        name=node.name,
        input_names=input_names,
        output_name=node.name,
        mode='NOT')
    shapes.propagate_single_layer(layer, self.tensor_shapes)
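# Collect the (begin, end, stride) triple for every sliced axis, then emit
# them as a single static slice layer.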
for s in slices:
    begin_indices.append(s[0])
    end_indices.append(s[1])
    strides.append(s[2])

layer = builder.add_slice_static(
    name=slice_output_name,
    input_name=input_names[0],
    output_name=slice_output_name,
    begin_ids=begin_indices,
    end_ids=end_indices,
    strides=strides,
    begin_masks=begin_masks,
    end_masks=end_masks)
shapes.propagate_single_layer(layer, self.tensor_shapes)
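# When the slice also squeezes, drop the listed axes; if every axis is
# squeezed, pass squeeze_all instead of an explicit axis list.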
if has_squeeze:
    input_shape = self._get_tensor_shape_from_type(input_types[0])
    input_rank = len(input_shape)
    squeeze_all = (input_rank == len(axes))
    layer = builder.add_squeeze(
        name=node.name,
        input_name=slice_output_name,
        output_name=node.name,
        axes=axes if not squeeze_all else None,
        squeeze_all=squeeze_all)
    shapes.propagate_single_layer(layer, self.tensor_shapes)
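# TensorArray read: gather the requested element along axis 0. The inputs
# are reversed so the array comes first and the index second, matching the
# gather layer's (params, indices) order.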
slice_output_name = node.name + '_slice_'
layer = self._get_builder().add_gather(
    name=node.name + '_gather_',
    input_names=input_names[::-1],
    output_name=slice_output_name,
    axis=0)
shapes.propagate_single_layer(layer, self.tensor_shapes)
# A TensorArray read yields exactly one slice, so squeezing axis 0 is
# enough to recover the element.
layer = self._get_builder().add_squeeze(
    name=node.name + '_squeeze_',
    input_name=slice_output_name,
    output_name=node.name,
    axes=[0])
shapes.propagate_single_layer(layer, self.tensor_shapes)
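# ResizeBilinear needs a target size known at conversion time; the raise
# below completes a statement truncated in this snippet. TF's align_corners
# flag selects strict endpoint alignment, otherwise plain upsample mode.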
raise ValueError('Cannot determine target size '
                 'for ResizeBilinear')
mode = 'STRICT_ALIGN_ENDPOINTS_MODE' if node.attr.get(
    'align_corners', False) else 'UPSAMPLE_MODE'
builder = self._get_builder()
layer = builder.add_resize_bilinear(
    name=node.name,
    input_name=input_names[0],
    output_name=node.name,
    target_height=target_size[0],
    target_width=target_size[1],
    mode=mode)
output_shape = self._get_tensor_shape_from_type(node.datatype)
shapes.propagate_single_layer(layer, self.tensor_shapes,
                              output_shapes=[output_shape])
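# SpaceToBatchND (equal block sizes only): bring the last axis to the front,
# pad the spatial dimensions if needed, then fold spatial blocks into depth.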
assert len(node.inputs) == 3
input_nodes, input_names, input_types = self._get_input_tensors(node)
block_shape = input_nodes[1].value.val
if len(block_shape.flatten()) != 2 or block_shape[0] != block_shape[1]:
    raise NotImplementedError('non-equal block shape is not yet supported')
paddings = input_nodes[2].value.val
needs_paddings = any(paddings.flatten())
builder = self._get_builder()
layer = builder.add_transpose(
    name=node.name + '_transpose1',
    input_name=input_names[0],
    output_name=node.name + '_transpose1',
    axes=[3, 0, 1, 2]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
if needs_paddings:
    left, right = paddings[1][0], paddings[1][1]
    top, bottom = paddings[0][0], paddings[0][1]
    layer = builder.add_padding(
        name=node.name + '_padding',
        left=left,
        right=right,
        top=top,
        bottom=bottom,
        input_name=node.name + '_transpose1',
        output_name=node.name + '_padding'
    )
    shapes.propagate_single_layer(layer, self.tensor_shapes)
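# Reorganize spatial blocks into depth. The call below was truncated in the
# snippet; the argument list follows NeuralNetworkBuilder.add_reorganize_data,
# and the '_reorganize' blob name is illustrative.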
layer = builder.add_reorganize_data(
    name=node.name + '_reorganize',
    input_name=node.name + '_padding' if needs_paddings else node.name + '_transpose1',
    output_name=node.name + '_reorganize',
    mode='SPACE_TO_DEPTH',
    block_size=block_shape[0])
shapes.propagate_single_layer(layer, self.tensor_shapes)
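# Inside an if-branch builder: concatenate the new element onto the array,
# then copy the result back under the array's original blob name.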
layer = ifbranch.add_concat_nd(
    name=array_name + "_updated",
    input_names=[array_name, array_name + "_new_element"],
    output_name=array_name + "_updated",
    axis=0)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = ifbranch.add_copy(
    name=array_name + '_assign',
    input_name=array_name + "_updated",
    output_name=array_name
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
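# Scatter-based TensorArray write: the value is expanded with a leading axis
# so a single row can be scattered into the array at the given index.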
values_name = node.name + '_expanded'
layer = self._get_builder().add_expand_dims(
    name=values_name, input_name=value_name, output_name=values_name, axes=[0])
shapes.propagate_single_layer(layer, self.tensor_shapes)
# 3 inputs: [Scatter target, indices, scatter source]
layer = self._get_builder().add_scatter(
    name=node.name,
    input_names=[array_name, index_name, values_name],
    output_name=node.name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
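# Tail of a Split conversion: equal splits close an add_split_nd call using
# num_splits, while unequal splits pass explicit split_sizes; output shapes
# are then recorded per output tensor.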
        output_names=output_names,
        axis=axis,
        num_splits=num_splits)
else:
    layer = self._get_builder().add_split_nd(
        name=node.name,
        input_name=input_names[tensor_id],
        output_names=output_names,
        axis=axis,
        split_sizes=list(split))

if not has_equal_splits:
    for i, name in enumerate(output_names):
        self.tensor_shapes[name] = self._get_tensor_shape_from_type(node.datatype.T[i])
else:
    shapes.propagate_single_layer(layer, self.tensor_shapes)
# If custom conversion method is provided, use it
# Otherwise, invoke internal conversion method
if custom_conversion_name is not None:
    self.custom_conversion_functions[custom_conversion_name](self, node)
else:
    convert_func(node)
# Buffer each variable input behind a copy layer, re-exposing the blob
# under a '__outvar__' suffix.
builder = self._get_builder()
for name, var in self.net_ensemble.variables.items():
    layer = builder.add_copy(
        name=name + '_copy_r',
        input_name=name,
        output_name=name + '__outvar__')
    shapes.propagate_single_layer(layer, self.tensor_shapes)