def visit_Pad(self, node):
    lefttype = self.visit(node.inputs[0])
    self.visit(node.inputs[1])
    s = self.gdict[node.inputs[1]].attr['symbolic_value']
    if not s:
        attr_type = self._get_type_from_attr(node)
        if not attr_type and self.gdict[node.inputs[1]].datatype and not any_symbolic_or_unknown(
                self.gdict[node.inputs[1]].datatype.T[1]):
            # No symbolic paddings, but at least we can recover the rank,
            # so return a tensor of fresh symbolic dimensions.
            rank = self.gdict[node.inputs[1]].datatype.T[1][0]
            ret_shape = [make_symbol(node.name + "_" + str(i)) for i in range(rank)]
            return builtins.tensor(lefttype.get_primitive(), ret_shape)
        else:
            return attr_type
    s = s.val
    assert len(s.shape) == 2, "padding specs must be of shape [r, 2], " \
        "where r is rank of input tensor"
    if not builtins.is_tensor(lefttype):
        raise RuntimeError("Pad only operates on tensor type, but got " + str(lefttype))
    # Each output dimension grows by the leading and trailing pad amounts.
    retshape = list(lefttype.get_shape())
    for i in range(len(retshape)):
        retshape[i] = retshape[i] + s[i][0] + s[i][1]
    rettype = builtins.tensor(lefttype.get_primitive(), retshape)
    left_sym_val = self.gdict[node.inputs[0]].attr["symbolic_value"]
    if left_sym_val:
        # Input value is known, so the padded value can be computed eagerly.
        node.attr["symbolic_value"] = rettype()
        node.attr["symbolic_value"].val = np.pad(
            left_sym_val.val, s, "constant", constant_values=node.attr['constant_values'])
    return rettype
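# A minimal standalone sketch (not from the original file) of the shape rule
# visit_Pad implements: with an [r, 2] padding spec s, output dim i is
# input dim i + s[i][0] + s[i][1], which is exactly what np.pad produces.
#
#   import numpy as np
#   x = np.zeros((2, 3))
#   s = np.array([[1, 1], [0, 2]])
#   y = np.pad(x, s, "constant", constant_values=0)
#   assert y.shape == (4, 5)  # (2 + 1 + 1, 3 + 0 + 2)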
def visit_Shape(self, node):
    parent_type = self.visit(node.inputs[0])
    if parent_type is None or not builtins.is_tensor(parent_type):
        # Unknown input: all we know is that Shape yields a rank-1 int32
        # tensor, so give it a symbolic length.
        return builtins.tensor(builtins.int32, [make_symbol(node.name + '_shape')])
    # The early return guarantees the input is a tensor here; its shape is
    # a rank-1 int32 tensor whose length is the input's rank.
    shape = parent_type.get_shape()
    rettype = builtins.tensor(builtins.int32, [len(shape)])
    if len(shape) > 0:
        # we have the true value
        node.attr['symbolic_value'] = rettype()
        node.attr['symbolic_value'].val = np.array(shape)
    return rettype
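# Standalone sketch (plain numpy, not the builtins type system) of the same
# rule: the shape of a rank-r tensor is itself a rank-1 int32 tensor of
# length r.
#
#   import numpy as np
#   x = np.zeros((3, 4, 5))
#   shape_val = np.array(x.shape, dtype=np.int32)
#   assert shape_val.shape == (3,) and shape_val.tolist() == [3, 4, 5]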
def visit_Slice(self, node):
    for i in node.inputs:
        self.visit(i)
    input_type = self.visit(node.inputs[0])
    input_shape = input_type.get_shape()
    input_value = self.gdict[node.inputs[0]].attr['symbolic_value']
    try:
        begin = list(self.gdict[node.inputs[1]].attr['symbolic_value'].val)
        size = list(self.gdict[node.inputs[2]].attr['symbolic_value'].val)
        # size == -1 means "slice to the end"; encode that as INT_MAX and
        # clip against the actual input shape below.
        end = [
            int(begin[i] + size[i]) if size[i] != -1 else 2147483647 for i in range(len(begin))
        ]
        assert builtins.is_tensor(input_type)
        input_shape = input_type.get_shape()
        end = [min(i, j) for i, j in zip(end, input_shape)]
        size = [min(s, e - b) for s, b, e in zip(size, begin, end)]
        # Record [begin, end, stride] per axis, plus which axes touch the
        # tensor boundaries.
        slices = [[int(begin[i]), int(end[i]), 1] for i in range(len(begin))]
        node.attr['slice'] = slices
        node.attr['begin_masks'] = [idx for idx, value in enumerate(begin) if value == 0]
        node.attr['end_masks'] = [idx for idx, value in enumerate(end) if value == 2147483647]
        node.attr['squeeze'] = []
        output_value = None
        if input_value is not None:
            # Input value is known; slice it eagerly.
            slices = [slice(*i) for i in slices]
            slices = tuple(slices)
            res = input_value.val[slices]
            if isscalar(res):
                rettype = input_type.get_primitive()
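# Standalone sketch (assumed values) of the begin/size -> [begin, end, stride]
# conversion above; INT_MAX stands in for "to the end" and is then clipped to
# the input shape.
#
#   INT_MAX = 2147483647
#   input_shape = [8, 10]
#   begin, size = [2, 0], [3, -1]
#   end = [b + s if s != -1 else INT_MAX for b, s in zip(begin, size)]
#   end = [min(e, d) for e, d in zip(end, input_shape)]  # [5, 10]
#   slices = [[b, e, 1] for b, e in zip(begin, end)]     # [[2, 5, 1], [0, 10, 1]]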
def visit_OneHot(self, node):
    assert len(node.inputs) == 4
    indices_type = self.visit(node.inputs[0])
    depth_type = self.visit(node.inputs[1])
    on_type = self.visit(node.inputs[2])
    off_type = self.visit(node.inputs[3])
    if not builtins.is_int(depth_type):
        raise ValueError('depth must be integral in {} node {}'.format(node.op, node.name))
    if not builtins.utils.is_primitive(on_type) or not builtins.utils.is_primitive(off_type):
        raise ValueError(
            'On and off types must be primitive in {} node {}'.format(node.op, node.name))
    if on_type != off_type:
        raise ValueError(
            'On and off types must be the same in {} node {}'.format(node.op, node.name))
    axis = node.attr.get('axis')
    if not isinstance(axis, six.integer_types) or axis < -1:
        raise ValueError('axis must be integer >= -1 in {} node {}'.format(node.op, node.name))
    if builtins.is_tensor(indices_type):
        indices_shape = list(indices_type.get_shape())
    else:
        indices_shape = [1]
    depth_value = self._get_symbolic_value(node.inputs[1]).val
    if depth_value is None:
        # Unknown depth: use a fresh symbol for the new dimension.
        depth_value = make_symbol(node.name + '_depth')
    elif depth_value < 0:
        raise ValueError('depth must be non-negative in {} node {}'.format(node.op, node.name))
    if 'dtype' in node.attr:
        ret_primitive = node.attr.get('T')
        if ret_primitive is None or not builtins.is_primitive(ret_primitive):
            raise ValueError(
                'Output tensor data type must be primitive in {} node {}'.format(
                    node.op, node.name))
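# Standalone sketch (assumed values) of the one-hot shape rule: the output
# shape is the indices shape with depth inserted at axis; axis == -1 appends.
#
#   import numpy as np
#   indices, depth = np.array([0, 2, 1]), 3
#   one_hot = np.eye(depth)[indices]  # axis == -1 case
#   assert one_hot.shape == (3, 3)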
def _get_tensor_shape_from_type(self, type_):
    if _is_scalar(type_):
        shape = (1,)
    elif builtins.is_tensor(type_):
        shape = type_.get_shape()
    elif builtins.is_list(type_):
        # All list elements must share a shape; -1 marks the variable
        # list length.
        element_shape = type_.T[0].get_shape()
        for ashape in type_.T:
            assert ashape.get_shape() == element_shape
        shape = [-1] + list(element_shape)
    else:
        shape = None
    return shape
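# Illustrative calls (using this file's own constructors):
#
#   _get_tensor_shape_from_type(builtins.int32)                           # (1,)
#   _get_tensor_shape_from_type(builtins.tensor(builtins.int32, (2, 3)))  # (2, 3)
#   # a list of (2, 3) tensors -> [-1, 2, 3]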
def __compare_propagated_and_inferred_shape(self, name, type_):
    propagated_shape = tuple(self.tensor_shapes[name])
    if _is_scalar(type_):
        inferred_shape = (1,)
    elif builtins.is_tensor(type_):
        inferred_shape = type_.get_shape()
    elif builtins.is_list(type_):
        element_shape = type_.T[0].get_shape()
        for ashape in type_.T:
            assert ashape.get_shape() == element_shape
        inferred_shape = [-1] + list(element_shape)
    else:
        raise ValueError('[SSAConverter] Failed to infer shape for tensor %s' % name)
    mismatch = '[SSAConverter] Shape mismatch for {}: inferred {} vs. propagated {}.'.format(
        name, inferred_shape, propagated_shape)
    if len(propagated_shape) != len(inferred_shape):
        raise ValueError(mismatch)
    for pdim, idim in zip(propagated_shape, inferred_shape):
        # -1 is a wildcard dimension; concrete dims must agree exactly.
        if pdim != -1 and idim != -1 and pdim != idim:
            raise ValueError(mismatch)
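# Standalone sketch of the per-dimension check: -1 acts as a wildcard that
# matches anything; all other dims must agree.
#
#   def dims_match(pdim, idim):
#       return pdim == -1 or idim == -1 or pdim == idim
#   assert dims_match(-1, 7) and dims_match(5, 5) and not dims_match(5, 6)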
def _promoted_primitive_type(self, type1, type2):
    """
    Given a pair of tensor or primitive types, return the smallest primitive
    type that can store instances of both.
    """
    ptype1 = type1.get_primitive() if builtins.is_tensor(type1) else type1
    ptype2 = type2.get_primitive() if builtins.is_tensor(type2) else type2
    return promote_types(ptype1, ptype2)
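# Rough analogue (assumption: numpy's promotion table, not this package's
# promote_types) of what primitive promotion means:
#
#   import numpy as np
#   assert np.promote_types(np.int8, np.int32) == np.int32
#   assert np.promote_types(np.int32, np.float32) == np.float64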
def _is_scalar(type_):
    if type_ is None:
        return False
    result = builtins.is_int(type_) or builtins.is_float(type_) or builtins.is_bool(type_)
    if builtins.is_tensor(type_) and (len(type_.get_shape()) == 0):
        result = True
    return result
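# Illustrative calls: primitives and rank-0 tensors count as scalars.
#
#   _is_scalar(builtins.int32)                         # True (primitive)
#   _is_scalar(builtins.tensor(builtins.int32, ()))    # True (rank-0 tensor)
#   _is_scalar(builtins.tensor(builtins.int32, (3,)))  # False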
conversion_message = ''
if custom_conversion_name is not None:
    conversion_message = ' with custom conversion function'
elif op_type in self.CONVERT_FUNCTION_MAP:
    convert_func = self.CONVERT_FUNCTION_MAP[op_type]
elif self.add_custom_layers:
    # Unknown op: fall back to a custom layer.
    convert_func = self._convert_custom_layer
    conversion_message = ' with custom layer'
else:
    raise NotImplementedError(
        '[SSAConverter] Conversion for op %s not implemented, terminating...' % op_type)

print('[SSAConverter] [{}/{}] Converting op type: \'{}\', name: \'{}\'{}{}'.format(
    idx + 1, len(instruction_order), op_type, node_name, conversion_message,
    ((', output_shape: ' + str(node.datatype.get_shape()) + '.')
     if builtins.is_tensor(node.datatype) else '.')))

# If a custom conversion method is provided, use it;
# otherwise, invoke the internal conversion method.
if custom_conversion_name is not None:
    self.custom_conversion_functions[custom_conversion_name](self, node)
else:
    convert_func(node)
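# Sketch (hypothetical registration, inferred from the lookup above): callers
# supply a dict of custom conversion functions that take precedence over
# CONVERT_FUNCTION_MAP; ops unknown to both become custom layers only when
# add_custom_layers is set.
#
#   def convert_my_op(converter, node):  # hypothetical converter function
#       ...
#   custom_conversion_functions = {'MyOp': convert_my_op}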
# Make a buffer between variable inputs
builder = self._get_builder()
for name, var in self.net_ensemble.variables.items():
    layer = builder.add_copy(
        name=name + '_copy_r',
        input_name=name,
        output_name=name + '__outvar__')
    shapes.propagate_single_layer(layer, self.tensor_shapes)
def recursive_replace_symbols_in_type_with_unknown(dtype):
    if builtins.is_list(dtype):
        return builtins.list(recursive_replace_symbols_in_type_with_unknown(dtype.T[0]))
    elif builtins.is_tuple(dtype):
        return builtins.tuple(
            tuple(recursive_replace_symbols_in_type_with_unknown(t) for t in dtype.T))
    elif builtins.is_tensor(dtype):
        # Symbolic (sympy) dimensions become -1; concrete dims stay as ints.
        return builtins.tensor(
            dtype.get_primitive(),
            tuple(-1 if issubclass(type(t), sm.Basic) else int(t) for t in dtype.get_shape()))
    else:
        return dtype
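# Illustrative call (names from this file): a symbolic batch dimension is
# replaced by -1 while concrete dimensions are preserved.
#
#   t = builtins.tensor(builtins.int32, (make_symbol('batch'), 4))
#   u = recursive_replace_symbols_in_type_with_unknown(t)
#   # u.get_shape() == (-1, 4)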