How to use the onnx.numpy_helper.from_array function in onnx

To help you get started, we’ve selected a few onnx examples based on popular ways it is used in public projects.

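Before diving into the project examples, here is a minimal, self-contained sketch of what from_array does: it converts a NumPy array into an ONNX TensorProto, which can then be used as a graph initializer or attribute value. The array values and the name below are illustrative only.

import numpy as np
from onnx import numpy_helper

# Convert a NumPy array to an ONNX TensorProto, optionally giving it a name.
weights = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
tensor = numpy_helper.from_array(weights, name='my_weights')

# The conversion is lossless: to_array recovers the values and dtype.
restored = numpy_helper.to_array(tensor)
assert np.array_equal(weights, restored)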

github seetaresearch / Dragon / Dragon / python / dragon / vm / onnx / nodes / ndarray.py View on Github external
                    if s != output_shape[axis]:
                        raise ValueError('Expected shape[{}] to be {}, but got {}.\n'
                            'Please follow the static data shape on exporting.'.format(
                                axis, s, output_shape[axis]))
        elif arg.name == 'dims_desc':
            for axis, s in enumerate(arg.strings):
                s = fetch_argument(op_def, s, ws)
                if s == -1 or s == 0:
                    output_shape[axis] = s
                else:
                    if s != output_shape[axis]:
                        raise ValueError('Expected shape[{}] to be {}, but got {}.\n'
                            'Please follow the static data shape on exporting.'.format(
                                axis, s, output_shape[axis]))

    shape = numpy_helper.from_array(
        np.array(output_shape, dtype=np.int64),
        name=op_def.input[0] + '/onnx/shape')
    node_proto.input.extend([shape.name])

    return node_proto, [shape]
github microsoft / onnxruntime / onnxruntime / python / tools / quantization / quantize.py View on Github external
            input_scale_name = self.quantized_value_map[node.input[0]].scale_name
            inputscale_initializer = _find_by_name(input_scale_name, self.model.graph.initializer)
            input_scale = self.find_weight_data(inputscale_initializer)

            # calculate scale for bias
            bias_scale_name = node.input[2] + "_scale"
            bias_scale = input_scale * weight_scale
            print(bias_scale)
     
            # quantize bias
            quantized_data = (np.asarray(bias_data) / bias_scale).round().astype(np.int32)
            print(quantized_data)

            # update bias initializer
            bias_np_data = np.asarray(quantized_data, dtype=np.int32).reshape(bias_initializer.dims)
            packed_bias_initializer = onnx.numpy_helper.from_array(bias_np_data, quantized_bias_name)
            self.model.graph.initializer.extend([packed_bias_initializer])

            bias_value_info = onnx.helper.make_tensor_value_info(quantized_bias_name, onnx_proto.TensorProto.INT32, bias_initializer.dims)
            self.model.graph.input.extend([bias_value_info])

            # log entries for this quantized bias value
            quantized_bias_entry = QuantizedInitializer(bias_name, bias_initializer, [0], [0], [0], [bias_scale],
                            bias_data, quantized_data, qType=onnx_proto.TensorProto.INT32)
            self._quantized_weights.append(quantized_bias_entry)
        
            assert bias_name not in self.quantized_value_map
            quantized_value = QuantizedValue(bias_name, quantized_bias_name, "", "", QuantizedValueType.Initializer, None, onnx_proto.TensorProto.INT32)
            self.quantized_value_map[bias_name] = quantized_value

        return quantized_bias_name
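
In the snippet above, the bias scale is the product of the input scale and the weight scale, so dequantizing the packed INT32 initializer recovers the original float bias up to rounding. A minimal sketch of that round trip, with made-up scales and a hypothetical tensor name:

import numpy as np
from onnx import numpy_helper

# Hypothetical scales; in the snippet above they come from the quantizer state.
input_scale, weight_scale = 0.02, 0.005
bias_data = np.array([0.1, -0.25, 0.4], dtype=np.float32)

bias_scale = input_scale * weight_scale
quantized = (bias_data / bias_scale).round().astype(np.int32)
bias_init = numpy_helper.from_array(quantized, name='conv1_bias_quantized')

# Dequantizing approximates the original bias to within one quantization step.
assert np.allclose(quantized.astype(np.float32) * bias_scale, bias_data, atol=bias_scale)
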
github onnx / tensorflow-onnx / tf2onnx / onnx_opset / generator.py View on Github external
def version_9(cls, ctx, node, **kwargs):
        node.type = "ConstantOfShape"
        # Both shape and value in TensorFlow are passed as tensors.
        # In ONNX the value is an attribute, so we need to fetch it as a
        # constant, which sooner or later will be a problem for tensorflow-onnx.
        # ConstantOfShape in onnxruntime only supports int64, so insert a Cast op.
        input_dtype_is_int64 = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[0])) == np.int64
        if not input_dtype_is_int64:
            ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=onnx_pb.TensorProto.INT64)
        dtype = ctx.get_dtype(node.output[0])
        value = np.array([node.inputs[1].get_tensor_value()]).astype(utils.map_onnx_to_numpy_type(dtype))
        value_proto = numpy_helper.from_array(value)
        node.set_attr("value", value_proto)
        del node.input[1]
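
The value attribute of ConstantOfShape must be a one-element tensor, which is why the snippet wraps the scalar in a single-element array before calling from_array. A standalone sketch of the same pattern (node and tensor names are invented for illustration):

import numpy as np
from onnx import helper, numpy_helper

# ConstantOfShape takes an int64 shape input and fills the output with the
# one-element tensor passed as its value attribute.
fill_value = numpy_helper.from_array(np.array([3.14], dtype=np.float32))
node = helper.make_node(
    'ConstantOfShape',
    inputs=['target_shape'],  # must be a 1-D int64 tensor at runtime
    outputs=['filled'],
    value=fill_value,
)
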
github microsoft / onnxconverter-common / onnxconverter_common / optimizer.py View on Github external
def _update_broadcast_from_initializers(node, init_pred_value, cur_perm, init_idx):
    for axis_ in range(len(cur_perm) - len(init_pred_value.shape)):
        init_pred_value = np.expand_dims(init_pred_value, axis=axis_)
    init_pred_value = np.transpose(init_pred_value, tuple(_get_reverse_perm(cur_perm)))
    add_initializer = numpy_helper.from_array(init_pred_value, name=node.origin.name + '_initializer_' + str(
        PushTransposeSolution.transpose_number))
    PushTransposeSolution.transpose_number += 1
    node.initializers = [add_initializer]
    prev = node.get_precedence_by_idx(init_idx)
    prev.successor.remove(node)
    node.precedence.remove(prev)
    node.in_redirect(node.get_input_by_idx(init_idx), add_initializer.name)
    return node
github chainer / onnx-chainer / onnx_chainer / functions / math / basic_math.py View on Github external
def convert_PowVarConst(
        func, input_names, param_names, parameters, input_tensors):
    layer_name = 'Pow_{}'.format(str(id(func.value)))
    value = np.asarray([func.value], dtype=func.inputs[0].get_variable().dtype)
    param_names[id(func.value)] = os.path.join(layer_name, 'value')

    parameters.append(
        numpy_helper.from_array(
            value,
            param_names[id(func.value)]
        )
    )
    input_tensors.append(
        helper.make_tensor_value_info(
            param_names[id(func.value)],
            mapping.dtypes[value.dtype],
            value.shape
        )
    )
    input_names.append(param_names[id(func.value)])

    return convert_binary_operator(
        func, input_names, param_names, parameters, input_tensors)
github onnx / tensorflow-onnx / tf2onnx / tf_utils.py View on Github external
def tf_to_onnx_tensor(tensor, name=""):
    """Convert tensorflow tensor to onnx tensor."""
    np_data = get_tf_tensor_data(tensor)
    if np_data.dtype == object:
        # Assume np_data holds strings: numpy_helper.from_array accepts an
        # ndarray in which each item is a str while the overall dtype is object.
        try:
            np_data = np_data.astype(str).astype(object)
        except:  # pylint: disable=bare-except
            raise RuntimeError("Unsupported type: {}".format(type(np_data.flat[0])))
    return numpy_helper.from_array(np_data, name=name)
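
For reference, from_array handles string tensors directly when the array uses object dtype with str items, which is exactly the invariant the conversion above enforces. A minimal sketch:

import numpy as np
from onnx import numpy_helper

# Each element is a Python str; the array dtype itself is object.
labels = np.array(['cat', 'dog', 'bird'], dtype=object)
tensor = numpy_helper.from_array(labels, name='class_labels')

# Strings round-trip as bytes in the TensorProto's string_data field.
print(numpy_helper.to_array(tensor))
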
github pfnet-research / chainer-compiler / scripts / onnx_script.py View on Github external
def make_graph(self):
        inputs_vi = [_extract_value_info(a, n)
                     for n, a in self.inputs + self.params]
        outputs_vi = [_extract_value_info(a, n) for n, a in self.outputs]
        initializer = []
        for name, value in self.params:
            tensor = numpy_helper.from_array(value, name=name)
            initializer.append(tensor)
        graph = onnx.helper.make_graph(self.nodes, self.graph_name,
                                       inputs=inputs_vi, outputs=outputs_vi,
                                       initializer=initializer)
        return graph
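
Putting the pieces together, here is a minimal end-to-end sketch of a model whose weight enters the graph as a from_array initializer; all names, shapes, and values are invented for illustration.

import numpy as np
import onnx
from onnx import helper, numpy_helper, TensorProto

# Pack the weight matrix as a named initializer.
weight = numpy_helper.from_array(np.ones((4, 4), dtype=np.float32), name='W')

node = helper.make_node('MatMul', inputs=['X', 'W'], outputs=['Y'])
graph = helper.make_graph(
    [node], 'matmul_graph',
    inputs=[helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 4])],
    outputs=[helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 4])],
    initializer=[weight],
)
model = helper.make_model(graph)
onnx.checker.check_model(model)
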
github chainer / onnx-chainer / onnx_chainer / functions / array / tile.py View on Github external
def convert_Tile(func, input_names, param_names, parameters, input_tensors):

    # Add tiles and axis to graph
    if isinstance(func.reps, int):
        func.reps = [func.reps]
    tiles = np.asarray(func.reps, dtype=np.float32)
    axis = np.array([i for i, _ in enumerate(func.reps)], dtype=np.float32)
    layer_name = 'tile_{}'.format(str(id(tiles)))

    param_names[id(tiles)] = os.path.join(layer_name, 'tiles')
    parameters.append(
        numpy_helper.from_array(
            tiles,
            param_names[id(tiles)]
        )
    )
    input_tensors.append(
        helper.make_tensor_value_info(
            param_names[id(tiles)],
            mapping.dtypes[tiles.dtype],
            tiles.shape
        )
    )
    input_names.append(param_names[id(tiles)])

    param_names[id(axis)] = os.path.join(layer_name, 'axis')
    parameters.append(
        numpy_helper.from_array(
            axis,
            param_names[id(axis)]
        )
    )
github chainer / onnx-chainer / onnx_chainer / functions / normalization / batch_normalization.py View on Github external
def convert_BatchNormalization(
        func, input_names, param_names, parameters, input_tensors):

    layer_name = os.path.dirname(param_names[id(func.gamma)])

    # Add running_mean and running_var to graph
    param_names[id(func.running_mean)] = os.path.join(
        layer_name, 'running_mean')
    parameters.append(
        numpy_helper.from_array(
            func.running_mean,
            param_names[id(func.running_mean)]))
    input_tensors.append(
        helper.make_tensor_value_info(
            param_names[id(func.running_mean)],
            mapping.dtypes[func.running_mean.dtype],
            func.running_mean.shape)
    )

    param_names[id(func.running_var)] = os.path.join(
        layer_name, 'running_var')
    parameters.append(
        numpy_helper.from_array(
            func.running_var,
            param_names[id(func.running_var)]))
    input_tensors.append(
        helper.make_tensor_value_info(
            param_names[id(func.running_var)],
            mapping.dtypes[func.running_var.dtype],
            func.running_var.shape)
    )