How to use the onnx.numpy_helper module in onnx

To help you get started, we’ve selected a few onnx examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github chainer / onnx-chainer / tests / test_external_converter.py View on Github external
# NOTE(review): this snippet starts mid-function — `domain`, `version`, `model`,
# `x`, `path`, `addon_converters`, and `external_opset_imports` are defined by
# the enclosing test; the first line also lost its indentation in extraction.
is_set_domain = domain is not None
    if is_set_domain:
        external_opset_imports[domain] = version
    # Export directly only when the installed onnx helper supports a
    # non-standard domain; otherwise the export is expected to warn.
    if is_set_domain and onnx_helper.is_support_non_standard_domain():
        export_testcase(
            model, x, path, external_converters=addon_converters,
            external_opset_imports=external_opset_imports)
    else:
        with testing.assert_warns(UserWarning):
            export_testcase(
                model, x, path, external_converters=addon_converters,
                external_opset_imports=external_opset_imports)

    # Round-trip check: the exported output_0.pb must exist and decode (via
    # onnx.numpy_helper.to_array) to an all-ones array shaped like x.
    output_path = os.path.join(path, 'test_data_set_0', 'output_0.pb')
    assert os.path.isfile(output_path)
    output = onnx.numpy_helper.to_array(onnx.load_tensor(output_path))
    expected_output = np.ones_like(x)
    np.testing.assert_allclose(output, expected_output, rtol=1e-5, atol=1e-5)
github onnxbot / onnx-fb-universe / test / verify.py View on Github external
def equalAndThen(self, x, y, msg, k):
        """
        Helper for implementing 'requireEqual' and 'checkEqual'.  Upon failure,
        invokes continuation 'k' with the error message.
        """
        # TensorProto pair: compare names, then decode both to numpy and
        # recurse into the ndarray branch below.
        if isinstance(x, onnx.TensorProto) and isinstance(y, onnx.TensorProto):
            self.equalAndThen(x.name, y.name, msg, k)
            # Use numpy for the comparison
            t1 = onnx.numpy_helper.to_array(x)
            t2 = onnx.numpy_helper.to_array(y)
            new_msg = "{}In embedded parameter '{}'".format(colonize(msg), x.name)
            self.equalAndThen(t1, t2, new_msg, k)
        elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
            try:
                np.testing.assert_equal(x, y)
            except AssertionError as e:
                # BUG(review): this bare `raise` re-raises immediately, so the
                # continuation call below is unreachable dead code — contrary
                # to the docstring's promise that 'k' receives the error
                # message. Looks like a debugging leftover; confirm and remove.
                raise
                k("{}{}".format(colonize(msg, ": "), str(e).lstrip()))
        else:
            # Fallback: plain equality with a string rendering for the message.
            if x != y:
                # TODO: Better algorithm for lists
                sx = str(x)
                sy = str(y)
                # NOTE(review): snippet is truncated here — the long-form
                # message construction continues in the original file.
                if len(sx) > 40 or len(sy) > 40 or '\n' in sx or '\n' in sy:
                    # long form
github onnxbot / onnx-fb-universe / test / test_operators.py View on Github external
# NOTE(review): snippet starts mid-function — `output_dir`, `model_def`,
# `args`, `m`, `flatten`, and `Variable` come from the enclosing test driver;
# the first line also lost its indentation in extraction.
os.makedirs(output_dir)
            with open(os.path.join(output_dir, "model.onnx"), 'wb') as file:
                file.write(model_def.SerializeToString())
            data_dir = os.path.join(output_dir, "test_data_set_0")
            os.makedirs(data_dir)
            # Normalize a single Variable into a 1-tuple so the loops below
            # can iterate uniformly.
            if isinstance(args, Variable):
                args = (args,)
            # Serialize every (flattened) input tensor as input_<i>.pb.
            for index, var in enumerate(flatten(args)):
                tensor = numpy_helper.from_array(var.data.numpy())
                with open(os.path.join(data_dir, "input_{}.pb".format(index)), 'wb') as file:
                    file.write(tensor.SerializeToString())
            # Run the module and serialize each output as output_<i>.pb.
            outputs = m(*args)
            if isinstance(outputs, Variable):
                outputs = (outputs,)
            for index, var in enumerate(flatten(outputs)):
                tensor = numpy_helper.from_array(var.data.numpy())
                with open(os.path.join(data_dir, "output_{}.pb".format(index)), 'wb') as file:
                    file.write(tensor.SerializeToString())
github chainer / chainer / tests / onnx_chainer_tests / test_replace_func.py View on Github external
def load_tensor(path):
    """Deserialize the TensorProto stored at *path* and return it as a numpy array."""
    proto = onnx.load_tensor(path)
    return onnx.numpy_helper.to_array(proto)
github Xilinx / finn / tests / transformation / test_streamline.py View on Github external
def test_streamline_lfc_w1a1():
    """Check that Streamline() leaves the LFC-w1a1 network's output unchanged."""
    # Build the binarized LFC net, restore trained weights, export to FINN-ONNX.
    net = LFC(weight_bit_width=1, act_bit_width=1, in_bit_width=1)
    state = torch.load(trained_lfc_checkpoint, map_location="cpu")
    net.load_state_dict(state["state_dict"])
    bo.export_finn_onnx(net, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    # Canonicalize the graph before executing it.
    for tfm in (InferShapes(), FoldConstants(),
                GiveUniqueNodeNames(), GiveReadableTensorNames()):
        model = model.transform(tfm)
    # load one of the test vectors
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    tensor_proto = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    feed = {"global_in": nph.to_array(tensor_proto)}
    golden_ctx = oxe.execute_onnx(model, feed, True)
    golden = golden_ctx[model.graph.output[0].name]
    # Re-run after streamlining and compare against the pre-transform result.
    model = model.transform(Streamline())
    streamlined_ctx = oxe.execute_onnx(model, feed, True)
    streamlined = streamlined_ctx[model.graph.output[0].name]
    assert np.isclose(golden, streamlined, atol=1e-3).all()
    os.remove(export_onnx_path)
github pytorch / pytorch / test / onnx / export_onnx_tests_generator.py View on Github external
# NOTE(review): snippet starts mid-try-block — `output_dir`, `onnx_model`,
# `sets`, `module`, `input`, `t`, `gen_input`, `module_name`, `nn_module`,
# `failed`, and `testcases` are defined by the enclosing loop; the first line
# also lost its indentation in extraction.
shutil.rmtree(output_dir)
            os.makedirs(output_dir)
            with open(os.path.join(output_dir, "model.onnx"), "wb") as file:
                file.write(onnx_model.SerializeToString())

            # Emit `sets` data sets, each with serialized input/output pairs.
            for i in range(sets):
                output = module(input)
                data_dir = os.path.join(output_dir, "test_data_set_{}".format(i))
                os.makedirs(data_dir)

                for index, var in enumerate([input]):
                    tensor = numpy_helper.from_array(var.data.numpy())
                    with open(os.path.join(data_dir, "input_{}.pb".format(index)), "wb") as file:
                        file.write(tensor.SerializeToString())
                for index, var in enumerate([output]):
                    tensor = numpy_helper.from_array(var.data.numpy())
                    with open(os.path.join(data_dir, "output_{}.pb".format(index)), "wb") as file:
                        file.write(tensor.SerializeToString())
                # Fresh random input for the next data set.
                input = gen_input(t)
                # Bit 1 marks the module as successfully exported at least once.
                if (module_name != "FunctionalModule"):
                    nn_module[module_name] |= 1
        # NOTE(review): bare except is deliberate best-effort here (noqa'd),
        # but it would also swallow KeyboardInterrupt/SystemExit — consider
        # `except Exception` instead.
        except:  # noqa: E722
            traceback.print_exc()
            # Bit 2 marks the module as having failed at least once.
            if (module_name != "FunctionalModule"):
                nn_module[module_name] |= 2
            failed += 1

    print("Collect {} test cases from PyTorch repo, failed to export {} cases.".format(
        len(testcases), failed))
    print("PyTorch converted cases are stored in {}.".format(test_onnx_common.pytorch_converted_dir))
    print_stats(FunctionalModule_nums, nn_module)
github ONNC / onnc / models / mnist / create_model.py View on Github external
# Load pre-quantized weight arrays from disk and wrap each as an ONNX
# TensorProto via numpy_helper.from_array. These module-level names are
# referenced later in the script, so they must keep their exact spellings.
Parameter87_tensor = numpy_helper.from_array(Parameter87_array)

Parameter88_array = np.load('./quantized_weight/quant_Parameter88.npy')
Parameter88_tensor = numpy_helper.from_array(Parameter88_array)

Parameter193_array = np.load('./quantized_weight/quant_Parameter193.npy')
Parameter193_tensor = numpy_helper.from_array(Parameter193_array)

Parameter194_array = np.load('./quantized_weight/quant_Parameter194.npy')
Parameter194_tensor = numpy_helper.from_array(Parameter194_array)

# Shape tensors consumed by the two Reshape nodes in the MNIST graph.
Pooling160_Output_0_reshape0_shape_array = np.load('./quantized_weight/Pooling160_Output_0_reshape0_shape.npy')
Pooling160_Output_0_reshape0_shape_tensor = numpy_helper.from_array(Pooling160_Output_0_reshape0_shape_array)

Parameter193_reshape1_shape_array = np.load('./quantized_weight/Parameter193_reshape1_shape.npy')
Parameter193_reshape1_shape_tensor = numpy_helper.from_array(Parameter193_reshape1_shape_array)

# create tensors
# Graph inputs / initializer value-infos with their static shapes.
Input3 =        helper.make_tensor_value_info('Input3', TensorProto.FLOAT, [1,1,28,28])
Parameter5 =        helper.make_tensor_value_info('Parameter5', TensorProto.FLOAT, [8,1,5,5])
Parameter6 =        helper.make_tensor_value_info('Parameter6', TensorProto.FLOAT, [8,1,1])
Parameter87 =        helper.make_tensor_value_info('Parameter87', TensorProto.FLOAT, [16,8,5,5])
Parameter88 =        helper.make_tensor_value_info('Parameter88', TensorProto.FLOAT, [16,1,1])
Pooling160_Output_0_reshape0_shape =        helper.make_tensor_value_info('Pooling160_Output_0_reshape0_shape', TensorProto.FLOAT, [1,256])
Parameter193_reshape1_shape =        helper.make_tensor_value_info('Parameter193_reshape1_shape', TensorProto.FLOAT, [256,10])
Parameter193 =        helper.make_tensor_value_info('Parameter193', TensorProto.FLOAT, [16,4,4,10])
Parameter194 =        helper.make_tensor_value_info('Parameter194', TensorProto.FLOAT, [1,10])

# Intermediate activation value-infos (conv -> bias add -> relu -> pool).
Convolution28_Output_0 =        helper.make_tensor_value_info('Convolution28_Output_0', TensorProto.FLOAT, [1,8,28,28])
Plus30_Output_0 =               helper.make_tensor_value_info('Plus30_Output_0', TensorProto.FLOAT, [1,8,28,28])
ReLU32_Output_0 =               helper.make_tensor_value_info('ReLU32_Output_0', TensorProto.FLOAT, [1,8,28,28])
Pooling66_Output_0 =            helper.make_tensor_value_info('Pooling66_Output_0', TensorProto.FLOAT, [1,8,14,14])
github onnx / onnxmltools / onnxutils / onnxconverter_common / optimizer.py View on Github external
# NOTE(review): snippet is truncated — the comprehension on the last line is
# missing its closing bracket in this excerpt, and `alter_tensors` comes from
# the enclosing function. PEP 8 also discourages binding a lambda to a name;
# a small `def` would be clearer.
# Rebuilds an initializer with shape (dims[0], 1, 1), flattening the payload.
update_tensor = lambda x: \
        helper.make_tensor(x.name, x.data_type, (x.dims[0], 1, 1),
                           onnx.numpy_helper.to_array(x).flatten())
    new_initializer = [init_ if init_.name not in alter_tensors else update_tensor(init_)
github pfnet-research / chainer-compiler / utils / run_onnx_util.py View on Github external
def load_test_data(data_dir, input_names, output_names):
    """Load input/output .pb test tensors from *data_dir*.

    Returns a 2-tuple (inputs, outputs), each a list of
    (name, numpy array) pairs. A tensor whose embedded name matches one
    of the expected names claims that name; otherwise names are assigned
    in the order given.
    """
    collected = []
    for kind, expected in (('input', input_names), ('output', output_names)):
        pending = list(expected)
        pairs = []
        pattern = os.path.join(data_dir, '%s_*.pb' % kind)
        for pb_path in sorted(glob.glob(pattern)):
            tensor = onnx.TensorProto()
            with open(pb_path, 'rb') as fh:
                tensor.ParseFromString(fh.read())
            if tensor.name in pending:
                label = tensor.name
                pending.remove(label)
            else:
                # Fall back to positional assignment for unnamed tensors.
                label = pending.pop(0)
            pairs.append((label, onnx.numpy_helper.to_array(tensor)))
        collected.append(pairs)
    return tuple(collected)
github microsoft / OLive / docker-images / onnx-converter / src / check_model.py View on Github external
def gen_input_list(input_path, isPytorch=False):
    """Read consecutive input_<i>.pb files under *input_path*.

    Returns the arrays as torch tensors when *isPytorch* is true, as numpy
    arrays otherwise. A single input is returned bare; multiple inputs (or
    none) come back as a list.
    """
    loaded = []
    index = 0
    print(input_path)
    while True:
        pb_file = os.path.join(input_path, "input_%s.pb" % index)
        if not os.path.isfile(pb_file):
            break
        arr = numpy_helper.to_array(readInputFromFile(pb_file))
        loaded.append(torch.tensor(arr) if isPytorch else arr)
        index += 1
    return loaded[0] if len(loaded) == 1 else loaded