How to use the onnxmltools.convert.common._apply_operation.apply_reshape function in onnxmltools

To help you get started, we’ve selected a few onnxmltools examples based on popular ways apply_reshape is used in public projects.

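The snippets below all follow the same call pattern. Here is a minimal sketch of a typical call, assuming the surrounding conversion framework supplies the scope, operator, and container arguments; the converter function name and the target shape are illustrative only:

from onnxmltools.convert.common._apply_operation import apply_reshape

# Hypothetical converter: scope, operator, and container are provided by the onnxmltools framework.
def convert_my_operator(scope, operator, container):
    # Allocate a unique name for the reshaped tensor.
    reshaped_name = scope.get_unique_variable_name(operator.full_name + '_reshaped')
    # Emit a Reshape node; a single -1 in desired_shape lets that dimension be inferred.
    apply_reshape(scope, operator.inputs[0].full_name, reshaped_name, container,
                  desired_shape=[-1, 1, 1])

In every example, the arguments are the scope (used to generate unique names), the input tensor name(s), the output tensor name, the container that collects the ONNX nodes, and the desired_shape keyword.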

github onnx / onnxmltools / onnxmltools / convert / keras / operator_converters / Bidirectional.py
    container.add_node('LSTM', lstm_input_names, lstm_output_names, op_version=op_version, **lstm_attrs)

    if output_seq:
        # The runtime output shape is 3-D while the ONNX spec says 4-D, so we add a Reshape to fix it.
        lstm_y_name_fixed = scope.get_unique_variable_name(operator.full_name + '_Y_fixed')
        apply_reshape(scope, lstm_y_name, lstm_y_name_fixed, container, desired_shape=[seq_length, 2, -1, hidden_size])

        if merge_concat:
            # In this case, only one Keras output with shape (N, T, 2 * C') should be produced

            # Transpose ONNX LSTM Y with shape (T, D, N, C') into (T, N, D, C')
            transposed_y_name = scope.get_unique_variable_name(operator.full_name + '_Y_transposed')
            apply_transpose(scope, lstm_y_name_fixed, transposed_y_name, container, perm=[0, 2, 1, 3])

            # Change shape (T, N, D, C') to (N, T, D * C') to meet Keras spec
            apply_reshape(scope, transposed_y_name, operator.outputs[0].full_name, container,
                          desired_shape=[-1, seq_length, 2 * hidden_size])
        else:
            # If merge_mode=None, two tensors should be generated. The first/second tensor is the output of
            # forward/backward pass.

            # Transpose ONNX LSTM Y with shape (T, D, N, C') into (T, N, D, C')
            transposed_y_name = scope.get_unique_variable_name(operator.full_name + '_Y_transposed')
            apply_transpose(scope, lstm_y_name_fixed, transposed_y_name, container, perm=[0, 2, 1, 3])

            # Split the transposed Y with shape (T, N, D, C') into (T, N, 1, C') and (T, N, 1, C')
            forward_y_name = scope.get_unique_variable_name(operator.full_name + '_Y_forward')
            backward_y_name = scope.get_unique_variable_name(operator.full_name + '_Y_backward')
            apply_split(scope, transposed_y_name, [forward_y_name, backward_y_name], container, axis=2)

            # Change (T, N, 1, C') into (T, N, C') to meet Keras spec
            container.add_node('Squeeze', forward_y_name, operator.outputs[0].full_name,
github onnx / onnxmltools / onnxmltools / convert / sklearn / operator_converters / KNN.py
    container.add_initializer(distance_power_name, onnx_proto.TensorProto.FLOAT,
                              [], [distance_power])
    container.add_initializer(negate_name, onnx_proto.TensorProto.FLOAT,
                              [], [-1])

    if operator.type == 'SklearnKNeighborsRegressor':
        container.add_initializer(training_labels_name, onnx_proto.TensorProto.FLOAT,
                                  training_labels.shape, training_labels)

    apply_sub(scope, [operator.inputs[0].full_name, training_examples_name], sub_results_name, container, broadcast=1)
    apply_abs(scope, sub_results_name, abs_results_name, container)
    container.add_node('Pow', [abs_results_name, distance_power_name],
                       distance_name, name=scope.get_unique_operator_name('Pow'))
    container.add_node('ReduceSum', distance_name,
                       reduced_sum_name, name=scope.get_unique_operator_name('ReduceSum'), axes=[1])
    apply_reshape(scope, reduced_sum_name, reshaped_result_name, container, desired_shape=length)
    apply_mul(scope, [reshaped_result_name, negate_name], negated_reshaped_result_name, container, broadcast=1)
    container.add_node('TopK', negated_reshaped_result_name,
                       [topk_values_name, topk_indices_name], name=scope.get_unique_operator_name('TopK'), k=knn.n_neighbors)

    if operator.type == 'SklearnKNeighborsClassifier':
        classes = knn.classes_
        concat_labels_name = scope.get_unique_variable_name('concat_labels')
        classes_name = scope.get_unique_variable_name('classes')
        predicted_label_name = scope.get_unique_variable_name('predicted_label')
        final_label_name = scope.get_unique_variable_name('final_label')
        reshaped_final_label_name = scope.get_unique_variable_name('reshaped_final_label')
        
        class_type = onnx_proto.TensorProto.STRING
        labels_name = [None] * len(classes)
        output_label_name = [None] * len(classes)
        output_cast_label_name = [None] * len(classes)
github onnx / onnxmltools / onnxmltools / convert / coreml / operator_converters / neural_network / Embed.py
def convert_embedding(scope, operator, container):
    params = operator.raw_operator.embedding
    gather_op_name = scope.get_unique_operator_name('Gather')
    gather_attrs = {'name': gather_op_name}

    # Reshape the indices we want to embed into a 1-D tensor. Otherwise, ONNX Gather's output may get the wrong shape.
    reshaped_input_name = scope.get_unique_variable_name(gather_op_name + 'input_reshaped')  # 2nd input of Gather
    apply_reshape(scope, operator.inputs[0].full_name, reshaped_input_name, container, desired_shape=[-1])

    # ONNX Gather accepts integers so we add a Cast to enforce this before feeding input into ONNX Gather.
    casted_input_name = scope.get_unique_variable_name(gather_op_name + 'input_casted')  # 2nd input of Gather
    apply_cast(scope, reshaped_input_name, casted_input_name, container, to=onnx_proto.TensorProto.INT64)

    # Load the embedding matrix. Its shape is outputChannels-by-inputDim.
    weights = np.array(params.weights.floatValue).reshape(params.outputChannels, params.inputDim)
    weights_name = scope.get_unique_variable_name(gather_op_name + '_W')  # 1st input of Gather
    container.add_initializer(weights_name, onnx_proto.TensorProto.FLOAT,
                              [params.inputDim, params.outputChannels], weights.transpose().flatten().tolist())

    # To support the bias term in an embedding (if it exists), we need to create one extra node.
    if params.hasBias:
        # Put the embedded result onto a temporary tensor
        gather_output_name = scope.get_unique_variable_name(gather_op_name + '_output')
        container.add_node('Gather', [weights_name, casted_input_name], gather_output_name, **gather_attrs)
github onnx / onnxmltools / onnxmltools / convert / keras / operator_converters / Embed.py
        apply_cast(scope, cast1_name, reshaped_input_name, container, to=onnx_proto.TensorProto.INT64)

    # Prepare the weight matrix (i.e., the vectors of all input indices) as an initializer so that the following main
    # operator can access it.
    embedding_tensor_name = scope.get_unique_variable_name('W')
    weights = np.array(op.get_weights()[0].T).reshape(op.output_shape[-1], op.input_dim).transpose().flatten().tolist()
    container.add_initializer(embedding_tensor_name, onnx_proto.TensorProto.FLOAT,
                              [op.input_dim, op.output_shape[-1]], weights)

    # Create a Gather operator to extract the latent representation of each index
    op_type = 'Gather'
    attrs = {'name': operator.full_name}
    gather_name = scope.get_unique_variable_name('embedding_gather')
    container.add_node(op_type, [embedding_tensor_name, reshaped_input_name], gather_name, **attrs)
    output_shape = [-1 if dim == 'None' else dim for dim in operator.outputs[0].type.shape]
    apply_reshape(scope, gather_name, operator.output_full_names, container, desired_shape=output_shape)
github onnx / onnxmltools / onnxmltools / convert / keras / operator_converters / LSTM.py
    # We declare names to store the outputs produced by the ONNX LSTM, then create the ONNX LSTM node. Its
    # outputs may subsequently be adjusted to match the Keras format.
    lstm_y_name = scope.get_unique_variable_name('lstm_y')
    lstm_output_names.append(lstm_y_name)
    lstm_h_name = scope.get_unique_variable_name('lstm_h')
    lstm_output_names.append(lstm_h_name)
    lstm_c_name = scope.get_unique_variable_name('lstm_c')
    lstm_output_names.append(lstm_c_name)
    container.add_node(lstm__type, lstm_input_names, lstm_output_names, op_version=op_version, **lstm_attrs)

    # Create output-adjusting operators
    if output_seq:
        lstm_y_name_transposed = scope.get_unique_variable_name('lstm_y_transposed')
        apply_transpose(scope, lstm_y_name, lstm_y_name_transposed, container, perm=[1, 0, 2])
        apply_reshape(scope, lstm_y_name_transposed, operator.outputs[0].full_name, container,
                      desired_shape=[-1, seq_length, hidden_size])
    else:
        apply_reshape(scope, lstm_h_name, operator.outputs[0].full_name, container, desired_shape=[-1, hidden_size])

    if output_state:
        # state_h
        apply_reshape(scope, lstm_h_name, operator.outputs[1].full_name, container, desired_shape=[-1, hidden_size])
        # state_c
        apply_reshape(scope, lstm_c_name, operator.outputs[2].full_name, container, desired_shape=[-1, hidden_size])
github onnx / onnxmltools / onnxmltools / convert / sklearn / operator_converters / KNN.py
to=onnx_proto.TensorProto.INT32)
            container.add_node('ReduceSum', output_cast_label_name[i],
                                output_label_reduced_name[i], axes=[1])

        container.add_node('Concat', [s for s in output_label_reduced_name],
                           concat_labels_name, name=scope.get_unique_operator_name('Concat'), axis=0)
        container.add_node('ArgMax', concat_labels_name, 
                           predicted_label_name, name=scope.get_unique_operator_name('ArgMax'))
        container.add_node('ArrayFeatureExtractor', [classes_name, predicted_label_name], final_label_name,
                           name=scope.get_unique_operator_name('ArrayFeatureExtractor'), op_domain='ai.onnx.ml')
        if class_type == onnx_proto.TensorProto.INT32:
            apply_reshape(scope, final_label_name, reshaped_final_label_name, container, desired_shape=[-1,])
            apply_cast(scope, reshaped_final_label_name, operator.outputs[0].full_name, container,
                       to=onnx_proto.TensorProto.INT64)
        else:
            apply_reshape(scope, final_label_name, operator.outputs[0].full_name, container, desired_shape=[-1,])

        # Calculation of class probability
        pred_label_shape = [-1]

        cast_pred_label_name = scope.get_unique_variable_name('cast_pred_label')
        reshaped_pred_label_name = scope.get_unique_variable_name('reshaped_pred_label')
        reduced_prob_name = scope.get_unique_variable_name('reduced_prob')
        ohe_result_name = scope.get_unique_variable_name('ohe_result')

        apply_cast(scope, topk_labels_name, cast_pred_label_name, container, to=onnx_proto.TensorProto.INT64)
        apply_reshape(scope, cast_pred_label_name, reshaped_pred_label_name, container, desired_shape=pred_label_shape)
        if class_type == onnx_proto.TensorProto.STRING:
            container.add_node('OneHotEncoder', reshaped_pred_label_name, ohe_result_name,
                     name=scope.get_unique_operator_name('OneHotEncoder'), cats_strings=classes,
                     op_domain='ai.onnx.ml')
        else:
github onnx / onnxmltools / onnxmltools / convert / keras / operator_converters / GRU.py
def convert_keras_gru(scope, operator, container):
    op = operator.raw_operator
    if hasattr(op, 'return_state') and op.return_state:
        raise RuntimeError('support state in outputs not supported')
    hidden_size = op.units
    input_size = op.input_shape[-1]
    seq_length = op.input_shape[-2]
    output_seq = op.return_sequences
    reverse_input = op.go_backwards

    op_type = 'GRU'
    attrs = {'name': operator.full_name}
    gru_input_names = []

    gru_x_name = scope.get_unique_variable_name('gru_x')
    apply_reshape(scope, operator.inputs[0].full_name, gru_x_name, container, desired_shape=[-1, 1, input_size])
    gru_input_names.append(gru_x_name)

    tensor_w_name = scope.get_unique_variable_name('tensor_w')
    W = op.get_weights()[0].T
    container.add_initializer(tensor_w_name, onnx_proto.TensorProto.FLOAT,
                              [1, 3 * hidden_size, input_size], W.flatten())
    gru_input_names.append(tensor_w_name)

    tensor_r_name = scope.get_unique_variable_name('tensor_r')
    R = op.get_weights()[1].T
    container.add_initializer(tensor_r_name, onnx_proto.TensorProto.FLOAT,
                              [1, 3 * hidden_size, hidden_size], R.flatten())
    gru_input_names.append(tensor_r_name)

    B = op.get_weights()[2]
    if op.use_bias and len(B) > 0:
github onnx / onnxmltools / onnxmltools / convert / keras / operator_converters / Embed.py
def convert_keras_embed(scope, operator, container):
    op = operator.raw_operator  # Keras Embedding layer object
    if hasattr(op, 'mask_zero') and op.mask_zero:
        raise NotImplementedError("Embedding layer mask_zero attribute cannot be converted")

    # Reshape the indices we want to embed into a 1-D tensor. Otherwise, Gather's output may get the wrong shape;
    # this is the same approach as our CoreML Embedding converter.
    reshaped_input_name = scope.get_unique_variable_name('embedding_reshaped')
    if container.target_opset < 7:
        apply_reshape(scope, operator.inputs[0].full_name, reshaped_input_name, container, desired_shape=[-1])
    else:
        cast0_name = scope.get_unique_variable_name('embedding_cast0')
        cast1_name = scope.get_unique_variable_name('embedding_cast1')
        # Workaround for Reshape in ONNX 1.2 not supporting INT64
        apply_cast(scope, operator.inputs[0].full_name, cast0_name, container, to=onnx_proto.TensorProto.DOUBLE)
        apply_reshape(scope, cast0_name, cast1_name, container, desired_shape=[-1])
        apply_cast(scope, cast1_name, reshaped_input_name, container, to=onnx_proto.TensorProto.INT64)

    # Prepare the weight matrix (i.e., the vectors of all input indices) as an initializer so that the following main
    # operator can access it.
    embedding_tensor_name = scope.get_unique_variable_name('W')
    weights = np.array(op.get_weights()[0].T).reshape(op.output_shape[-1], op.input_dim).transpose().flatten().tolist()
    container.add_initializer(embedding_tensor_name, onnx_proto.TensorProto.FLOAT,
                              [op.input_dim, op.output_shape[-1]], weights)

    # Create a Gather operator to extract the latent representation of each index
github onnx / onnxmltools / onnxmltools / convert / coreml / operator_converters / neural_network / InnerProduct.py
    elif container.target_opset < 7:
        attrs['broadcast'] = 1
        op_version = 6
    elif container.target_opset < 9:
        op_version = 7
    elif container.target_opset < 11:
        op_version = 9
    else:
        op_version = 11
        
    # Create the main ONNX operator, Gemm, to implement the CoreML inner product, then add a shape adjustment if needed
    if len(operator.inputs[0].type.shape) == 4:
        # The input shape is [N, C, 1, 1], so we expect the output to also be 4-D, [N, C', 1, 1].
        buffer_tensor_name = scope.get_unique_variable_name(operator.full_name + '_buffer')
        container.add_node('Gemm', [name_a, name_b, name_c], buffer_tensor_name, op_version=op_version, **attrs)
        apply_reshape(scope, buffer_tensor_name, operator.outputs[0].full_name, container,
                      desired_shape=[-1, int(params.outputChannels), 1, 1])
    else:
        # Input shape is [N, C], so we don't need to change Gemm's output shape.
        container.add_node('Gemm', [name_a, name_b, name_c], operator.outputs[0].full_name,
                           op_version=op_version, **attrs)
github onnx / onnxmltools / onnxmltools / convert / keras / operator_converters / RepeatVector.py
def convert_keras_repeat_vector(scope, operator, container):
    op = operator.raw_operator

    intermediate_tensor_name = scope.get_unique_variable_name(operator.inputs[0].full_name + '_reshaped')
    apply_reshape(scope, operator.inputs[0].full_name, intermediate_tensor_name, container,
                  desired_shape=[-1, 1, op.input_shape[1]])

    repeats = [1, int(op.n), 1]
    apply_tile(scope, intermediate_tensor_name, operator.outputs[0].full_name, container, repeats=repeats)