How to use the tf2onnx.constants.NHWC_TO_NCHW constant in tf2onnx

To help you get started, we've selected a few tf2onnx.constants.NHWC_TO_NCHW examples, based on popular ways it is used in public projects.

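NHWC_TO_NCHW is the axis permutation [0, 3, 1, 2] that moves a TensorFlow-style NHWC tensor (batch, height, width, channels) into the NCHW layout that ONNX convolution-style ops expect; its inverse is constants.NCHW_TO_NHWC ([0, 2, 3, 1]). A minimal sketch of the constant in isolation, using plain numpy:

import numpy as np
from tf2onnx import constants

# NHWC_TO_NCHW == [0, 3, 1, 2]: move the channel axis ahead of the spatial axes.
x_nhwc = np.zeros((1, 224, 224, 3), dtype=np.float32)  # batch, height, width, channels
x_nchw = x_nhwc.transpose(constants.NHWC_TO_NCHW)
assert x_nchw.shape == (1, 3, 224, 224)

# NCHW_TO_NHWC == [0, 2, 3, 1] undoes the permutation.
assert x_nchw.transpose(constants.NCHW_TO_NHWC).shape == x_nhwc.shape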

github onnx / tensorflow-onnx / tf2onnx / onnx_opset / nn.py
def add_padding(ctx, node, kernel_shape, strides, dilations=None, spatial=2):
    padding = node.get_attr("padding")
    if padding:
        if dilations is None:
            dilations = [1] * spatial * 2
        padding = padding.s.decode("utf-8")
        if padding == 'SAME':
            pads = [0] * spatial * 2
            input_shape = ctx.get_shape(node.input[0])
            output_shape = ctx.get_shape(node.output[0])
            # check if the input shape is valid
            if len(input_shape) != len(pads):
                logger.error("node %s input needs to be rank %d, is %d", node.name, len(pads), len(input_shape))
            # transpose shape to nchw
            if node.is_nhwc():
                input_shape = spatial_map(input_shape, constants.NHWC_TO_NCHW)
                output_shape = spatial_map(output_shape, constants.NHWC_TO_NCHW)
            # calculate pads
            if any(input_shape[i + 2] == -1 or output_shape[i + 2] == -1 for i in range(spatial)):
                logger.debug(
                    "node %s has unknown dim for pads calculation, fallback to auto_pad: "
                    "input_shape=%s, output_shape=%s",
                    node.name, input_shape, output_shape)
                node.set_attr("auto_pad", "SAME_UPPER")
            else:
                for i in range(spatial):
                    pad = (output_shape[i + 2] - 1) * strides[i] + dilations[i] * kernel_shape[i] - input_shape[i + 2]
                    pad = max(pad, 0)
                    pads[i] = pad // 2
                    pads[i + spatial] = pad - pad // 2
                node.set_attr("pads", pads)
github onnx / keras-onnx / keras2onnx / ktf2onnx / tf2onnx / onnx_opset / nn.py
def add_padding(ctx, node, kernel_shape, strides, dilations=None, spatial=2):
    padding = node.get_attr("padding")
    if padding:
        if dilations is None:
            dilations = [1] * spatial * 2
        padding = padding.s.decode("utf-8")
        if padding == 'SAME':
            pads = [0] * spatial * 2
            input_shape = ctx.get_shape(node.input[0])
            output_shape = ctx.get_shape(node.output[0])
            # check if the input shape is valid
            if len(input_shape) != len(pads):
                logger.error("node %s input needs to be rank %d, is %d", node.name, len(pads), len(input_shape))
            # transpose shape to nchw
            if node.is_nhwc():
                input_shape = spatial_map(input_shape, constants.NHWC_TO_NCHW)
                output_shape = spatial_map(output_shape, constants.NHWC_TO_NCHW)
            # calculate pads
            if any(input_shape[i + 2] == -1 or output_shape[i + 2] == -1 for i in range(spatial)):
                logger.debug(
                    "node %s has unknown dim for pads calculation, fallback to auto_pad: "
                    "input_shape=%s, output_shape=%s",
                    node.name, input_shape, output_shape)
                node.set_attr("auto_pad", "SAME_UPPER")
            else:
                for i in range(spatial):
                    pad = (output_shape[i + 2] - 1) * strides[i] + dilations[i] * kernel_shape[i] - input_shape[i + 2]
                    pad = max(pad, 0)
                    pads[i] = pad // 2
                    pads[i + spatial] = pad - pad // 2
                node.set_attr("pads", pads)

        elif padding == 'VALID':
github onnx / keras-onnx / keras2onnx / ktf2onnx / tf2onnx / onnx_opset / math.py
def version_1(cls, ctx, node, **kwargs):
        # ONNX: Each input value is divided by (bias+(alpha/size)*sum(xi^2 for every xi in the local region))^beta
        # TF: sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
        #     output = input / (bias + alpha * sqr_sum) ** beta

        # by default, depth_radius is 5 in tensorflow
        size = node.get_attr_value("depth_radius", 5) * 2 + 1

        node.set_attr("size", size)
        node.set_attr("alpha", size * node.get_attr("alpha").f)

        shapes = node.output_shapes[0]
        dtypes = node.output_dtypes[0]

        ctx.insert_new_node_on_input(node, "Transpose", node.input[0], perm=constants.NHWC_TO_NCHW)
        ctx.update_node_shape_dtype(node, override=True)
        op_name = utils.make_name(node.name)
        ctx.insert_new_node_on_output("Transpose", node.output[0], perm=constants.NCHW_TO_NHWC,
                                      name=op_name, shapes=shapes, dtypes=dtypes)
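
This is the recurring "transpose sandwich": transpose the input to NCHW, run the NCHW-only ONNX op, then transpose the result back. The attribute arithmetic is easy to check by hand: with TF's default depth_radius of 5, size = 5 * 2 + 1 = 11, and the ONNX alpha is the TF alpha multiplied by that size. A small numpy sketch of the layout round trip (shapes are illustrative):

import numpy as np
from tf2onnx import constants

depth_radius = 5                 # TensorFlow's default
size = depth_radius * 2 + 1      # the ONNX LRN window
assert size == 11

x = np.random.rand(1, 8, 8, 4).astype(np.float32)   # NHWC input
x_nchw = x.transpose(constants.NHWC_TO_NCHW)         # feed the NCHW-only op
x_back = x_nchw.transpose(constants.NCHW_TO_NHWC)    # restore the TF layout
assert np.array_equal(x, x_back)                     # the two perms are inverses
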
github onnx / tensorflow-onnx / tf2onnx / optimizer / transpose_optimizer.py
def _switch_transpose_and_node(self, node, trans):
        if not self._nodes_has_single_consumer_node([trans]):
            return False

        input_index = self._get_input_index_for_trans(node, trans)

        ops = self._g.get_nodes()
        self._g.replace_all_inputs(ops, node.output[0], trans.output[0])
        node.input[input_index] = trans.input[0]
        trans.input[0] = node.output[0]

        # need to transpose node shape in backward direction as well after switch
        # otherwise, reshape added in post_optimize_action may not work correctly
        shape = self._g.get_shape(node.output[0])
        if shape:
            # only nhwc transpose can reach here
            new_shape = [shape[i] for i in NHWC_TO_NCHW]
            self._g.set_shape(node.output[0], new_shape)
        return True
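
The list comprehension works because reindexing a shape by the permutation is equivalent to transposing the tensor itself:

NHWC_TO_NCHW = [0, 3, 1, 2]

shape = [1, 224, 224, 3]                       # NHWC shape recorded on the node
new_shape = [shape[i] for i in NHWC_TO_NCHW]   # same remap as the optimizer
assert new_shape == [1, 3, 224, 224]           # NCHW
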
github onnx / keras-onnx / keras2onnx / ktf2onnx / tf2onnx / onnx_opset / nn.py
            ori_shape = ctx.make_node("Shape", [node.input[0]])
            attr = {"axes": [0], "starts": [1], "ends": [3]}
            inputs_map = {"data": ori_shape.output[0], **attr}
            ori_shape_hw = GraphBuilder(ctx).make_slice(inputs_map)
            ori_shape_hw_float = ctx.make_node("Cast", [ori_shape_hw], attr={"to": onnx_pb.TensorProto.FLOAT})

            target_hw = node.inputs[1]
            target_hw_float = ctx.make_node("Cast", target_hw.output, attr={"to": onnx_pb.TensorProto.FLOAT})

            scales_hw = ctx.make_node("Div", [target_hw_float.output[0], ori_shape_hw_float.output[0]])

            const_one_array = ctx.make_const(utils.make_name("one"), np.array([1.0, 1.0]).astype(np.float32))
            # scales is nchw
            scales = ctx.make_node("Concat", [const_one_array.output[0], scales_hw.output[0]], {"axis": 0})
        # because onnxruntime only supports scaling the last two dims, a transpose is inserted
        input_nchw = ctx.make_node("Transpose", [node.input[0]], {"perm": constants.NHWC_TO_NCHW})
        upsample = ctx.make_node(op_type, [input_nchw.output[0], scales.output[0]], attr={"mode": mode})

        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        ctx.make_node("Transpose", upsample.output, {"perm": constants.NCHW_TO_NHWC},
                      name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
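
The Concat above assembles the NCHW-ordered scales input for Upsample/Resize: [1.0, 1.0] for the batch and channel axes, followed by the height/width ratios. The same arithmetic in plain numpy, with made-up sizes:

import numpy as np

ori_hw = np.array([224.0, 224.0], dtype=np.float32)     # original height, width
target_hw = np.array([448.0, 448.0], dtype=np.float32)  # requested height, width
scales_hw = target_hw / ori_hw                          # the Div node above
scales = np.concatenate([np.array([1.0, 1.0], np.float32), scales_hw])
assert scales.tolist() == [1.0, 1.0, 2.0, 2.0]          # N, C, H, W order
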
github onnx / keras-onnx / keras2onnx / ktf2onnx / tf2onnx / optimizer / transpose_optimizer.py
def _switch_transpose_and_node(self, node, trans):
        if not self._nodes_has_single_consumer_node([trans]):
            return False

        input_index = self._get_input_index_for_trans(node, trans)

        ops = self._g.get_nodes()
        self._g.replace_all_inputs(ops, node.output[0], trans.output[0])
        node.input[input_index] = trans.input[0]
        trans.input[0] = node.output[0]

        # need to transpose node shape in backward direction as well after switch
        # otherwise, reshape added in post_optimize_action may not work correctly
        shape = self._g.get_shape(node.output[0])
        if shape:
            # only nhwc transpose can reach here
            new_shape = [shape[i] for i in NHWC_TO_NCHW]
            self._g.set_shape(node.output[0], new_shape)
        return True
github onnx / keras-onnx / keras2onnx / ktf2onnx / tf2onnx / onnx_opset / nn.py
def conv_convert_inputs(ctx, node, with_kernel=False, new_kernel_shape=None,
                        input_indices=None, output_indices=None):
    """Convert inputs from NHWC to NCHW, transposing constant inputs in place
    and inserting Transpose ops otherwise.
        with_kernel: transpose the kernel
        new_kernel_shape: reshape the kernel
    """

    if input_indices is None:
        input_indices = [0]
    if output_indices is None:
        output_indices = [0]

    if node.is_nhwc():
        # transpose input if needed, no need to record shapes on input
        for idx in input_indices:
            parent = node.inputs[idx]
            if node.inputs[idx].is_const() and len(ctx.find_output_consumers(node.input[idx])) == 1:
                # if input is a constant, transpose that one if we are the only consumer
                val = parent.get_tensor_value(as_list=False)
                parent.set_tensor_value(val.transpose(constants.NHWC_TO_NCHW))
            else:
                # if input comes from a op, insert transpose op
                input_name = node.input[idx]
                transpose = ctx.insert_new_node_on_input(node, "Transpose", input_name)
                transpose.set_attr("perm", constants.NHWC_TO_NCHW)
                transpose.skip_conversion = True
                shape = ctx.get_shape(input_name)
                if shape is not None:
                    new_shape = spatial_map(shape, constants.NHWC_TO_NCHW)
                    ctx.set_shape(transpose.output[0], new_shape)

    # kernel must be transposed
    if with_kernel:
        parent = node.inputs[1]
        need_transpose = True
        if node.inputs[1].is_const():
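
The is_const branch above makes a conversion-time trade: when the NHWC input is a constant with a single consumer, the permutation is folded into the tensor value itself, so no runtime Transpose node is needed; otherwise a Transpose op is inserted. Folding is just an eager transpose (hypothetical shapes):

import numpy as np

NHWC_TO_NCHW = [0, 3, 1, 2]

weight_nhwc = np.random.rand(1, 5, 5, 2).astype(np.float32)  # constant NHWC tensor
weight_nchw = weight_nhwc.transpose(NHWC_TO_NCHW)            # folded once, at convert time
assert weight_nchw.shape == (1, 2, 5, 5)                     # no Transpose node left in the graph
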
github onnx / tensorflow-onnx / tf2onnx / optimizer / transpose_optimizer.py
def is_nchw_transpose(transpose_node):
    perm_attr = transpose_node.get_attr('perm')
    return transpose_node.type == "Transpose" and perm_attr and perm_attr.ints == NHWC_TO_NCHW
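
This predicate runs on tf2onnx's Node wrapper. The same test expressed against a raw onnx NodeProto looks like this (a sketch; the function name is mine):

from onnx import helper

NHWC_TO_NCHW = [0, 3, 1, 2]

def is_nchw_transpose_proto(node_proto):
    # A to-NCHW transpose is identified purely by op type and its perm attribute.
    if node_proto.op_type != "Transpose":
        return False
    perms = [a for a in node_proto.attribute if a.name == "perm"]
    return bool(perms) and list(perms[0].ints) == NHWC_TO_NCHW

node = helper.make_node("Transpose", ["x"], ["y"], perm=NHWC_TO_NCHW)
assert is_nchw_transpose_proto(node)
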
github onnx / tensorflow-onnx / tf2onnx / onnx_opset / math.py
def version_1(cls, ctx, node, **kwargs):
        # ONNX: Each input value is divided by (bias+(alpha/size)*sum(xi^2 for every xi in the local region))^beta
        # TF: sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
        #     output = input / (bias + alpha * sqr_sum) ** beta

        # by default, depth_radius is 5 in tensorflow
        size = node.get_attr_value("depth_radius", 5) * 2 + 1

        node.set_attr("size", size)
        node.set_attr("alpha", size * node.get_attr("alpha").f)

        shapes = node.output_shapes[0]
        dtypes = node.output_dtypes[0]

        ctx.insert_new_node_on_input(node, "Transpose", node.input[0], perm=constants.NHWC_TO_NCHW)
        ctx.update_node_shape_dtype(node, override=True)
        op_name = utils.make_name(node.name)
        ctx.insert_new_node_on_output("Transpose", node.output[0], perm=constants.NCHW_TO_NHWC,
                                      name=op_name, shapes=shapes, dtypes=dtypes)
github onnx / keras-onnx / keras2onnx / ktf2onnx / tf2onnx / optimizer / transpose_optimizer.py
                expand_rank = self._g.make_node("Sub", [const_4, rank_node]).output[0]
                array_fill_1 = self._g.make_node("ConstantOfShape", [expand_rank], attr={"value": tensor_1}).output[0]
                new_shape = self._g.make_node("Concat", [array_fill_1, shape_node], attr={"axis": 0}).output[0]
                reshape = self._g.make_node("Reshape", [input_id, new_shape]).output[0]
                input_of_new_trans = reshape
            elif len(shape) == 4:
                input_of_new_trans = input_id
            else:
                shape_4d = shape_after_expand(shape)
                if shape_4d is None:
                    return False
                const = self._g.make_const(utils.make_name("reshape_shape"), np.array(shape_4d, np.int64)).output[0]
                reshape = self._g.make_node("Reshape", [input_id, const]).output[0]
                input_of_new_trans = reshape

            nchw_node = self._g.make_node("Transpose", [input_of_new_trans], attr={"perm": NHWC_TO_NCHW})
            nhwc_node = self._g.make_node("Transpose", [nchw_node.output[0]], attr={"perm": NCHW_TO_NHWC})
            self._g.replace_input(node, input_id, nhwc_node.output[0])
        return True
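
Both Transpose nodes assume rank-4 tensors, which is why the excerpt first pads lower-rank inputs up to rank 4, dynamically via Sub/ConstantOfShape/Concat/Reshape or statically via shape_after_expand. A numpy sketch of the static path, assuming a leading-1 expansion (the exact 4D shape is tf2onnx's choice):

import numpy as np

x = np.zeros((8, 8, 3), dtype=np.float32)   # rank 3: no batch axis yet
x4d = x.reshape((1,) + x.shape)             # expand to rank 4 before transposing
nchw = x4d.transpose([0, 3, 1, 2])          # NHWC -> NCHW
nhwc = nchw.transpose([0, 2, 3, 1])         # NCHW -> NHWC, restores the 4D layout
assert nhwc.shape == x4d.shape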