How to use the tf2onnx.rewriter.rnn_utils.get_np_val_for_const function in tf2onnx

To help you get started, we’ve selected a few tf2onnx examples based on popular ways the library is used in public projects.

Secure your code as it’s written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.

GitHub — onnx/keras-onnx: keras2onnx/ktf2onnx/tf2onnx/rewriter/bilstm_rewriter.py (view on GitHub, external link)
def process_bilstm(g, bi_lstms):
    """Fuse matched forward/backward LSTM node pairs into bidirectional form.

    NOTE(review): this snippet is truncated by the scraper -- the loop body
    ends abruptly after the init-state handling, so the construction of the
    fused bidirectional node is not visible here.

    Args:
        g: graph being rewritten (provides get_nodes() and node helpers).
        bi_lstms: iterable of (forward_lstm, backward_lstm) node pairs.
    """
    for lstm_fw, lstm_bw in bi_lstms:
        logger.debug("=========================")
        logger.debug("start handling potential bidirectional lstm: %s, %s", lstm_fw.name, lstm_bw.name)

        # Fetch the constant tensors at inputs 1-3 of each direction's node:
        # W (input weights), R (recurrence weights) and B (bias).
        w_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 1)
        w_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 1)
        r_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 2)
        r_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 2)
        b_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 3)
        b_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 3)
        # Stack forward and backward values along axis 0 -- presumably the
        # ONNX [num_directions, ...] layout for bidirectional RNNs; verify
        # against the ONNX LSTM spec.
        W = np.concatenate((w_fw, w_bw), axis=0)
        R = np.concatenate((r_fw, r_bw), axis=0)
        B = np.concatenate((b_fw, b_bw), axis=0)

        all_nodes = g.get_nodes()
        # Both directions must take the same number of inputs to be fused;
        # more than 4 inputs indicates explicit initial h/c states.
        # NOTE(review): when inputs <= 4, h_node/c_node are never assigned --
        # any later use (not visible here) would raise UnboundLocalError.
        if len(lstm_fw.inputs) == len(lstm_bw.inputs):
            if len(lstm_fw.inputs) > 4:
                h_node, c_node = process_ch_init_nodes(g, lstm_fw, lstm_bw, all_nodes)
        else:
            logger.error("fw, bw lstm inputs num is not consistent. stop")
            continue
GitHub — onnx/keras-onnx: keras2onnx/ktf2onnx/tf2onnx/rewriter/bilstm_rewriter.py (view on GitHub, external link)
def process_bilstm(g, bi_lstms):
    """Fuse matched forward/backward LSTM node pairs into bidirectional form.

    NOTE(review): this snippet is truncated by the scraper -- the loop body
    ends abruptly after the init-state handling, so the construction of the
    fused bidirectional node is not visible here.

    Args:
        g: graph being rewritten (provides get_nodes() and node helpers).
        bi_lstms: iterable of (forward_lstm, backward_lstm) node pairs.
    """
    for lstm_fw, lstm_bw in bi_lstms:
        logger.debug("=========================")
        logger.debug("start handling potential bidirectional lstm: %s, %s", lstm_fw.name, lstm_bw.name)

        # Fetch the constant tensors at inputs 1-3 of each direction's node:
        # W (input weights), R (recurrence weights) and B (bias).
        w_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 1)
        w_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 1)
        r_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 2)
        r_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 2)
        b_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 3)
        b_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 3)
        # Stack forward and backward values along axis 0 -- presumably the
        # ONNX [num_directions, ...] layout for bidirectional RNNs; verify
        # against the ONNX LSTM spec.
        W = np.concatenate((w_fw, w_bw), axis=0)
        R = np.concatenate((r_fw, r_bw), axis=0)
        B = np.concatenate((b_fw, b_bw), axis=0)

        all_nodes = g.get_nodes()
        # Both directions must take the same number of inputs to be fused;
        # more than 4 inputs indicates explicit initial h/c states.
        # NOTE(review): when inputs <= 4, h_node/c_node are never assigned --
        # any later use (not visible here) would raise UnboundLocalError.
        if len(lstm_fw.inputs) == len(lstm_bw.inputs):
            if len(lstm_fw.inputs) > 4:
                h_node, c_node = process_ch_init_nodes(g, lstm_fw, lstm_bw, all_nodes)
        else:
            logger.error("fw, bw lstm inputs num is not consistent. stop")
            continue
GitHub — onnx/keras-onnx: keras2onnx/ktf2onnx/tf2onnx/rewriter/bigru_rewriter.py (view on GitHub, external link)
def process_bigru(g, bi_grus):
    """Fuse matched forward/backward GRU node pairs into bidirectional form.

    NOTE(review): this snippet is truncated by the scraper -- it ends just
    after the fused W constant is created, so the rest of the node
    construction is not visible here.

    Args:
        g: graph being rewritten (provides get_nodes()/make_const helpers).
        bi_grus: iterable of (forward_gru, backward_gru) node pairs.
    """
    for gru_fw, gru_bw in bi_grus:
        logger.debug("=========================")
        logger.debug("start handling potential bidirectional gru: %s, %s", gru_fw.name, gru_bw.name)

        # Fetch the constant tensors at inputs 1-3 of each direction's node:
        # W (input weights), R (recurrence weights) and B (bias).
        w_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 1)
        w_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 1)
        r_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 2)
        r_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 2)
        b_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 3)
        b_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 3)
        # Stack forward and backward values along axis 0 -- presumably the
        # ONNX [num_directions, ...] layout for bidirectional RNNs; verify
        # against the ONNX GRU spec.
        W = np.concatenate((w_fw, w_bw), axis=0)
        R = np.concatenate((r_fw, r_bw), axis=0)
        B = np.concatenate((b_fw, b_bw), axis=0)

        all_nodes = g.get_nodes()
        # Both directions must take the same number of inputs to be fused;
        # more than 4 inputs indicates an explicit initial-state input.
        # NOTE(review): when inputs <= 4, initializer_node is never assigned --
        # any later use (not visible here) would raise UnboundLocalError.
        if len(gru_fw.inputs) == len(gru_bw.inputs):
            if len(gru_fw.inputs) > 4:
                initializer_node = process_init_nodes(g, gru_fw, gru_bw, all_nodes)
        else:
            logger.error("fw, bw gru inputs num is not consistent. stop")
            continue

        # Create a graph constant holding the fused bidirectional W tensor.
        w_name = utils.make_name("W")
        w_node = g.make_const(w_name, W, skip_conversion=True)
GitHub — onnx/keras-onnx: keras2onnx/ktf2onnx/tf2onnx/rewriter/bigru_rewriter.py (view on GitHub, external link)
def process_bigru(g, bi_grus):
    """Fuse matched forward/backward GRU node pairs into bidirectional form.

    NOTE(review): this snippet is truncated by the scraper -- the loop body
    ends abruptly after the init-state handling, so the construction of the
    fused bidirectional node is not visible here.

    Args:
        g: graph being rewritten (provides get_nodes() and node helpers).
        bi_grus: iterable of (forward_gru, backward_gru) node pairs.
    """
    for gru_fw, gru_bw in bi_grus:
        logger.debug("=========================")
        logger.debug("start handling potential bidirectional gru: %s, %s", gru_fw.name, gru_bw.name)

        # Fetch the constant tensors at inputs 1-3 of each direction's node:
        # W (input weights), R (recurrence weights) and B (bias).
        w_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 1)
        w_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 1)
        r_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 2)
        r_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 2)
        b_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 3)
        b_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 3)
        # Stack forward and backward values along axis 0 -- presumably the
        # ONNX [num_directions, ...] layout for bidirectional RNNs; verify
        # against the ONNX GRU spec.
        W = np.concatenate((w_fw, w_bw), axis=0)
        R = np.concatenate((r_fw, r_bw), axis=0)
        B = np.concatenate((b_fw, b_bw), axis=0)

        all_nodes = g.get_nodes()
        # Both directions must take the same number of inputs to be fused;
        # more than 4 inputs indicates an explicit initial-state input.
        # NOTE(review): when inputs <= 4, initializer_node is never assigned --
        # any later use (not visible here) would raise UnboundLocalError.
        if len(gru_fw.inputs) == len(gru_bw.inputs):
            if len(gru_fw.inputs) > 4:
                initializer_node = process_init_nodes(g, gru_fw, gru_bw, all_nodes)
        else:
            logger.error("fw, bw gru inputs num is not consistent. stop")
            continue
GitHub — onnx/keras-onnx: keras2onnx/ktf2onnx/tf2onnx/rewriter/bigru_rewriter.py (view on GitHub, external link)
def process_bigru(g, bi_grus):
    """Fuse matched forward/backward GRU node pairs into bidirectional form.

    NOTE(review): this snippet is truncated by the scraper -- it ends just
    after the fused W constant is appended, so the rest of the node
    construction is not visible here.

    Args:
        g: graph being rewritten (provides get_nodes()/make_const helpers).
        bi_grus: iterable of (forward_gru, backward_gru) node pairs.
    """
    for gru_fw, gru_bw in bi_grus:
        logger.debug("=========================")
        logger.debug("start handling potential bidirectional gru: %s, %s", gru_fw.name, gru_bw.name)

        # Fetch the constant tensors at inputs 1-3 of each direction's node:
        # W (input weights), R (recurrence weights) and B (bias).
        w_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 1)
        w_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 1)
        r_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 2)
        r_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 2)
        b_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 3)
        b_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 3)
        # Stack forward and backward values along axis 0 -- presumably the
        # ONNX [num_directions, ...] layout for bidirectional RNNs; verify
        # against the ONNX GRU spec.
        W = np.concatenate((w_fw, w_bw), axis=0)
        R = np.concatenate((r_fw, r_bw), axis=0)
        B = np.concatenate((b_fw, b_bw), axis=0)

        all_nodes = g.get_nodes()
        # Both directions must take the same number of inputs to be fused;
        # more than 4 inputs indicates an explicit initial-state input.
        # NOTE(review): when inputs <= 4, initializer_node is never assigned --
        # any later use (not visible here) would raise UnboundLocalError.
        if len(gru_fw.inputs) == len(gru_bw.inputs):
            if len(gru_fw.inputs) > 4:
                initializer_node = process_init_nodes(g, gru_fw, gru_bw, all_nodes)
        else:
            logger.error("fw, bw gru inputs num is not consistent. stop")
            continue

        # Create a graph constant holding the fused bidirectional W tensor
        # and register it in the working node list.
        w_name = utils.make_name("W")
        w_node = g.make_const(w_name, W, skip_conversion=True)
        all_nodes.append(w_node)
GitHub — onnx/keras-onnx: keras2onnx/ktf2onnx/tf2onnx/rewriter/bigru_rewriter.py (view on GitHub, external link)
def process_bigru(g, bi_grus):
    """Fuse matched forward/backward GRU node pairs into bidirectional form.

    NOTE(review): this snippet is truncated by the scraper -- the loop body
    ends abruptly after the init-state handling, so the construction of the
    fused bidirectional node is not visible here.

    Args:
        g: graph being rewritten (provides get_nodes() and node helpers).
        bi_grus: iterable of (forward_gru, backward_gru) node pairs.
    """
    for gru_fw, gru_bw in bi_grus:
        logger.debug("=========================")
        logger.debug("start handling potential bidirectional gru: %s, %s", gru_fw.name, gru_bw.name)

        # Fetch the constant tensors at inputs 1-3 of each direction's node:
        # W (input weights), R (recurrence weights) and B (bias).
        w_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 1)
        w_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 1)
        r_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 2)
        r_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 2)
        b_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 3)
        b_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 3)
        # Stack forward and backward values along axis 0 -- presumably the
        # ONNX [num_directions, ...] layout for bidirectional RNNs; verify
        # against the ONNX GRU spec.
        W = np.concatenate((w_fw, w_bw), axis=0)
        R = np.concatenate((r_fw, r_bw), axis=0)
        B = np.concatenate((b_fw, b_bw), axis=0)

        all_nodes = g.get_nodes()
        # Both directions must take the same number of inputs to be fused;
        # more than 4 inputs indicates an explicit initial-state input.
        # NOTE(review): when inputs <= 4, initializer_node is never assigned --
        # any later use (not visible here) would raise UnboundLocalError.
        if len(gru_fw.inputs) == len(gru_bw.inputs):
            if len(gru_fw.inputs) > 4:
                initializer_node = process_init_nodes(g, gru_fw, gru_bw, all_nodes)
        else:
            logger.error("fw, bw gru inputs num is not consistent. stop")
            continue
GitHub — onnx/keras-onnx: keras2onnx/ktf2onnx/tf2onnx/rewriter/bilstm_rewriter.py (view on GitHub, external link)
def process_bilstm(g, bi_lstms):
    """Fuse matched forward/backward LSTM node pairs into bidirectional form.

    NOTE(review): this snippet is truncated by the scraper -- it ends just
    after the fused W constant is appended, so the rest of the node
    construction is not visible here.

    Args:
        g: graph being rewritten (provides get_nodes()/make_const helpers).
        bi_lstms: iterable of (forward_lstm, backward_lstm) node pairs.
    """
    for lstm_fw, lstm_bw in bi_lstms:
        logger.debug("=========================")
        logger.debug("start handling potential bidirectional lstm: %s, %s", lstm_fw.name, lstm_bw.name)

        # Fetch the constant tensors at inputs 1-3 of each direction's node:
        # W (input weights), R (recurrence weights) and B (bias).
        w_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 1)
        w_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 1)
        r_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 2)
        r_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 2)
        b_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 3)
        b_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 3)
        # Stack forward and backward values along axis 0 -- presumably the
        # ONNX [num_directions, ...] layout for bidirectional RNNs; verify
        # against the ONNX LSTM spec.
        W = np.concatenate((w_fw, w_bw), axis=0)
        R = np.concatenate((r_fw, r_bw), axis=0)
        B = np.concatenate((b_fw, b_bw), axis=0)

        all_nodes = g.get_nodes()
        # Both directions must take the same number of inputs to be fused;
        # more than 4 inputs indicates explicit initial h/c states.
        # NOTE(review): when inputs <= 4, h_node/c_node are never assigned --
        # any later use (not visible here) would raise UnboundLocalError.
        if len(lstm_fw.inputs) == len(lstm_bw.inputs):
            if len(lstm_fw.inputs) > 4:
                h_node, c_node = process_ch_init_nodes(g, lstm_fw, lstm_bw, all_nodes)
        else:
            logger.error("fw, bw lstm inputs num is not consistent. stop")
            continue

        # Create a graph constant holding the fused bidirectional W tensor
        # and register it in the working node list.
        w_name = utils.make_name("W")
        w_node = g.make_const(w_name, W, skip_conversion=True)
        all_nodes.append(w_node)
GitHub — onnx/keras-onnx: keras2onnx/ktf2onnx/tf2onnx/rewriter/bilstm_rewriter.py (view on GitHub, external link)
def process_bilstm(g, bi_lstms):
    """Fuse matched forward/backward LSTM node pairs into bidirectional form.

    NOTE(review): this snippet is truncated by the scraper -- it breaks off
    at the "create node" step, so the fused-node construction is not
    visible here.

    Args:
        g: graph being rewritten (provides get_nodes() and node helpers).
        bi_lstms: iterable of (forward_lstm, backward_lstm) node pairs.
    """
    for lstm_fw, lstm_bw in bi_lstms:
        logger.debug("=========================")
        logger.debug("start handling potential bidirectional lstm: %s, %s", lstm_fw.name, lstm_bw.name)

        # Fetch the constant tensors at inputs 1-3 of each direction's node:
        # W (input weights), R (recurrence weights) and B (bias).
        w_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 1)
        w_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 1)
        r_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 2)
        r_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 2)
        b_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 3)
        b_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 3)
        # Stack forward and backward values along axis 0 -- presumably the
        # ONNX [num_directions, ...] layout for bidirectional RNNs; verify
        # against the ONNX LSTM spec.
        W = np.concatenate((w_fw, w_bw), axis=0)
        R = np.concatenate((r_fw, r_bw), axis=0)
        B = np.concatenate((b_fw, b_bw), axis=0)

        all_nodes = g.get_nodes()
        # Both directions must take the same number of inputs to be fused;
        # more than 4 inputs indicates explicit initial h/c states.
        # NOTE(review): when inputs <= 4, h_node/c_node are never assigned --
        # any later use (not visible here) would raise UnboundLocalError.
        if len(lstm_fw.inputs) == len(lstm_bw.inputs):
            if len(lstm_fw.inputs) > 4:
                h_node, c_node = process_ch_init_nodes(g, lstm_fw, lstm_bw, all_nodes)
        else:
            logger.error("fw, bw lstm inputs num is not consistent. stop")
            continue

        # create fused constant nodes (snippet truncated at this point)