How to use the nlp.nn.linear_logit function in nlp

To help you get started, we've selected a few examples of nlp.nn.linear_logit, based on popular ways it is used in public projects.

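Across the snippets below, linear_logit is used as a learned projection over the last axis of a tensor, with an optional activation and its own variable scope. The following is a rough sketch inferred from those call sites, not the repository's actual implementation (the real function lives in nlp/nn.py and may add extras such as dropout or layer normalization):

import tensorflow as tf

def linear_logit(x, units, act_fn=None, scope=None, reuse=None):
    # Rough mental model only, inferred from the call sites below:
    # a dense layer mapping the last axis of x to `units` dimensions,
    # with an optional activation.
    with tf.variable_scope(scope or 'linear_logit', reuse=reuse):
        return tf.layers.dense(x, units, activation=act_fn)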

github hanxiao / tf-nlp-blocks / nlp / match_blocks.py View on Github external
import tensorflow as tf

from nlp.nn import linear_logit


def Attentive_match(context, query, context_mask, query_mask,
                    num_units=None,
                    score_func='scaled', causality=False,
                    scope='attention_match_block', reuse=None, **kwargs):
    with tf.variable_scope(scope, reuse=reuse):
        batch_size, context_length, _ = context.get_shape().as_list()
        if num_units is None:
            num_units = context.get_shape().as_list()[-1]
        _, query_length, _ = query.get_shape().as_list()

        # Map both sequences to num_units dimensions before scoring.
        context = linear_logit(context, num_units, act_fn=tf.nn.relu, scope='context_mapping')
        query = linear_logit(query, num_units, act_fn=tf.nn.relu, scope='query_mapping')

        if score_func == 'dot':
            score = tf.matmul(context, query, transpose_b=True)
        elif score_func == 'bilinear':
            score = tf.matmul(linear_logit(context, num_units, scope='context_x_We'), query, transpose_b=True)
        elif score_func == 'scaled':
            score = tf.matmul(linear_logit(context, num_units, scope='context_x_We'), query, transpose_b=True) / \
                    (num_units ** 0.5)
        elif score_func == 'additive':
            score = tf.squeeze(linear_logit(
                tf.tanh(tf.tile(tf.expand_dims(linear_logit(context, num_units, scope='context_x_We'), axis=2),
                                [1, 1, query_length, 1]) +
                        tf.tile(tf.expand_dims(linear_logit(query, num_units, scope='query_x_We'), axis=1),
                                [1, context_length, 1, 1])), 1, scope='x_ve'), axis=3)
        else:
            raise NotImplementedError

        # Give padded positions a huge negative score so a later softmax
        # effectively ignores them.
        mask = tf.matmul(tf.expand_dims(context_mask, -1), tf.expand_dims(query_mask, -1), transpose_b=True)
        paddings = tf.ones_like(mask) * (-2 ** 32 + 1)
        masked_score = tf.where(tf.equal(mask, 0), paddings, score)  # B, Lc, Lq

        # Causality = Future blinding: a lower-triangular mask hides
        # positions to the right of the diagonal.
        if causality:
            diag_vals = tf.ones_like(masked_score[0, :, :])  # (Lc, Lq)
            # tf.contrib is TF 1.x; newer TF exposes tf.linalg.LinearOperatorLowerTriangular.
            tril = tf.contrib.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense()  # (Lc, Lq)
            masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(masked_score)[0], 1, 1])  # B, Lc, Lq

            paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
            masked_score = tf.where(tf.equal(masks, 0), paddings, masked_score)  # B, Lc, Lq
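
A minimal usage sketch, assuming the repository root is on the Python path. The shapes below are illustrative, and assigning the result is an assumption, since the snippet is cut off before the function's return statement:

import tensorflow as tf

from nlp.match_blocks import Attentive_match

# Illustrative, fixed shapes: batch 32, context length 20, query length 15,
# embedding size 128. Static lengths matter because the 'additive' branch
# tiles by query_length and context_length.
context = tf.placeholder(tf.float32, [32, 20, 128])
query = tf.placeholder(tf.float32, [32, 15, 128])
context_mask = tf.placeholder(tf.float32, [32, 20])  # 1.0 = real token, 0.0 = padding
query_mask = tf.placeholder(tf.float32, [32, 15])

# score_func is one of 'dot', 'bilinear', 'scaled', 'additive'.
# The return value is assumed; the snippet above omits the return statement.
matched = Attentive_match(context, query, context_mask, query_mask,
                          score_func='scaled', causality=False)
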
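
The 'additive' branch above is the densest line in the block: it builds every pairwise combination of context and query positions by expanding and tiling both tensors to a shared (B, Lc, Lq, D) shape, adds them, squashes the sums through tanh, and projects each D-dimensional pair vector down to a single logit. A toy sketch of the tiling step, with illustrative shapes:

import tensorflow as tf

B, Lc, Lq, D = 2, 4, 3, 8
c = tf.random_normal([B, Lc, D])
q = tf.random_normal([B, Lq, D])

c_exp = tf.tile(tf.expand_dims(c, axis=2), [1, 1, Lq, 1])  # B, Lc, Lq, D
q_exp = tf.tile(tf.expand_dims(q, axis=1), [1, Lc, 1, 1])  # B, Lc, Lq, D
pairwise = tf.tanh(c_exp + q_exp)                          # B, Lc, Lq, D
# linear_logit(pairwise, 1, scope='x_ve') then maps D -> 1, and the final
# tf.squeeze(..., axis=3) leaves a (B, Lc, Lq) score matrix.
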
github hanxiao / tf-attentive-conv / nlp / match_blocks.py View on Github external
import tensorflow as tf

from nlp.nn import linear_logit


def Attentive_match(context, query, context_mask, query_mask,
                    score_func='dot', causality=False,
                    scope='attention_match_block', reuse=None, **kwargs):
    with tf.variable_scope(scope, reuse=reuse):
        # This variant takes num_units from the input shape, skips the ReLU
        # mapping layers of the tf-nlp-blocks version, and defaults to the
        # plain dot-product score.
        batch_size, context_length, num_units = context.get_shape().as_list()
        _, query_length, _ = query.get_shape().as_list()
        if score_func == 'dot':
            score = tf.matmul(context, query, transpose_b=True)
        elif score_func == 'bilinear':
            score = tf.matmul(linear_logit(context, num_units, scope='context_x_We'), query, transpose_b=True)
        elif score_func == 'scaled':
            score = tf.matmul(linear_logit(context, num_units, scope='context_x_We'), query, transpose_b=True) / \
                    (num_units ** 0.5)
        elif score_func == 'additive':
            score = tf.squeeze(linear_logit(
                tf.tanh(tf.tile(tf.expand_dims(linear_logit(context, num_units, scope='context_x_We'), axis=2),
                                [1, 1, query_length, 1]) +
                        tf.tile(tf.expand_dims(linear_logit(query, num_units, scope='query_x_We'), axis=1),
                                [1, context_length, 1, 1])), 1, scope='x_ve'), axis=3)
        else:
            raise NotImplementedError

        mask = tf.matmul(tf.expand_dims(context_mask, -1), tf.expand_dims(query_mask, -1), transpose_b=True)
        paddings = tf.ones_like(mask) * (-2 ** 32 + 1)
        masked_score = tf.where(tf.equal(mask, 0), paddings, score)  # B, Lc, Lq

        # Causality = Future blinding
        if causality:
            diag_vals = tf.ones_like(masked_score[0, :, :])  # (Lc, Lq)
            tril = tf.contrib.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense()  # (Lc, Lq)
            masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(masked_score)[0], 1, 1])  # B, Lc, Lq

            paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
            masked_score = tf.where(tf.equal(masks, 0), paddings, masked_score)  # B, Lc, Lq
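
In both repositories the padding mask works the same way: positions where the mask is 0 are overwritten with a very large negative number (-2 ** 32 + 1), so that a subsequent softmax drives their attention weight to effectively zero. A toy demonstration with illustrative values:

import tensorflow as tf

score = tf.constant([[1.0, 2.0, 3.0]])
mask = tf.constant([[1.0, 1.0, 0.0]])   # last position is padding
paddings = tf.ones_like(score) * (-2 ** 32 + 1)
masked = tf.where(tf.equal(mask, 0), paddings, score)
probs = tf.nn.softmax(masked)  # padded position receives ~0 probability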