How to use the fairseq.utils.set_incremental_state function in fairseq

To help you get started, we've selected a few examples showing how fairseq.utils.set_incremental_state is used in popular public projects.

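The examples below all follow the same pattern: a module caches per-step decoding state under a string key with utils.set_incremental_state and reads it back with utils.get_incremental_state. Here is a minimal sketch of that pattern, written against the legacy fairseq API these projects use (where the helpers live directly in fairseq.utils); the CachedLayer module and its buffer layout are hypothetical placeholders, not taken from any of the projects shown.

import torch
import torch.nn as nn
from fairseq import utils

class CachedLayer(nn.Module):
    """Hypothetical layer that caches its inputs across incremental decoding steps."""

    def _get_input_buffer(self, incremental_state):
        # returns None on the first step, before anything has been cached
        return utils.get_incremental_state(self, incremental_state, 'input_buffer')

    def _set_input_buffer(self, incremental_state, new_buffer):
        utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)

    def forward(self, x, incremental_state=None):
        # incremental_state is only passed during step-by-step generation;
        # during training/validation it is None and nothing is cached
        if incremental_state is not None:
            prev = self._get_input_buffer(incremental_state)
            if prev is not None:
                # append the new time step to the cached inputs (x is [batch, time, channels])
                x = torch.cat([prev, x], dim=1)
            self._set_input_buffer(incremental_state, x)
        return x

The helpers namespace the key by module instance, so several layers can each store their own 'input_buffer' in the same incremental_state dict without colliding.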

github freewym/espresso: fairseq/modules/dynamic_convolution.py
def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
github freewym/espresso: fairseq/modules/lightweight_convolution.py
def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
github lancopku/Prime: fairseq/models/transformer_bm.py
def _set_input_buffer(self, incremental_state, buffer):
        utils.set_incremental_state(
            self,
            incremental_state,
            'attn_state',
            buffer,
        )
github StillKeepTry/Transformer-PyTorch: Papers/Double Path Networks for Sequence to Sequence Learning/dualpath.py
def _set_input_buffer(self, incremental_state, new_buffer, name):
        return utils.set_incremental_state(self, incremental_state, name, new_buffer)
github freewym/espresso: espresso/models/external_language_model.py
def reorder_incremental_state(self, incremental_state, new_order):
        super().reorder_incremental_state(incremental_state, new_order)

        # gather each cached tensor along the batch dimension so it follows the new hypothesis order
        for state_name in ['wordlm_logprobs', 'out_logprobs', 'subword_cumlogprobs']:
            state = utils.get_incremental_state(self, incremental_state, state_name)
            if state is not None:
                new_state = state.index_select(0, new_order)
                utils.set_incremental_state(
                    self, incremental_state, state_name, new_state,
                )

        nodes = utils.get_incremental_state(self, incremental_state, 'nodes')
        if nodes is not None:
            new_order_list = new_order.tolist()
            new_nodes = [nodes[i] for i in new_order_list]
            utils.set_incremental_state(
                self, incremental_state, 'nodes', new_nodes,
            )
github freewym/espresso: fairseq/modules/linearized_convolution.py
def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
github pytorch/translate: pytorch_translate/ngram.py
def forward_unprojected(self, input_tokens, encoder_out, incremental_state=None):
        padded_tokens = F.pad(
            input_tokens,
            (self.history_len - 1, 0, 0, 0),
            "constant",
            self.dst_dict.eos(),
        )
        # We use incremental_state only to check whether we are decoding or not
        # self.training is false even for the forward pass through validation
        if incremental_state is not None:
            padded_tokens = padded_tokens[:, -self.history_len :]
        utils.set_incremental_state(self, incremental_state, "incremental_marker", True)

        bsz, seqlen = padded_tokens.size()
        seqlen -= self.history_len - 1

        # get outputs from encoder
        (encoder_outs, final_hidden, _, src_lengths, _) = encoder_out

        # padded_tokens has shape [batch_size, seq_len+history_len]
        x = self.embed_tokens(padded_tokens)
        x = F.dropout(x, p=self.dropout_in, training=self.training)

        # Convolution needs shape [batch_size, channels, seq_len]
        x = self.history_conv(x.transpose(1, 2)).transpose(1, 2)
        x = F.dropout(x, p=self.dropout_out, training=self.training)

        # x has shape [batch_size, seq_len, channels]