def _set_input_buffer(self, incremental_state, new_buffer):
    return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
def _set_input_buffer(self, incremental_state, buffer):
    utils.set_incremental_state(
        self,
        incremental_state,
        'attn_state',
        buffer,
    )
def _set_input_buffer(self, incremental_state, new_buffer, name):
    return utils.set_incremental_state(self, incremental_state, name, new_buffer)
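Each of these setters is normally paired with a getter that reads the cached value back on the next decoding step. A minimal sketch of the companion method, assuming the same fairseq-style utils module (whose get_incremental_state mirrors set_incremental_state); the method name here is hypothetical:

def _get_input_buffer(self, incremental_state):
    # Hypothetical companion to _set_input_buffer: returns the value stored
    # under 'input_buffer', or None before the first decoding step.
    return utils.get_incremental_state(self, incremental_state, 'input_buffer')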
def reorder_incremental_state(self, incremental_state, new_order):
    super().reorder_incremental_state(incremental_state, new_order)
    for state_name in ['wordlm_logprobs', 'out_logprobs', 'subword_cumlogprobs']:
        state = utils.get_incremental_state(self, incremental_state, state_name)
        if state is not None:
            new_state = state.index_select(0, new_order)
            utils.set_incremental_state(
                self, incremental_state, state_name, new_state,
            )
    nodes = utils.get_incremental_state(self, incremental_state, 'nodes')
    if nodes is not None:
        new_order_list = new_order.tolist()
        new_nodes = [nodes[i] for i in new_order_list]
        utils.set_incremental_state(
            self, incremental_state, 'nodes', new_nodes,
        )
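When beam search reshuffles hypotheses between steps, every cached tensor must be permuted along its batch-times-beam dimension in the same way, which is exactly what index_select(0, new_order) does above. A self-contained illustration (the tensor values are made up for the example):

import torch

cached = torch.tensor([[0.1], [0.2], [0.3]])   # one row of cached state per hypothesis
new_order = torch.tensor([2, 0, 0])            # hypothesis 2 survives; 0 is duplicated
reordered = cached.index_select(0, new_order)  # tensor([[0.3], [0.1], [0.1]])

The list-valued 'nodes' state cannot use index_select, hence the Python-level list comprehension in the snippet above.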
def forward_unprojected(self, input_tokens, encoder_out, incremental_state=None):
    padded_tokens = F.pad(
        input_tokens,
        (self.history_len - 1, 0, 0, 0),
        "constant",
        self.dst_dict.eos(),
    )
    # We use incremental_state only to check whether we are decoding;
    # self.training is False even during the forward pass for validation.
    if incremental_state is not None:
        padded_tokens = padded_tokens[:, -self.history_len :]
        utils.set_incremental_state(self, incremental_state, "incremental_marker", True)
    bsz, seqlen = padded_tokens.size()
    seqlen -= self.history_len - 1
    # get outputs from encoder
    (encoder_outs, final_hidden, _, src_lengths, _) = encoder_out
    # padded_tokens has shape [batch_size, seq_len + history_len - 1]
    x = self.embed_tokens(padded_tokens)
    x = F.dropout(x, p=self.dropout_in, training=self.training)
    # convolution expects shape [batch_size, channels, seq_len]
    x = self.history_conv(x.transpose(1, 2)).transpose(1, 2)
    x = F.dropout(x, p=self.dropout_out, training=self.training)
    # x has shape [batch_size, seq_len, channels]
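The F.pad call at the top of forward_unprojected left-pads the token sequence with EOS so the convolution always has history_len tokens of context. A minimal sketch of the pad-spec semantics, with a placeholder EOS index (the real value comes from self.dst_dict.eos()):

import torch
import torch.nn.functional as F

eos = 2                                  # placeholder; use dst_dict.eos() in practice
history_len = 3
tokens = torch.tensor([[5, 6, 7]])       # [batch_size, seq_len]
# For a 2-D tensor the pad spec is (left, right, top, bottom): this prepends
# history_len - 1 EOS tokens along the time axis and leaves the batch axis alone.
padded = F.pad(tokens, (history_len - 1, 0, 0, 0), "constant", eos)
# padded == tensor([[2, 2, 5, 6, 7]])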