@overrides
def compute_mask(self, inputs, mask=None):
    # pylint: disable=unused-argument
    if mask is None:
        return None
    return self.__collapse_tensor(mask)
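The method above leans on a private __collapse_tensor helper that is not shown. A minimal, self-contained sketch of the same mask-propagation pattern in tf.keras (the CollapseTimeDim layer and its reshaping are illustrative stand-ins, not the original helper):

import tensorflow as tf

class CollapseTimeDim(tf.keras.layers.Layer):
    """Merges (batch, a, b, features) into (batch, a * b, features)."""

    def call(self, inputs):
        s = tf.shape(inputs)
        return tf.reshape(inputs, (s[0], s[1] * s[2], s[3]))

    def compute_mask(self, inputs, mask=None):
        # Reshape the mask exactly like the data, so downstream layers keep
        # seeing one mask entry per (collapsed) timestep.
        if mask is None:
            return None
        s = tf.shape(mask)
        return tf.reshape(mask, (s[0], s[1] * s[2]))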
@overrides
def tokens_to_indices(
        self,
        tokens: List[str],
        vocab: Vocabulary) -> Dict[str, List[int]]:
    """
    Takes a list of tokens and converts them to one or more sets of indices.
    During the indexing process, each item corresponds to an index in the
    vocabulary.

    Parameters
    ----------
    tokens : ``List[str]``
        The tokens to convert to indices.
    vocab : ``Vocabulary``
        ``vocab`` is used to get the index of each item.

    Returns
    -------
    ``Dict[str, List[int]]``
        A mapping from index names to the lists of indices they produce.
    """
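A concrete implementation is often a plain single-ID lookup. A hedged sketch, assuming an AllenNLP-style Vocabulary that exposes get_token_index(token, namespace):

from typing import Dict, List

def tokens_to_indices(tokens: List[str], vocab) -> Dict[str, List[int]]:
    # One index per token from the "tokens" namespace; the dict key names the
    # output array this indexer contributes to the final tensor dict.
    return {"tokens": [vocab.get_token_index(token, "tokens") for token in tokens]}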
@overrides
def backward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""
    Args:
        input: Tensor
            input tensor [batch, in_channels, H, W]

    Returns: out: Tensor, logdet: Tensor
        out: [batch, in_channels, H, W], the output of the flow
        logdet: [batch], the log determinant of :math:`\partial output / \partial input`
    """
    batch, channels, H, W = input.size()
    out = input - self.bias
    out = out.div(self.log_scale.exp() + 1e-8)
    # Inverting the scaling negates the log-determinant: -H * W * sum(log_scale).
    logdet = self.log_scale.sum(dim=0).squeeze(1).mul(H * -W)
    return out, logdet
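To make the inverse concrete, here is a hedged, self-contained sketch of an ActNorm-style affine flow; the parameter shapes are assumptions, not taken from the original class. forward scales and shifts, backward undoes it, and the two log-determinants are exact negatives:

import torch
import torch.nn as nn

class ActNormSketch(nn.Module):
    def __init__(self, in_channels: int):
        super().__init__()
        self.log_scale = nn.Parameter(torch.zeros(in_channels, 1, 1))
        self.bias = nn.Parameter(torch.zeros(in_channels, 1, 1))

    def forward(self, x: torch.Tensor):
        _, _, H, W = x.size()
        out = x * self.log_scale.exp() + self.bias
        # log|det d(out)/d(x)| = H * W * sum(log_scale), shared by every batch element.
        logdet = self.log_scale.sum() * (H * W)
        return out, logdet.expand(x.size(0))

    def backward(self, y: torch.Tensor):
        _, _, H, W = y.size()
        out = (y - self.bias) / (self.log_scale.exp() + 1e-8)
        logdet = self.log_scale.sum() * (-H * W)
        return out, logdet.expand(y.size(0))

A round trip backward(forward(x)[0])[0] reproduces x up to the 1e-8 stabilizer.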
@overrides
def compute_mask(self, inputs, mask=None):
    return None
@overrides
def split_sentences(self, text: str) -> List[str]:
    # ``Span.string`` was removed in spaCy 3; ``Span.text`` plus strip() is equivalent here.
    return [sent.text.strip() for sent in self.spacy(text).sents]
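Usage sketch, assuming self.spacy holds a loaded pipeline; the model name below is an assumption:

import spacy

nlp = spacy.load("en_core_web_sm")  # stands in for self.spacy
doc = nlp("Hello world. This is spaCy.")
print([sent.text.strip() for sent in doc.sents])
# ['Hello world.', 'This is spaCy.']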
@overrides
def _instance_type(self):
    return TextClassificationInstance
@overrides
def _compute_action_probabilities(
    self,
    state: GrammarBasedState,
    hidden_state: torch.Tensor,
    attention_weights: torch.Tensor,
    predicted_action_embeddings: torch.Tensor,
) -> Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]]:
    # In this section we take our predicted action embedding and compare it to the available
    # actions in our current state (which might be different for each group element). For
    # computing action scores, we forgo batched / grouped computation: it adds too much
    # complexity and, with the operations we're doing here, doesn't speed things up anyway.
    # It also means we don't need any action masks, because each group element only ever
    # sees tensors of exactly the right length.
    group_size = len(state.batch_indices)
    actions = state.get_valid_actions()
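The "unbatched" approach the comment describes amounts to scoring each group element's valid actions independently. A hedged sketch of that idea (names and shapes are illustrative, not the original helpers):

from typing import List
import torch

def score_actions_per_element(predicted_action_embeddings: torch.Tensor,
                              valid_action_embeddings: List[torch.Tensor]) -> List[torch.Tensor]:
    # predicted_action_embeddings: (group_size, embedding_dim)
    # valid_action_embeddings[i]: (num_valid_actions_i, embedding_dim), ragged across i
    scores = []
    for group_index, action_embeddings in enumerate(valid_action_embeddings):
        predicted = predicted_action_embeddings[group_index]
        # Dot product against this element's own action set only, so no
        # padding or masking is ever needed.
        scores.append(action_embeddings.mv(predicted))
    return scores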
@overrides
def reset(self) -> None:
    self._precision_matches = Counter()
    self._precision_totals = Counter()
    self._prediction_lengths = 0
    self._reference_lengths = 0
@overrides
def _read(self, file_path: str):
    # if `file_path` is a URL, redirect to the cache
    file_path = cached_path(file_path)
    logger.info("Reading file at %s", file_path)
    with open(file_path) as dataset_file:
        dataset = json.load(dataset_file)
    logger.info("Reading the dataset")
    for sample in dataset:
        instance = self.text_to_instance(
            sample["candidates"],
            sample["query"],
            sample["supports"],
@overrides
def forward(
    self,  # type: ignore
    question: Dict[str, torch.LongTensor],
    table: Dict[str, torch.LongTensor],
    world: List[WikiTablesLanguage],
    actions: List[List[ProductionRuleArray]],
    target_values: List[List[str]] = None,
    target_action_sequences: torch.LongTensor = None,
    metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
    """
    In this method we encode the table entities, link them to words in the question, then
    encode the question. Then we set up the initial state for the decoder, and pass that
    state off to either a DecoderTrainer, if we're training, or a BeamSearch for inference,
    if we're not.