How to use the tokenizers.decoders.ByteLevel function in tokenizers

To help you get started, we’ve selected a few tokenizers examples, based on popular ways the library is used in public projects.
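Before the project examples below, here is a minimal standalone sketch of the decoder (assuming a recent tokenizers release, 0.10 or later, where components are plain constructors rather than the older `.new()` factories used in the snippet). Byte-level BPE vocabularies such as GPT-2's encode a leading space as the marker character "Ġ"; tokenizers.decoders.ByteLevel maps those bytes back to readable text:

import tokenizers as tk

# The ByteLevel decoder takes no arguments.
decoder = tk.decoders.ByteLevel()

# "Ġ" marks a leading space in GPT-2-style byte-level vocabularies.
print(decoder.decode(["Hello", "Ġworld"]))  # -> "Hello world"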


Source: huggingface/transformers, src/transformers/tokenization_gpt2.py (view on GitHub)
    # Excerpt from GPT2TokenizerFast.__init__; `tk` is the tokenizers package
    # (imported in this module as `import tokenizers as tk`).
    def __init__(
        self,
        vocab_file,
        merges_file,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        pad_to_max_length=False,
        add_prefix_space=False,
        max_length=None,
        stride=0,
        truncation_strategy="longest_first",
        **kwargs
    ):
        super(GPT2TokenizerFast, self).__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs
        )

        # Build the fast (Rust) tokenizer: a byte-level BPE model, with the
        # matching ByteLevel pre-tokenizer and decoder. Note the old-style
        # tokenizers API here: `.new()` factories and `with_*` setters.
        self._tokenizer = tk.Tokenizer(tk.models.BPE.from_files(vocab_file, merges_file))
        self._update_special_tokens()
        self._tokenizer.with_pre_tokenizer(tk.pre_tokenizers.ByteLevel.new(add_prefix_space=add_prefix_space))
        self._tokenizer.with_decoder(tk.decoders.ByteLevel.new())
        if max_length:
            self._tokenizer.with_truncation(max_length, stride=stride, strategy=truncation_strategy)
        self._tokenizer.with_padding(
            max_length=max_length if pad_to_max_length else None,
            direction=self.padding_side,
            pad_id=self.pad_token_id if self.pad_token_id is not None else 0,
            pad_type_id=self.pad_token_type_id,
            pad_token=self.pad_token if self.pad_token is not None else "",
        )
        # A standalone ByteLevel decoder, kept for decoding outside the Tokenizer.
        self._decoder = tk.decoders.ByteLevel.new()
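The snippet above targets an early tokenizers release, where components were built with `.new()` factories and attached through `with_*` setters. On current releases the same GPT-2-style wiring uses plain constructors and attribute assignment. A minimal sketch, assuming tokenizers 0.10 or later; vocab.json and merges.txt are placeholder paths for a trained byte-level BPE model:

from tokenizers import Tokenizer, decoders, pre_tokenizers
from tokenizers.models import BPE

# Load the trained BPE model (placeholder file names).
tokenizer = Tokenizer(BPE.from_file("vocab.json", "merges.txt"))

# Pair the ByteLevel pre-tokenizer with the matching ByteLevel decoder
# so that encode/decode round-trips cleanly.
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
tokenizer.decoder = decoders.ByteLevel()

encoding = tokenizer.encode("Hello world")
print(tokenizer.decode(encoding.ids))  # -> "Hello world"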