How to use the tokenizers.pre_tokenizers module in tokenizers

To help you get started, we’ve selected a few tokenizers.pre_tokenizers examples, based on popular ways the library is used in public projects.

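A pre-tokenizer splits raw text into word-level pieces before the tokenization model (WordPiece, BPE, ...) is applied. As a quick, minimal sketch (not taken from the projects below, and assuming a recent tokenizers release), you can run a pre-tokenizer on its own:

from tokenizers import pre_tokenizers

# Whitespace is one of several built-in pre-tokenizers (BertPreTokenizer,
# ByteLevel, ...); pre_tokenize_str returns (piece, (start, end)) offset pairs.
pre_tok = pre_tokenizers.Whitespace()
print(pre_tok.pre_tokenize_str("Hello, how are you?"))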

From huggingface/transformers, src/transformers/tokenization_bert.py (view on GitHub):
        add_special_tokens=True,
        **kwargs
    ):
        super(BertTokenizerFast, self).__init__(
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs
        )

        self._tokenizer = tk.Tokenizer(tk.models.WordPiece.from_files(vocab_file, unk_token=unk_token))
        self._update_special_tokens()
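        # Attach the pre-tokenizer: BertPreTokenizer handles basic tokenization
        # (whitespace/punctuation splitting, optional lower-casing and Chinese
        # character handling) before the WordPiece model runs.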
        self._tokenizer.with_pre_tokenizer(
            tk.pre_tokenizers.BertPreTokenizer.new(
                do_basic_tokenize=do_basic_tokenize,
                do_lower_case=do_lower_case,
                tokenize_chinese_chars=tokenize_chinese_chars,
                never_split=never_split if never_split is not None else [],
            )
        )
        self._tokenizer.with_decoder(tk.decoders.WordPiece.new())

        if add_special_tokens:
            self._tokenizer.with_post_processor(
                tk.processors.BertProcessing.new(
                    (sep_token, self._tokenizer.token_to_id(sep_token)),
                    (cls_token, self._tokenizer.token_to_id(cls_token)),
                )
            )
        if max_length is not None:
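The excerpt above uses an early tokenizers API (the .new() constructors and with_* methods). As a rough, hedged equivalent against the current tokenizers API, the same pre-tokenizer is attached by attribute assignment; this is a minimal sketch, with "vocab.txt" as a placeholder path:

from tokenizers import Tokenizer, normalizers, pre_tokenizers, decoders
from tokenizers.models import WordPiece

tokenizer = Tokenizer(WordPiece.from_file("vocab.txt", unk_token="[UNK]"))
# In current releases, lower-casing and Chinese-character handling live in a
# normalizer, while the pre-tokenizer only splits on whitespace and punctuation.
tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True, handle_chinese_chars=True)
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tokenizer.decoder = decoders.WordPiece()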
From huggingface/transformers, src/transformers/tokenization_gpt2.py (view on GitHub):
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        pad_to_max_length=False,
        add_prefix_space=False,
        max_length=None,
        stride=0,
        truncation_strategy="longest_first",
        **kwargs
    ):
        super(GPT2TokenizerFast, self).__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs
        )

        self._tokenizer = tk.Tokenizer(tk.models.BPE.from_files(vocab_file, merges_file))
        self._update_special_tokens()
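        # Attach the byte-level pre-tokenizer: it maps raw bytes to printable
        # characters (optionally adding a leading space) before BPE merges apply.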
        self._tokenizer.with_pre_tokenizer(tk.pre_tokenizers.ByteLevel.new(add_prefix_space=add_prefix_space))
        self._tokenizer.with_decoder(tk.decoders.ByteLevel.new())
        if max_length:
            self._tokenizer.with_truncation(max_length, stride=stride, strategy=truncation_strategy)
        self._tokenizer.with_padding(
            max_length=max_length if pad_to_max_length else None,
            direction=self.padding_side,
            pad_id=self.pad_token_id if self.pad_token_id is not None else 0,
            pad_type_id=self.pad_token_type_id,
            pad_token=self.pad_token if self.pad_token is not None else "",
        )
        self._decoder = tk.decoders.ByteLevel.new()
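For reference, here is a minimal sketch of the same byte-level setup with the current tokenizers API (the with_* methods above have become attribute assignments plus enable_truncation / enable_padding); "vocab.json" and "merges.txt" are placeholder paths for GPT-2-style BPE files, and max_length=1024 is just an illustrative value:

from tokenizers import Tokenizer, pre_tokenizers, decoders
from tokenizers.models import BPE

tokenizer = Tokenizer(BPE.from_file("vocab.json", "merges.txt"))
# ByteLevel maps raw bytes to printable characters before BPE merges apply.
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
tokenizer.decoder = decoders.ByteLevel()

# Truncation and padding are enabled explicitly in the current API.
tokenizer.enable_truncation(max_length=1024, stride=0, strategy="longest_first")
tokenizer.enable_padding(pad_id=0, pad_token="<|endoftext|>")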