How to use tokenizers - 10 common examples

To help you get started, we’ve selected a few tokenizers examples, based on popular ways the library is used in public projects.


Example 1: singletongue / WikiEntVec / tokenizers.py (view on GitHub)
            matched_strings = [match.group(0)
                               for match in preserving_pattern.finditer(text)]

            for (snippet, matched_string) in zip(
                    preserving_pattern.split(text), matched_strings + [None]):
                if self._lower:
                    tokens += [t.lower() for t in self._tokenize(snippet)]
                else:
                    tokens += self._tokenize(snippet)

                if matched_string is not None:
                    tokens.append(matched_string)

        return tokens


class RegExpTokenizer(Tokenizer):
    def __init__(self, pattern=r'\w+|\S', lower=False):
        super(RegExpTokenizer, self).__init__(lower)
        self._regex = re.compile(pattern)

    def _tokenize(self, text):
        tokens = self._regex.findall(text)

        return tokens


class NLTKTokenizer(Tokenizer):
    def __init__(self, lower=False):
        super(NLTKTokenizer, self).__init__(lower)
        from nltk import word_tokenize
        self.word_tokenize = word_tokenize

    def _tokenize(self, text):
        return self.word_tokenize(text)
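
This excerpt starts partway through the shared base class's tokenize() method, and the base class itself is not shown on this page. A minimal sketch of the contract the subclasses rely on (a _lower flag plus a _tokenize() hook) is given below; it is reconstructed from the excerpt and is an approximation, not the actual WikiEntVec source, whose names and signatures may differ.

class Tokenizer:
    """Hypothetical base class inferred from the excerpt above; the real
    WikiEntVec implementation may differ in names and signature."""

    def __init__(self, lower=False):
        self._lower = lower

    def _tokenize(self, text):
        # Overridden by RegExpTokenizer, NLTKTokenizer and MeCabTokenizer.
        raise NotImplementedError

    def tokenize(self, text, preserving_pattern=None):
        if preserving_pattern is None:
            tokens = self._tokenize(text)
            return [t.lower() for t in tokens] if self._lower else tokens

        # Otherwise, keep every match of preserving_pattern (e.g. an entity
        # mention) as a single, un-lowercased token, exactly as in the
        # excerpt above.
        tokens = []
        matched_strings = [m.group(0)
                           for m in preserving_pattern.finditer(text)]
        for snippet, matched_string in zip(
                preserving_pattern.split(text), matched_strings + [None]):
            if self._lower:
                tokens += [t.lower() for t in self._tokenize(snippet)]
            else:
                tokens += self._tokenize(snippet)
            if matched_string is not None:
                tokens.append(matched_string)
        return tokens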

Example 2: singletongue / WikiEntVec / tokenizers.py (view on GitHub)
    def _tokenize(self, text):
        tokens = self._regex.findall(text)

        return tokens


class NLTKTokenizer(Tokenizer):
    def __init__(self, lower=False):
        super(NLTKTokenizer, self).__init__(lower)
        from nltk import word_tokenize
        self.word_tokenize = word_tokenize

    def _tokenize(self, text):
        return self.word_tokenize(text)


class MeCabTokenizer(Tokenizer):
    def __init__(self, dic=None, udic=None, lower=False):
        super(MeCabTokenizer, self).__init__(lower)
        import MeCab
        mecab_options = ['-O wakati']
        if dic:
            mecab_options.append(f'-d {dic}')
        if udic:
            mecab_options.append(f'-u {udic}')

        self._mt = MeCab.Tagger(' '.join(mecab_options))

    def _tokenize(self, text):
        return self._mt.parse(text).strip().split()
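
With the subclasses above in place, switching tokenizers is just a matter of picking a class; they all expose the same tokenize() interface. A short usage sketch follows. NLTKTokenizer needs nltk and its punkt data, MeCabTokenizer needs the mecab-python3 bindings plus a dictionary, and the tokenize() signature is the hypothetical one sketched after Example 1.

import re

tokenizer = RegExpTokenizer(lower=True)   # pure-regex, no extra dependencies
print(tokenizer.tokenize("Tokenizers split text into words."))
# -> ['tokenizers', 'split', 'text', 'into', 'words', '.']

# Keep [[...]]-style entity mentions intact while lowercasing everything else
# (regex and call signature are illustrative assumptions).
entity_pattern = re.compile(r'\[\[.+?\]\]')
print(tokenizer.tokenize("See [[New York City]] for details.",
                         preserving_pattern=entity_pattern))
# -> ['see', '[[New York City]]', 'for', 'details', '.']

# Japanese text, segmented with MeCab in wakati (word-splitting) mode:
# tokenizer = MeCabTokenizer(dic='/path/to/mecab/dict', lower=False)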

Example 3: huggingface / transformers / src/transformers/tokenization_bert.py (view on GitHub)
        pad_to_max_length=False,
        stride=0,
        truncation_strategy="longest_first",
        add_special_tokens=True,
        **kwargs
    ):
        super(BertTokenizerFast, self).__init__(
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs
        )

        self._tokenizer = tk.Tokenizer(tk.models.WordPiece.from_files(vocab_file, unk_token=unk_token))
        self._update_special_tokens()
        self._tokenizer.with_pre_tokenizer(
            tk.pre_tokenizers.BertPreTokenizer.new(
                do_basic_tokenize=do_basic_tokenize,
                do_lower_case=do_lower_case,
                tokenize_chinese_chars=tokenize_chinese_chars,
                never_split=never_split if never_split is not None else [],
            )
        )
        self._tokenizer.with_decoder(tk.decoders.WordPiece.new())

        if add_special_tokens:
            self._tokenizer.with_post_processor(
                tk.processors.BertProcessing.new(
                    (sep_token, self._tokenizer.token_to_id(sep_token)),
                    (cls_token, self._tokenizer.token_to_id(cls_token)),
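
This excerpt (cut off mid-call above) comes from an early transformers integration in which tk is the Hugging Face tokenizers package, and the builder-style calls (WordPiece.from_files, with_pre_tokenizer, with_decoder, ...) belong to a pre-1.0 API that has since changed. With the standalone tokenizers library as it exists today, roughly the same WordPiece setup is available through the bundled BertWordPieceTokenizer helper. Treat the file path and options below as placeholders, not a drop-in replacement for the transformers class.

from tokenizers import BertWordPieceTokenizer

# "vocab.txt" is a placeholder path to a BERT WordPiece vocabulary file.
tokenizer = BertWordPieceTokenizer(
    "vocab.txt",
    lowercase=True,               # plays the role of do_lower_case
    handle_chinese_chars=True,    # plays the role of tokenize_chinese_chars
)

# [CLS]/[SEP] are added by the built-in BERT post-processor.
encoding = tokenizer.encode("Hello, world!")
print(encoding.tokens)   # e.g. ['[CLS]', 'hello', ',', 'world', '!', '[SEP]']
print(encoding.ids)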

Example 4: huggingface / transformers / src/transformers/tokenization_gpt2.py (view on GitHub)
        merges_file,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        pad_to_max_length=False,
        add_prefix_space=False,
        max_length=None,
        stride=0,
        truncation_strategy="longest_first",
        **kwargs
    ):
        super(GPT2TokenizerFast, self).__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs
        )

        self._tokenizer = tk.Tokenizer(tk.models.BPE.from_files(vocab_file, merges_file))
        self._update_special_tokens()
        self._tokenizer.with_pre_tokenizer(tk.pre_tokenizers.ByteLevel.new(add_prefix_space=add_prefix_space))
        self._tokenizer.with_decoder(tk.decoders.ByteLevel.new())
        if max_length:
            self._tokenizer.with_truncation(max_length, stride=stride, strategy=truncation_strategy)
        self._tokenizer.with_padding(
            max_length=max_length if pad_to_max_length else None,
            direction=self.padding_side,
            pad_id=self.pad_token_id if self.pad_token_id is not None else 0,
            pad_type_id=self.pad_token_type_id,
            pad_token=self.pad_token if self.pad_token is not None else "",
        )
        self._decoder = tk.decoders.ByteLevel.new()
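
The GPT-2 counterpart uses the same pre-release API (BPE.from_files, with_pre_tokenizer, with_truncation, with_padding). In today's standalone tokenizers library the closest ready-made equivalent is ByteLevelBPETokenizer; again the file names are placeholders, and the truncation/padding calls simply mirror the options shown above.

from tokenizers import ByteLevelBPETokenizer

# "vocab.json" and "merges.txt" are placeholder paths to GPT-2 BPE files.
tokenizer = ByteLevelBPETokenizer(
    "vocab.json",
    "merges.txt",
    add_prefix_space=False,
)

# Optional: mirror max_length / stride / pad_to_max_length from the excerpt.
# (50256 is the id of <|endoftext|> in the standard GPT-2 vocabulary.)
tokenizer.enable_truncation(max_length=1024, stride=0)
tokenizer.enable_padding(pad_token="<|endoftext|>", pad_id=50256, length=1024)

encoding = tokenizer.encode("Hello, world!")
print(encoding.tokens)
print(encoding.ids)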

Example 5: singletongue / WikiEntVec / tokenizers.py (view on GitHub)
        return tokens


class RegExpTokenizer(Tokenizer):
    def __init__(self, pattern=r'\w+|\S', lower=False):
        super(RegExpTokenizer, self).__init__(lower)
        self._regex = re.compile(pattern)

    def _tokenize(self, text):
        tokens = self._regex.findall(text)

        return tokens


class NLTKTokenizer(Tokenizer):
    def __init__(self, lower=False):
        super(NLTKTokenizer, self).__init__(lower)
        from nltk import word_tokenize
        self.word_tokenize = word_tokenize

    def _tokenize(self, text):
        return self.word_tokenize(text)


class MeCabTokenizer(Tokenizer):
    def __init__(self, dic=None, udic=None, lower=False):
        super(MeCabTokenizer, self).__init__(lower)
        import MeCab
        mecab_options = ['-O wakati']
        if dic:
            mecab_options.append(f'-d {dic}')

Example 6: singletongue / WikiEntVec / generate_corpus.py (view on GitHub)
    parser.add_argument('--mecab_dic', type=str, default=None,
                        help='dictionary for MeCab tokenizer')
    parser.add_argument('--mecab_udic', type=str, default=None,
                        help='user dictionary for MeCab tokenizer')
    args = parser.parse_args()

    if args.tokenizer == 'regexp':
        logging.info('tokenizer: RegExpTokenizer')
        tokenizer = tokenizers.RegExpTokenizer(lower=args.lower)
    elif args.tokenizer == 'nltk':
        logging.info('tokenizer: NLTKTokenizer')
        tokenizer = tokenizers.NLTKTokenizer(lower=args.lower)
    elif args.tokenizer == 'mecab':
        logging.info('tokenizer: MeCabTokenizer')
        logging.info(f'dictionary: {args.mecab_dic}')
        logging.info(f'user dictionary: {args.mecab_udic}')
        tokenizer = tokenizers.MeCabTokenizer(
            dic=args.mecab_dic, udic=args.mecab_udic, lower=args.lower)
    else:
        raise Exception('Undefined tokenizer type.')

    logging.info('generating corpus for training')
    n_processed = 0
    with gzip.open(args.cirrus_file, 'rt') as fi, \
         bz2.open(args.out_file, 'wt') as fo:
        for line in fi:
            article = json.loads(line)
            if 'title' not in article:
                continue

            title = article['title']
            text = regex_spaces.sub(r' ', article['text'])
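
The excerpt stops right after the article text is whitespace-normalised, so the per-article output step is not shown here. Purely as an illustration of the surrounding I/O pattern (a gzip-compressed JSON-lines Cirrus dump in, a bz2-compressed corpus out, skipping lines that have no 'title' field), here is a self-contained sketch with placeholder file names and a plain regex standing in for the selected tokenizer; the real script's output format may differ.

import bz2
import gzip
import json
import re

regex_spaces = re.compile(r'\s+')
word_pattern = re.compile(r'\w+|\S')   # stand-in for the chosen tokenizer

# Placeholder paths; the real script takes these from argparse.
with gzip.open('cirrus-content.json.gz', 'rt') as fi, \
     bz2.open('corpus.txt.bz2', 'wt') as fo:
    for line in fi:
        article = json.loads(line)
        if 'title' not in article:      # skip lines without document content
            continue

        text = regex_spaces.sub(' ', article['text'])
        # Hypothetical output: one whitespace-joined token line per article.
        fo.write(' '.join(word_pattern.findall(text)) + '\n')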

Example 7: singletongue / WikiEntVec / tokenizers.py (view on GitHub)
    def __init__(self, dic=None, udic=None, lower=False):
        super(MeCabTokenizer, self).__init__(lower)
        import MeCab
        mecab_options = ['-O wakati']
        if dic:
            mecab_options.append(f'-d {dic}')
        if udic:
            mecab_options.append(f'-u {udic}')

        self._mt = MeCab.Tagger(' '.join(mecab_options))

Example 8: singletongue / WikiEntVec / tokenizers.py (view on GitHub)
    def __init__(self, lower=False):
        super(NLTKTokenizer, self).__init__(lower)
        from nltk import word_tokenize
        self.word_tokenize = word_tokenize

Example 9: singletongue / WikiEntVec / generate_corpus.py (view on GitHub)
    parser.add_argument('--tokenizer', default='regexp',
                        help='type of tokenizer [regexp]')
    parser.add_argument('--lower', action='store_true',
                        help='lowercase words (not applied to NEs)')
    parser.add_argument('--mecab_dic', type=str, default=None,
                        help='dictionary for MeCab tokenizer')
    parser.add_argument('--mecab_udic', type=str, default=None,
                        help='user dictionary for MeCab tokenizer')
    args = parser.parse_args()

    if args.tokenizer == 'regexp':
        logging.info('tokenizer: RegExpTokenizer')
        tokenizer = tokenizers.RegExpTokenizer(lower=args.lower)
    elif args.tokenizer == 'nltk':
        logging.info('tokenizer: NLTKTokenizer')
        tokenizer = tokenizers.NLTKTokenizer(lower=args.lower)
    elif args.tokenizer == 'mecab':
        logging.info('tokenizer: MeCabTokenizer')
        logging.info(f'dictionary: {args.mecab_dic}')
        logging.info(f'user dictionary: {args.mecab_udic}')
        tokenizer = tokenizers.MeCabTokenizer(
            dic=args.mecab_dic, udic=args.mecab_udic, lower=args.lower)
    else:
        raise Exception('Undefined tokenizer type.')

    logging.info('generating corpus for training')
    n_processed = 0
    with gzip.open(args.cirrus_file, 'rt') as fi, \
         bz2.open(args.out_file, 'wt') as fo:
        for line in fi:
            article = json.loads(line)
            if 'title' not in article:
                continue

Example 10: singletongue / WikiEntVec / tokenizers.py (view on GitHub)
    def __init__(self, pattern=r'\w+|\S', lower=False):
        super(RegExpTokenizer, self).__init__(lower)
        self._regex = re.compile(pattern)