How to use the tokenizers.Tokenizer class in tokenizers

To help you get started, we’ve selected a few tokenizers examples, based on popular ways the library is used in public projects.
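For orientation, here is a minimal sketch of the library's own entry point, independent of the project snippet below. The model identifier "bert-base-uncased" is just an assumed example; any pretrained tokenizer name on the Hugging Face hub works, and loading it requires network access.

from tokenizers import Tokenizer

# Load a pretrained tokenizer by name and encode a sample sentence.
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
encoding = tokenizer.encode("Hello, world!")
print(encoding.tokens)  # the subword tokens produced for the sentence
print(encoding.ids)     # the corresponding vocabulary ids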


Example from singletongue/WikiEntVec, tokenizers.py (view the full file on GitHub):
            # Collect the substrings matched by preserving_pattern so that
            # they survive tokenization as single, unmodified tokens.
            matched_strings = [match.group(0)
                               for match in preserving_pattern.finditer(text)]

            for (snippet, matched_string) in zip(
                    preserving_pattern.split(text), matched_strings + [None]):
                if self._lower:
                    tokens += [t.lower() for t in self._tokenize(snippet)]
                else:
                    tokens += self._tokenize(snippet)

                if matched_string is not None:
                    tokens.append(matched_string)

        return tokens


class RegExpTokenizer(Tokenizer):
    def __init__(self, pattern=r'\w+|\S', lower=False):
        super(RegExpTokenizer, self).__init__(lower)
        self._regex = re.compile(pattern)

    def _tokenize(self, text):
        tokens = self._regex.findall(text)

        return tokens


class NLTKTokenizer(Tokenizer):
    def __init__(self, lower=False):
        super(NLTKTokenizer, self).__init__(lower)
        from nltk import word_tokenize
        self.word_tokenize = word_tokenize

    def _tokenize(self, text):
        return self.word_tokenize(text)


class MeCabTokenizer(Tokenizer):
    def __init__(self, dic=None, udic=None, lower=False):
        super(MeCabTokenizer, self).__init__(lower)
        import MeCab
        mecab_options = ['-O wakati']
        if dic:
            mecab_options.append(f'-d {dic}')
        if udic:
            mecab_options.append(f'-u {udic}')

        self._mt = MeCab.Tagger(' '.join(mecab_options))

    def _tokenize(self, text):
        return self._mt.parse(text).strip().split()
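The classes above are definitions only, so here is a hypothetical usage sketch. It assumes the base Tokenizer class exposes a tokenize(text, preserving_pattern=None) method, which is what the preserving_pattern loop at the top of the excerpt suggests; the entity pattern is likewise an assumed example.

import re

# Tokenize with RegExpTokenizer while keeping [[...]] spans intact as
# single tokens: the lowered snippets come from _tokenize, while the
# preserved matches are appended unmodified.
tokenizer = RegExpTokenizer(lower=True)
entity_pattern = re.compile(r'\[\[[^\]]+\]\]')
tokens = tokenizer.tokenize('See [[New York]] for details.',
                            preserving_pattern=entity_pattern)
print(tokens)  # expected: ['see', '[[New York]]', 'for', 'details', '.']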