How to use the hanlp.load function in hanlp

To help you get started, we've selected a few hanlp examples based on popular ways it is used in public projects.

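Every snippet below follows the same pattern: hanlp.load takes a pretrained-model identifier, either a constant from hanlp.pretrained or its string name, downloads the model on first use, and returns a ready-to-call component. A minimal sketch of that pattern, reusing the segmentation model named in the pipeline example below (the sample sentence is our own):

import hanlp

# The identifier may be a string or a constant from hanlp.pretrained;
# the model is fetched and cached the first time it is loaded.
tokenizer = hanlp.load('PKU_NAME_MERGED_SIX_MONTHS_CONVSEG')
print(tokenizer('商品和服务'))  # loaded components are directly callable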
github hankcs / HanLP / tests / demo / zh / demo_pos.py
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 21:25
import hanlp
from hanlp.pretrained.pos import CTB5_POS_RNN_FASTTEXT_ZH

# Load a CTB5 part-of-speech tagger backed by an RNN with fastText embeddings.
tagger = hanlp.load(CTB5_POS_RNN_FASTTEXT_ZH)
# A flat list of tokens is tagged as one sentence.
print(tagger.predict(['我', '的', '希望', '是', '希望', '和平']))
# A list of token lists is tagged as a batch.
print(tagger.predict([['支持', '批处理'], ['速度', '更', '快']]))
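This demo calls tagger.predict explicitly; the other examples on this page invoke loaded components directly, and in HanLP 2.x the two spellings are interchangeable. Note also the input contract shown in the two calls: a flat token list is treated as one sentence, while a list of token lists is processed as a batch, which, as the demo sentences themselves say, is faster.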
github hankcs / HanLP / tests / demo / zh / demo_pipeline.py
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-31 03:24
import hanlp

# Load each stage of the pipeline as an independent pretrained component.
tokenizer = hanlp.load('PKU_NAME_MERGED_SIX_MONTHS_CONVSEG')
tagger = hanlp.load('CTB5_POS_RNN_FASTTEXT_ZH')
syntactic_parser = hanlp.load('CTB7_BIAFFINE_DEP_ZH')
semantic_parser = hanlp.load('SEMEVAL16_TEXT_BIAFFINE_ZH')

# Each append wires a component into the pipeline: output_key names the
# field it writes, and input_key selects the fields it reads.
pipeline = hanlp.pipeline() \
    .append(hanlp.utils.rules.split_sentence, output_key='sentences') \
    .append(tokenizer, output_key='tokens') \
    .append(tagger, output_key='part_of_speech_tags') \
    .append(syntactic_parser, input_key=('tokens', 'part_of_speech_tags'), output_key='syntactic_dependencies') \
    .append(semantic_parser, input_key=('tokens', 'part_of_speech_tags'), output_key='semantic_dependencies')
print(pipeline)

text = '''HanLP是一系列模型与算法组成的自然语言处理工具包,目标是普及自然语言处理在生产环境中的应用。
HanLP具备功能完善、性能高效、架构清晰、语料时新、可自定义的特点。
内部算法经过工业界和学术界考验,配套书籍《自然语言处理入门》已经出版。
'''

doc = pipeline(text)
print(doc)
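The pipeline returns a document object keyed by the output_key names declared above. A hedged sketch of reading individual annotation layers back out (the key names come from the pipeline definition above; the concrete value types may differ across HanLP versions):

print(doc['tokens'][0])                 # tokens of the first sentence
print(doc['part_of_speech_tags'][0])    # the matching PoS tags
print(doc['semantic_dependencies'][0])  # semantic graph of the first sentence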
github hankcs / HanLP / tests / debug / debug_bert_ner.py
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-01-09 00:06
import hanlp

recognizer = hanlp.load(hanlp.pretrained.ner.MSRA_NER_BERT_BASE_ZH)
# A batch of one character-tokenized sentence.
print(recognizer([list('孽债 (上海话)')]))
# A single 512-character sentence, as a long-input stress test.
print(recognizer(['超', '长'] * 256))
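Both calls illustrate the input contract of this BERT-based recognizer: it expects character-level tokens, which is why the string is exploded with list(). The second call feeds one 512-character sentence, probing behaviour right around BERT's usual maximum sequence length, which is what makes this a debug script rather than a demo.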
github hankcs / HanLP / tests / demo / zh / demo_sdp.py
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-31 23:55
import hanlp

semantic_parser = hanlp.load('SEMEVAL16_NEWS_BIAFFINE_ZH')
# Input is one sentence as (token, PoS-tag) pairs.
sent = [('蜡烛', 'NN'), ('两', 'CD'), ('头', 'NN'), ('烧', 'VV')]
print(semantic_parser(sent))
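The parser consumes (token, PoS-tag) pairs, which is exactly what a PoS tagger emits, so the two chain naturally even without hanlp.pipeline. A brief sketch under that assumption, reusing the tagger from the earlier demos (the wiring is ours, not from the repository):

import hanlp

tagger = hanlp.load('CTB5_POS_RNN_FASTTEXT_ZH')
semantic_parser = hanlp.load('SEMEVAL16_NEWS_BIAFFINE_ZH')

tokens = ['蜡烛', '两', '头', '烧']
tags = tagger(tokens)  # one PoS tag per token for a single sentence
print(semantic_parser(list(zip(tokens, tags))))  # same (token, tag) format as above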