How to use the text2vec.bert.tokenization module in text2vec

To help you get started, we’ve selected a few text2vec examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github shibing624 / text2vec / text2vec / bert / model.py View on Github external
def get_sentence_examples(self, questions, prefix):
        """Build a list of InputExample pairs from tab-separated lines.

        Each line is expected to hold ``text_a<TAB>text_b<TAB>label``.

        :param questions: iterable of raw tab-separated strings
        :param prefix: string used to namespace the generated guids
        :return: list of InputExample
        """
        examples = []
        for idx, raw in enumerate(questions):
            fields = raw.strip().split('\t')
            # guid format "prefix-index" keeps examples traceable to their source line.
            examples.append(InputExample(
                guid='%s-%d' % (prefix, idx),
                text_a=tokenization.convert_to_unicode(str(fields[0])),
                text_b=tokenization.convert_to_unicode(str(fields[1])),
                label=str(fields[2])))
        return examples
github shibing624 / text2vec / text2vec / bert / model.py View on Github external
def get_sentence_examples(self, questions, prefix):
        """Build a list of InputExample pairs from tab-separated lines.

        Each line is expected to hold ``text_a<TAB>text_b<TAB>label``;
        lines with fewer than 3 fields will raise IndexError.

        :param questions: iterable of raw tab-separated strings
        :param prefix: string used to namespace the generated guids
        :return: list of InputExample
        """
        data_list = []
        for index, data in enumerate(questions):
            data = data.strip().split('\t')
            # guid format "prefix-index" keeps examples traceable to their source line.
            guid = '%s-%d' % (prefix, index)
            text_a = tokenization.convert_to_unicode(str(data[0]))
            text_b = tokenization.convert_to_unicode(str(data[1]))
            label = str(data[2])
            data_list.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return data_list
github shibing624 / text2vec / text2vec / bert / extract_feature.py View on Github external
def _to_example(sentences):
        """
        Convert sentences to InputExample objects.

        A line containing " ||| " is split into a (text_a, text_b) pair;
        any other non-empty line becomes a single-sentence example.
        Empty lines (after unicode conversion) are skipped.

        :param sentences: list of strings
        :return: generator of InputExample
        """
        # BUG FIX: the docstring must be the FIRST statement of the function
        # (PEP 257); in the original it followed `import re`, so it was a
        # discarded string expression rather than an actual docstring.
        import re
        unique_id = 0
        for ss in sentences:
            line = tokenization.convert_to_unicode(ss)
            if not line:
                continue
            line = line.strip()
            text_a = None
            text_b = None
            # " ||| " is the conventional separator for sentence pairs.
            m = re.match(r"^(.*) \|\|\| (.*)$", line)
            if m is None:
                text_a = line
            else:
                text_a = m.group(1)
                text_b = m.group(2)
            yield InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)
            unique_id += 1
github shibing624 / text2vec / text2vec / bert / extract_feature.py View on Github external
input_masks.append(input_mask)
            # Zero-pad up to the sequence length.
            # NOTE(review): input_ids/input_mask/input_type_ids are padded in
            # lockstep, so after this loop all three have length seq_length.
            while len(input_ids) < seq_length:
                input_ids.append(0)
                input_mask.append(0)
                input_type_ids.append(0)

            # Sanity checks: padding above guarantees exact seq_length.
            assert len(input_ids) == seq_length
            assert len(input_mask) == seq_length
            assert len(input_type_ids) == seq_length

            # Log the first 5 examples for manual inspection of tokenization.
            if ex_index < 5:
                tf.logging.info("*** Example ***")
                tf.logging.info("unique_id: %s" % (example.unique_id))
                tf.logging.info("tokens: %s" % " ".join(
                    [tokenization.printable_text(x) for x in tokens]))
                tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
                tf.logging.info(
                    "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))

            # Emit one feature per example; this is a generator in the
            # enclosing (not fully visible) function.
            yield InputFeatures(
                unique_id=example.unique_id,
                tokens=tokens,
                input_ids=input_ids,
                input_mask=input_mask,
                input_type_ids=input_type_ids)
github shibing624 / text2vec / text2vec / bert / model.py View on Github external
# Zero-pad up to the sequence length.
        # NOTE(review): the three lists are padded in lockstep so they all end
        # at exactly max_seq_length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)

        # Sanity checks: padding above guarantees exact max_seq_length.
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        # Map the string label to its integer id; raises KeyError on an
        # unknown label, which surfaces bad training data early.
        label_id = label_map[example.label]
        # Log the first 5 examples for manual inspection of tokenization.
        if ex_index < 5:
            tf.logging.info("*** Example ***")
            tf.logging.info("guid: %s" % (example.guid))
            tf.logging.info("tokens: %s" % " ".join(
                [tokenization.printable_text(x) for x in tokens]))
            tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

        feature = InputFeatures(
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids,
            label_id=label_id)
        return feature
github shibing624 / text2vec / text2vec / bert / extract_feature.py View on Github external
def __init__(self, max_seq_len=32, batch_size=32, layer_indexes=None, model_dir='', output_dir=''):
        """
        Init BertVector.

        :param max_seq_len: maximum token sequence length fed to the model
        :param batch_size: depending on your memory, default is 32
        :param layer_indexes: which BERT layers to extract features from;
            None (the default) means [-2], the second-to-last layer
        :param model_dir: directory holding vocab.txt, bert_config.json and
            bert_model.ckpt
        :param output_dir: where the optimized graph is written
        """
        # BUG FIX: the original signature used the mutable default
        # `layer_indexes=[-2]`, a single list object shared by every call.
        # Use a None sentinel and create the list per call instead.
        if layer_indexes is None:
            layer_indexes = [-2]
        self.max_seq_length = max_seq_len
        self.layer_indexes = layer_indexes
        self.gpu_memory_fraction = 1
        self.model_dir = model_dir
        # Standard BERT checkpoint layout inside model_dir.
        vocab_file = os.path.join(model_dir, 'vocab.txt')
        config_name = os.path.join(model_dir, 'bert_config.json')
        ckpt_name = os.path.join(model_dir, 'bert_model.ckpt')
        self.graph_path = optimize_graph(layer_indexes=layer_indexes, config_name=config_name, ckpt_name=ckpt_name,
                                         max_seq_len=max_seq_len, output_dir=output_dir)
        self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=True)
        self.batch_size = batch_size
        self.estimator = self.get_estimator()
        # maxsize=1 applies backpressure: the producer blocks until the
        # prediction thread drains the queue.
        self.input_queue = Queue(maxsize=1)
        self.output_queue = Queue(maxsize=1)
        # Daemon thread so the process can exit without joining it.
        self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)
        self.predict_thread.start()
        self.sentence_len = 0
github shibing624 / text2vec / text2vec / bert / model.py View on Github external
def __init__(self, data_dir='', model_dir='', output_dir='', batch_size=128, max_seq_len=32,
                 num_train_epochs=10, learning_rate=0.00005, gpu_memory_fraction=0.9):
        """
        Init the BERT similarity model wrapper.

        :param data_dir: directory with the training/eval data
        :param model_dir: directory holding vocab.txt, bert_config.json and
            bert_model.ckpt
        :param output_dir: where fine-tuned artifacts are written
        :param batch_size: training batch size
        :param max_seq_len: maximum token sequence length
        :param num_train_epochs: number of training epochs
        :param learning_rate: optimizer learning rate
        :param gpu_memory_fraction: fraction of GPU memory to allocate
        """
        self.model_dir = model_dir
        # Standard BERT checkpoint layout inside model_dir.
        for attr, fname in (('vocab_file', 'vocab.txt'),
                            ('config_name', 'bert_config.json'),
                            ('ckpt_name', 'bert_model.ckpt')):
            setattr(self, attr, os.path.join(model_dir, fname))
        self.output_dir = output_dir
        self.mode = None
        self.max_seq_len = max_seq_len
        self.tokenizer = tokenization.FullTokenizer(vocab_file=self.vocab_file, do_lower_case=True)
        self.batch_size = batch_size
        # Estimator is built lazily elsewhere.
        self.estimator = None
        self.processor = TrainProcessor()
        tf.logging.set_verbosity(tf.logging.INFO)
        self.learning_rate = learning_rate

        self.data_dir = data_dir
        self.num_train_epochs = num_train_epochs
        self.gpu_memory_fraction = gpu_memory_fraction