# Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.
#coding:utf-8
import paddlehub as hub

# Load the pre-trained SimNet bag-of-words semantic matching module.
simnet_bow = hub.Module(name="simnet_bow")

# text_1[i] is compared against text_2[i]; text_1 repeats the query sentence.
test_text_1 = ["这道题太难了", "这道题太难了", "这道题太难了"]
test_text_2 = ["这道题是上一年的考题", "这道题不简单", "这道题很有意思"]
inputs = {"text_1": test_text_1, "text_2": test_text_2}
results = simnet_bow.similarity(data=inputs)

# Scan the results once, remembering the best-scoring candidate sentence.
best_score, best_text = -1, ""
for item in results:
    score = item['similarity']
    if score > best_score:
        best_score, best_text = score, item['text_2']
print("The most matching with the %s is %s" % (test_text_1[0], best_text))
def test_convert_l1_regularizer(self):
    """Round-trip an fc parameter carrying an L1Decay regularizer through
    FlexibleData and check the regularizer type and coefficient survive.
    """
    program = fluid.Program()
    with fluid.program_guard(program):
        input = fluid.layers.data(name="test", shape=[1], dtype="float32")
        fluid.layers.fc(
            input=input,
            size=10,
            param_attr=fluid.ParamAttr(
                name="fc_w",
                regularizer=fluid.regularizer.L1Decay(
                    regularization_coeff=1)))
    # NOTE(review): parameters are read from the default main program, not
    # from `program` — presumably program_guard redirects there; confirm.
    fc_w = [
        param for param in
        fluid.default_main_program().global_block().iter_parameters()
    ][0]
    flexible_data = module_desc_pb2.FlexibleData()
    from_param_to_flexible_data(fc_w, flexible_data)
    param_dict = from_flexible_data_to_param(flexible_data)
    # Fixed typo in assert messages: "regularzier" -> "regularizer".
    assert fc_w.regularizer.__class__ == param_dict[
        'regularizer'].__class__, "regularizer type convert error!"
    assert fc_w.regularizer._regularization_coeff == param_dict[
        'regularizer']._regularization_coeff, "regularizer value convert error!"
def test_convert_trainable(self):
    """Check that a parameter's `trainable` flag survives the
    FlexibleData round trip with both type and value intact.
    """
    program = fluid.Program()
    with fluid.program_guard(program):
        data_in = fluid.layers.data(name="test", shape=[1], dtype="float32")
        fluid.layers.fc(
            input=data_in,
            size=10,
            param_attr=fluid.ParamAttr(name="fc_w", trainable=False))
    params = list(
        fluid.default_main_program().global_block().iter_parameters())
    fc_w = params[0]
    flexible_data = module_desc_pb2.FlexibleData()
    from_param_to_flexible_data(fc_w, flexible_data)
    param_dict = from_flexible_data_to_param(flexible_data)
    assert fc_w.trainable.__class__ == param_dict[
        'trainable'].__class__, "trainable type convert error!"
    assert fc_w.trainable == param_dict[
        'trainable'], "trainable value convert error!"
def test_convert_l2_regularizer(self):
    """Round-trip an fc parameter carrying an L2Decay regularizer through
    FlexibleData and check the regularizer type and coefficient survive.
    """
    program = fluid.Program()
    with fluid.program_guard(program):
        input = fluid.layers.data(name="test", shape=[1], dtype="float32")
        fluid.layers.fc(
            input=input,
            size=10,
            param_attr=fluid.ParamAttr(
                name="fc_w",
                regularizer=fluid.regularizer.L2Decay(
                    regularization_coeff=1.5)))
    # NOTE(review): parameters are read from the default main program, not
    # from `program` — presumably program_guard redirects there; confirm.
    fc_w = [
        param for param in
        fluid.default_main_program().global_block().iter_parameters()
    ][0]
    flexible_data = module_desc_pb2.FlexibleData()
    from_param_to_flexible_data(fc_w, flexible_data)
    param_dict = from_flexible_data_to_param(flexible_data)
    # Fixed typo in assert messages: "regularzier" -> "regularizer".
    assert fc_w.regularizer.__class__ == param_dict[
        'regularizer'].__class__, "regularizer type convert error!"
    assert fc_w.regularizer._regularization_coeff == param_dict[
        'regularizer']._regularization_coeff, "regularizer value convert error!"
def test_list_2_flexible_data(self):
    """Serialize a python list into FlexibleData and verify the proto
    type tag, element count, and per-element integer values.
    """
    input = [1, 2, 3]
    flexible_data = module_desc_pb2.FlexibleData()
    from_pyobj_to_flexible_data(input, flexible_data)
    # Fixed typo in assert messages: "convesion" -> "conversion".
    assert flexible_data.type == module_desc_pb2.LIST, "type conversion error"
    assert len(
        flexible_data.list.data) == len(input), "value conversion error"
    # List elements are keyed by their stringified index in the proto map.
    for index in range(len(input)):
        _check_int(input[index], flexible_data.list.data[str(index)])
def test_convert_error_clip_by_value(self):
    """Check that an ErrorClipByValue gradient-clip attribute survives the
    FlexibleData round trip: class plus its max and min bounds.
    """
    program = fluid.Program()
    with fluid.program_guard(program):
        data_in = fluid.layers.data(name="test", shape=[1], dtype="float32")
        fluid.layers.fc(
            input=data_in,
            size=10,
            param_attr=fluid.ParamAttr(
                name="fc_w",
                gradient_clip=fluid.clip.ErrorClipByValue(max=1)))
    params = list(
        fluid.default_main_program().global_block().iter_parameters())
    fc_w = params[0]
    flexible_data = module_desc_pb2.FlexibleData()
    from_param_to_flexible_data(fc_w, flexible_data)
    param_dict = from_flexible_data_to_param(flexible_data)
    restored = param_dict['gradient_clip_attr']
    assert fc_w.gradient_clip_attr.__class__ == restored.__class__, \
        "clip type convert error!"
    assert fc_w.gradient_clip_attr.max == restored.max, \
        "clip value convert error!"
    assert fc_w.gradient_clip_attr.min == restored.min, \
        "clip value convert error!"
def test_convert_str(self):
    """A str must survive a FlexibleData round trip unchanged."""
    input = "123"
    flexible_data = module_desc_pb2.FlexibleData()
    from_pyobj_to_flexible_data(input, flexible_data)
    output = from_flexible_data_to_pyobj(flexible_data)
    # Fixed typo in assert message: "convesion" -> "conversion".
    assert input == output, "str conversion error"
def test_convert_gradient_clip_by_global_normal(self):
    """Check that a GradientClipByGlobalNorm attribute survives the
    FlexibleData round trip: class, clip_norm, and group_name.
    """
    program = fluid.Program()
    with fluid.program_guard(program):
        data_in = fluid.layers.data(name="test", shape=[1], dtype="float32")
        fluid.layers.fc(
            input=data_in,
            size=10,
            param_attr=fluid.ParamAttr(
                name="fc_w",
                gradient_clip=fluid.clip.GradientClipByGlobalNorm(
                    clip_norm=1)))
    params = list(
        fluid.default_main_program().global_block().iter_parameters())
    fc_w = params[0]
    flexible_data = module_desc_pb2.FlexibleData()
    from_param_to_flexible_data(fc_w, flexible_data)
    param_dict = from_flexible_data_to_param(flexible_data)
    restored = param_dict['gradient_clip_attr']
    assert fc_w.gradient_clip_attr.__class__ == restored.__class__, \
        "clip type convert error!"
    assert fc_w.gradient_clip_attr.clip_norm == restored.clip_norm, \
        "clip value convert error!"
    assert fc_w.gradient_clip_attr.group_name == restored.group_name, \
        "clip value convert error!"
def test_dict_2_flexible_data(self):
    """Serialize a python dict into FlexibleData and verify the proto
    type tag, entry count, and that every key/value pair round-trips.
    """
    input = {1: 1, 2: 2, 3: 3}
    flexible_data = module_desc_pb2.FlexibleData()
    from_pyobj_to_flexible_data(input, flexible_data)
    # Fixed typo in assert messages: "convesion" -> "conversion".
    assert flexible_data.type == module_desc_pb2.MAP, "type conversion error"
    assert len(
        flexible_data.map.data) == len(input), "value conversion error"
    for key, value in flexible_data.map.data.items():
        # Proto map keys are strings; recover the original python key
        # via its recorded key type before looking it up.
        realkey = get_pykey(key, flexible_data.map.keyType[key])
        assert realkey in input, "key conversion error"
        _check_int(input[realkey], flexible_data.map.data[key])
def test_convert_int(self):
    """An int must survive a FlexibleData round trip unchanged."""
    input = 1
    flexible_data = module_desc_pb2.FlexibleData()
    from_pyobj_to_flexible_data(input, flexible_data)
    output = from_flexible_data_to_pyobj(flexible_data)
    # Fixed typo in assert message: "convesion" -> "conversion".
    assert input == output, "int conversion error"