# TVM regression test: build a one-layer Core ML model with NeuralNetworkBuilder,
# convert it through the TVM CoreML frontend, and compare against a NumPy reference.
# Assumes the usual test-file imports (numpy as np, tvm, topi, coremltools as cm,
# coremltools.models.datatypes, NeuralNetworkBuilder) and the helpers
# run_tvm_graph / ctx_list defined elsewhere in the test module.
def verify_l2_normalize(input_dim, eps):
    dtype = "float32"
    a_np = np.random.uniform(size=input_dim).astype(dtype)
    b_np = topi.testing.l2_normalize_python(a_np, eps, 1)

    inputs = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_l2_normalize(name='L2', epsilon=eps, input_name='input', output_name='output')

    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, a_np, 'input', b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
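# A minimal sketch of how the verifier above is typically driven; the shape and
# epsilon here are illustrative, not taken from the original test suite.
def test_forward_l2_normalize():
    verify_l2_normalize((1, 3, 20, 20), 0.001)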
# Convolution analogue of the test above. The excerpt starts mid-function, so the
# signature, dtype, and the input unpacking are reconstructed here so the snippet
# parses; `filter` follows the (out_channels, in_channels, kH, kW) convention used
# by the body.
def verify_convolution(input_dim, filter, padding):
    dtype = "float32"
    N, C, H, W = input_dim
    OC, _, KH, KW = filter
    a_np = np.random.uniform(size=input_dim).astype(dtype)
    w_np = np.random.uniform(size=(OC, C, KH, KW)).astype(dtype)
    w_np_cm = np.transpose(w_np, axes=(2, 3, 1, 0))  # Core ML expects (kH, kW, in, out)
    b_np = conv2d_nchw_python(a_np, w_np, [1, 1], padding)

    inputs = [('input1', datatypes.Array(C, H, W))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_convolution(name='conv', kernel_channels=3,  # assumes a 3-channel (RGB) input
                            output_channels=OC,
                            height=KH, width=KW, stride_height=1, stride_width=1,
                            border_mode=padding.lower(), groups=1,
                            W=w_np_cm, b=None, has_bias=False,
                            is_deconv=False,
                            input_name='input1',
                            output_name='output')

    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, [a_np],
                            ['input1'], output_shape=None)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
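# Illustrative invocation (values assumed, not from the excerpt): a 3-channel input
# keeps the hard-coded kernel_channels=3 above consistent with the weight shape.
def test_forward_convolution():
    verify_convolution(input_dim=(1, 3, 224, 224), filter=(32, 3, 3, 3), padding='VALID')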
# Element-wise MAX over three inputs, verified the same way. The def line and dtype
# are reconstructed; the original excerpt starts at the random-input generation.
def verify_MAX(input_dim):
    dtype = "float32"
    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)
    b_np = np.max((a_np1, a_np2, a_np3), axis=0)

    inputs = [('input1', datatypes.Array(*input_dim)),
              ('input2', datatypes.Array(*input_dim)),
              ('input3', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Max',
                            input_names=['input1', 'input2', 'input3'],
                            output_name='output',
                            mode='MAX')

    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, [a_np1, a_np2, a_np3],
                            ['input1', 'input2', 'input3'], b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
# Train a scikit-learn classifier on Iris, convert it to Core ML, and predict with
# both. The imports and the Iris load are added so the excerpt runs on its own;
# the original excerpt (written for Python 2) began at the model fit.
import coremltools
import joblib
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

iris = load_iris()
model = LogisticRegression()
model.fit(iris.data, iris.target)

# Make a prediction
print('prediction with scikit model:')
print(iris.target_names[model.predict([[1.0, 2.0, 2.0, 3.0]])])

# Dump the model with joblib for comparison
joblib.dump(model, 'iris.pkl')

# Export and save the Core ML model
coreml_model = coremltools.converters.sklearn.convert(model, iris.feature_names, 'iris class')
coreml_model.save('iris.mlmodel')

# Load the model back
loaded_model = coremltools.models.MLModel('iris.mlmodel')
# You can check the model's specification
print(loaded_model.get_spec())

input_data = {
    'sepal length (cm)': 1.0,
    'sepal width (cm)': 2.0,
    'petal length (cm)': 2.0,
    'petal width (cm)': 3.0
}
print('prediction with coreml model:')
print(loaded_model.predict(input_data))
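# The joblib dump above is made "for comparison", but the excerpt never loads it
# back. A minimal sketch of that round trip (file and variable names as above):
reloaded = joblib.load('iris.pkl')
print('prediction with reloaded scikit model:')
print(iris.target_names[reloaded.predict([[1.0, 2.0, 2.0, 3.0]])])
# This and loaded_model.predict(input_data) above should identify the same class.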
# Excerpt from an activity-classifier exporter (turicreate-style): a dense layer
# plus softmax head is appended to the network, class labels are attached, and the
# resulting MLModel is annotated with input/output descriptions and metadata.

# Softmax classification head
builder.add_inner_product(name='dense_layer1',
                          W=weights['dense1_weight'], b=weights['dense1_bias'],
                          has_bias=True,
                          input_channels=_net_params['dense_h'],
                          output_channels=self.num_classes,
                          input_name='dense1_in', output_name='softmax_in')
builder.add_softmax(name=prob_name,
                    input_name='softmax_in',
                    output_name=prob_name)

labels = list(map(str, sorted(self._target_id_map.keys())))
builder.set_class_labels(labels)

mlmodel = _cmt.models.MLModel(builder.spec)
model_type = 'activity classifier'
mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)

# Add useful information to the mlmodel
features_str = ', '.join(self.features)
mlmodel.input_description['features'] = u'Window \xd7 [%s]' % features_str
mlmodel.input_description['lstm_h_in'] = 'LSTM hidden state input'
mlmodel.input_description['lstm_c_in'] = 'LSTM cell state input'
mlmodel.output_description[prob_name] = 'Activity prediction probabilities'
mlmodel.output_description['classLabel'] = 'Class label of top prediction'
mlmodel.output_description['lstm_h_out'] = 'LSTM hidden state output'
mlmodel.output_description['lstm_c_out'] = 'LSTM cell state output'
_coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {
    'prediction_window': str(self.prediction_window),
    'session_id': self.session_id,
    'target': self.target,
    'features': ','.join(self.features),
})  # the original call is truncated mid-dict in this excerpt; closed here so it parses
# Excerpt from an image-similarity exporter. It starts mid-call: the preceding
# (truncated) builder.add_elementwise(...) combines intermediate layers into the
# squared distance and ends with
#     output_name='v^2-2vu+u^2')
# v^2-2vu+u^2 = (v-u)^2 is non-negative, but some computations on GPU may produce
# small negative values, so RELU is applied before taking the square root.
builder.add_activation('relu', non_linearity='RELU',
                       input_name='v^2-2vu+u^2', output_name='relu')
builder.add_unary('sqrt', mode='sqrt', input_name='relu', output_name=output_name)
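# A small NumPy illustration (not part of the exporter) of why the RELU guard
# matters: floating-point cancellation can leave (v-u)^2 slightly negative, and
# taking the square root of a negative value yields NaN unless it is clamped first.
import numpy as np
squared_dist = np.array([4.0, 0.0, -1e-7])    # last entry: tiny negative from round-off
print(np.sqrt(squared_dist))                  # -> [2. 0. nan]
print(np.sqrt(np.maximum(squared_dist, 0)))   # -> [2. 0. 0.]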
# Finalize model
if self.model != 'VisionFeaturePrint_Scene':
    _mxnet_converter._set_input_output_layers(builder, [input_name], [output_name])
    builder.set_input([input_name], [self.input_image_shape])
    builder.set_output([output_name], [(num_examples,)])
    _cmt.models.utils.rename_feature(builder.spec, input_name, self.feature)
    builder.set_pre_processing_parameters(image_input_names=self.feature)
    mlmodel = _cmt.models.MLModel(builder.spec)
else:
    top_spec.pipeline.models.extend([builder.spec])
    mlmodel = _cmt.models.MLModel(top_spec)

# Add metadata
model_type = 'image similarity'
mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)
mlmodel.input_description[self.feature] = u'Input image'
mlmodel.output_description[output_name] = u'Distances between the input and reference images'
_coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {
    'model': self.model,
    'num_examples': str(self.num_examples)
}, version=ImageSimilarityModel._PYTHON_IMAGE_SIMILARITY_VERSION)
mlmodel.save(filename)
# Tail of an MXNet-to-Core ML converter: wire up inputs/outputs, optionally attach
# preprocessing parameters and class labels, then build the MLModel from the spec.
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
if preprocessor_args is not None:
    builder.set_pre_processing_parameters(**preprocessor_args)
if class_labels is not None:
    if type(class_labels) is str:
        # A string is treated as the path to a synset file with one label per line.
        labels = [l.strip() for l in open(class_labels).readlines()]
    elif type(class_labels) is list:
        labels = class_labels
    else:
        raise TypeError("class_labels is of unknown type %s; expected a string "
                        "(path to a synset file) or a list of strings." % type(class_labels))
    builder.set_class_labels(class_labels=labels)

# Return the model
return _coremltools.models.MLModel(builder.spec)
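# A minimal standalone sketch (names are illustrative, not from the converter) of
# the class_labels normalization performed above: a string is read as a synset
# file, a list is used as-is, anything else is rejected.
def _normalize_class_labels(class_labels):
    if isinstance(class_labels, str):
        with open(class_labels) as f:
            return [line.strip() for line in f]
    if isinstance(class_labels, list):
        return list(class_labels)
    raise TypeError("expected a file path or a list of strings, got %s" % type(class_labels))

print(_normalize_class_labels(['cat', 'dog']))  # -> ['cat', 'dog']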
# Excerpt from an image-classifier exporter. It starts inside a loop that appends
# each class label string to the spec (the loop header is not part of the excerpt):
#     nn_spec.stringClassLabels.vector.append(c)
prob_name = self.target + 'Probability'
label_name = self.target
old_output_name = nn_spec.layers[-1].name
coremltools.models.utils.rename_feature(spec, 'classLabel', label_name)
coremltools.models.utils.rename_feature(spec, old_output_name, prob_name)
if nn_spec.layers[-1].name == old_output_name:
    nn_spec.layers[-1].name = prob_name
if nn_spec.labelProbabilityLayerName == old_output_name:
    nn_spec.labelProbabilityLayerName = prob_name
coremltools.models.utils.rename_feature(spec, 'data', self.feature)
if len(nn_spec.preprocessing) > 0:
    nn_spec.preprocessing[0].featureName = self.feature

mlmodel = coremltools.models.MLModel(spec)
model_type = 'image classifier (%s)' % self.model
mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)
mlmodel.input_description[self.feature] = u'Input image'
mlmodel.output_description[prob_name] = 'Prediction probabilities'
mlmodel.output_description[label_name] = 'Class label of top prediction'

model_metadata = {
    'model': self.model,
    'target': self.target,
    'features': self.feature,
    'max_iterations': str(self.max_iterations),
}
# dict.update() returns None, so assigning its result would always yield None;
# update the dict in place and pass it along.
model_metadata.update(_coreml_utils._get_tc_version_info())
_coreml_utils._set_model_metadata(mlmodel,
                                  self.__class__.__name__,
                                  model_metadata)  # call truncated in the excerpt; closed so it parses
Returns
-------
model : MLModel
    Returns an MLModel instance representing a Core ML model.

Examples
--------
.. sourcecode:: python

    # Convert it with default input and output names
    >>> import coremltools
    >>> coreml_model = coremltools.converters.xgboost.convert(model)

    # Saving the Core ML model to a file.
    >>> coreml_model.save('my_model.mlmodel')
"""
return _MLModel(_convert_tree_ensemble(
    model,
    feature_names,
    target,
    force_32bit_float=force_32bit_float,
    mode=mode,
    class_labels=class_labels,
    n_classes=n_classes,
))