X = operator.inputs[0]
out = operator.outputs
op = operator.raw_operator
C = op.cluster_centers_
C2 = row_norms(C, squared=True)
# Squared distances through the expansion
# ||x - c||^2 = ||x||^2 - 2 x.c + ||c||^2.
rs = OnnxReduceSumSquare(
    X, axes=[1], keepdims=1,
    op_version=onnx.defs.onnx_opset_version())
N = X.type.shape[0]
if isinstance(N, int):
    zeros = np.zeros((N, ))
else:
    # Dynamic batch size: build a zero tensor with the right shape.
    zeros = OnnxMul(
        rs, np.array([0], dtype=np.float32),
        op_version=onnx.defs.onnx_opset_version())
z = OnnxAdd(
    rs,
    OnnxGemm(
        X, C, zeros, alpha=-2., transB=1,
        op_version=onnx.defs.onnx_opset_version()),
    op_version=onnx.defs.onnx_opset_version())
y2 = OnnxAdd(C2, z, op_version=onnx.defs.onnx_opset_version())
lo = OnnxArgMin(
    y2, axis=1, keepdims=0, output_names=out[:1],
    op_version=onnx.defs.onnx_opset_version())
y2s = OnnxSqrt(
    y2, output_names=out[1:],
    op_version=onnx.defs.onnx_opset_version())
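The graph above builds the squared distances to the cluster centers with the expansion ||x - c||^2 = ||x||^2 - 2 x.c + ||c||^2, then ArgMin picks the label and Sqrt produces the scores. A minimal numpy sketch (illustration only; X_np and C are hypothetical arrays, not names from the converter) checking that the expansion matches a direct computation:

import numpy as np

X_np = np.random.rand(5, 3).astype(np.float32)
C = np.random.rand(4, 3).astype(np.float32)
# ReduceSumSquare + Gemm(alpha=-2, transB=1) + Add, written with numpy:
y2 = (X_np ** 2).sum(axis=1, keepdims=True) - 2 * X_np @ C.T \
    + (C ** 2).sum(axis=1)
direct = ((X_np[:, None, :] - C[None, :, :]) ** 2).sum(axis=2)
assert np.allclose(y2, direct, atol=1e-5)
label = y2.argmin(axis=1)             # role of OnnxArgMin
scores = np.sqrt(np.maximum(y2, 0))   # role of OnnxSqrt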
def nmf_to_onnx(W, H):
    """
    The function converts an NMF described by matrices
    *W*, *H* (*WH* approximates the training data *M*)
    into a function which takes two indices *(i, j)*
    and returns the prediction for them. It assumes
    these indices apply to the training data.
    """
    col = OnnxArrayFeatureExtractor(H, 'col')
    row = OnnxArrayFeatureExtractor(W.T, 'row')
    dot = OnnxMul(col, row)
    res = OnnxReduceSum(dot, output_names="rec")
    indices_type = np.array([0], dtype=np.int64)
    onx = res.to_onnx(inputs={'col': indices_type,
                              'row': indices_type},
                      outputs=[('rec', FloatTensorType((None, 1)))])
    return onx
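A hedged usage sketch, assuming *W* and *H* come from scikit-learn's NMF and onnxruntime is available to execute the resulting graph (none of these names appear in the snippet above):

import numpy as np
import onnxruntime
from sklearn.decomposition import NMF

M = np.random.rand(6, 4).astype(np.float32)
nmf = NMF(n_components=2, init='random', random_state=0)
W = nmf.fit_transform(M).astype(np.float32)
H = nmf.components_.astype(np.float32)

onx = nmf_to_onnx(W, H)
sess = onnxruntime.InferenceSession(onx.SerializeToString(),
                                    providers=['CPUExecutionProvider'])
# Reconstruction of training cell (i=1, j=2), close to (W @ H)[1, 2].
pred = sess.run(None, {'row': np.array([1], dtype=np.int64),
                       'col': np.array([2], dtype=np.int64)})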
def test_mul(self):
    from skl2onnx.algebra.onnx_ops import OnnxMul
    assert OnnxMul.operator_name == 'Mul'
    assert isinstance(
        OnnxMul(
            'a', 'b', op_version=onnx.defs.onnx_opset_version()),
        OnnxOperator)
"They cannot be computed here as the same operation "
"(matrix inversion) produces too many discrepencies "
"if done with single floats than double floats.")
_K_inv = op._K_inv
# y_var = self.kernel_.diag(X)
y_var = convert_kernel_diag(kernel, X, dtype=dtype,
optim=options.get('optim', None),
op_version=opv)
# y_var -= np.einsum("ij,ij->i",
# np.dot(K_trans, self._K_inv), K_trans)
k_dot = OnnxMatMul(k_trans, _K_inv.astype(dtype), op_version=opv)
ys_var = OnnxSub(
y_var, OnnxReduceSum(
OnnxMul(k_dot, k_trans, op_version=opv),
axes=[1], keepdims=0, op_version=opv),
op_version=opv)
# y_var_negative = y_var < 0
# if np.any(y_var_negative):
# y_var[y_var_negative] = 0.0
ys0_var = OnnxMax(ys_var, np.array([0], dtype=dtype),
op_version=opv)
# var = np.sqrt(ys0_var)
var = OnnxSqrt(ys0_var, output_names=out[1:], op_version=opv)
var.set_onnx_name_prefix('gprv')
outputs.append(var)
for o in outputs:
o.add_to(scope, container)
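The OnnxMul + OnnxReduceSum pair stands in for the commented einsum: for matrices A and B of the same shape, np.einsum("ij,ij->i", A, B) equals (A * B).sum(axis=1). A small numpy check (illustrative only; the array names are made up):

import numpy as np

K_trans = np.random.rand(5, 3)
K_inv = np.random.rand(3, 3)
a = np.einsum("ij,ij->i", np.dot(K_trans, K_inv), K_trans)
b = (np.dot(K_trans, K_inv) * K_trans).sum(axis=1)  # Mul + ReduceSum form
assert np.allclose(a, b)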
def get_proba_and_label(container, nb_classes, reshaped,
                        wei, axis, opv):
    """
    This function computes the label by choosing the majority
    label among the nearest neighbours.
    """
    conc = []
    for cl in range(nb_classes):
        cst = np.array([cl], dtype=np.int64)
        mat_cast = OnnxCast(
            OnnxEqual(reshaped, cst, op_version=opv),
            op_version=opv,
            to=container.proto_dtype)
        if wei is not None:
            mat_cast = OnnxMul(mat_cast, wei, op_version=opv)
        wh = OnnxReduceSum(mat_cast, axes=[1], op_version=opv)
        conc.append(wh)
    all_together = OnnxConcat(*conc, axis=1, op_version=opv)
    sum_prob = OnnxReduceSum(
        all_together, axes=[1], op_version=opv, keepdims=1)
    res = OnnxArgMax(all_together, axis=axis, op_version=opv,
                     keepdims=0)
    return all_together, sum_prob, res
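The Equal/Cast/ReduceSum sequence is a one-hot trick: comparing the neighbour labels to each class, casting the booleans to floats and summing over the neighbour axis yields per-class (possibly weighted) votes. A numpy sketch of the same idea (illustrative, not the converter itself):

import numpy as np

labels = np.array([[0, 2, 2], [1, 1, 0]])   # neighbour labels per sample
nb_classes = 3
votes = np.stack([(labels == cl).sum(axis=1) for cl in range(nb_classes)],
                 axis=1).astype(np.float32)
proba = votes / votes.sum(axis=1, keepdims=True)  # role of sum_prob
pred = votes.argmax(axis=1)                       # role of OnnxArgMax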
if optim is None:
    dists = onnx_cdist(
        X, Y, metric="euclidean", dtype=dtype, op_version=op_version)
elif optim == 'cdist':
    dists = OnnxCDist(X, Y, metric="euclidean", op_version=op_version)
else:
    raise ValueError("Unknown optimization '{}'.".format(optim))
t_pi = py_make_float_array(pi, dtype=dtype)
t_periodicity = py_make_float_array(periodicity, dtype=dtype)
arg = OnnxMul(OnnxDiv(dists, t_periodicity, op_version=op_version),
              t_pi, op_version=op_version)
sin_of_arg = OnnxSin(arg, op_version=op_version)
t_2 = py_make_float_array(2, dtype=dtype)
t__2 = py_make_float_array(-2, dtype=dtype)
t_length_scale = py_make_float_array(length_scale, dtype=dtype)
# K = exp(-2 * (sin(pi * d / periodicity) / length_scale) ** 2)
K = OnnxExp(
    OnnxMul(
        OnnxPow(
            OnnxDiv(sin_of_arg, t_length_scale, op_version=op_version),
            t_2, op_version=op_version),
        t__2, op_version=op_version),
    op_version=op_version)
return OnnxIdentity(K, op_version=op_version, **kwargs)
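The graph encodes scikit-learn's ExpSineSquared kernel, k(d) = exp(-2 * (sin(pi * d / periodicity) / length_scale) ** 2) with d the euclidean distance. A numpy sketch of the same formula, compared against scikit-learn (illustration only):

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.gaussian_process.kernels import ExpSineSquared

X = np.random.rand(4, 2)
length_scale, periodicity = 1.2, 2.5
d = cdist(X, X, metric="euclidean")
K = np.exp(-2 * (np.sin(np.pi * d / periodicity) / length_scale) ** 2)
assert np.allclose(K, ExpSineSquared(length_scale, periodicity)(X))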
"""
if optim == 'cdist':
    from skl2onnx.algebra.custom_ops import OnnxCDist
    dist = OnnxCDist(X, Y, metric=metric, op_version=op_version,
                     **kwargs)
elif optim is None:
    dim_in = Y.shape[1] if hasattr(Y, 'shape') else None
    dim_out = Y.shape[0] if hasattr(Y, 'shape') else None
    dist = onnx_cdist(X, Y, metric=metric, dtype=dtype,
                      op_version=op_version,
                      dim_in=dim_in, dim_out=dim_out,
                      **kwargs)
else:
    raise ValueError("Unknown optimisation '{}'.".format(optim))
if op_version < 10:
    # TopK-1 only returns the largest values: negate the distances
    # to retrieve the k nearest neighbours.
    neg_dist = OnnxMul(dist, np.array(
        [-1], dtype=dtype), op_version=op_version)
    node = OnnxTopK_1(neg_dist, k=k, op_version=1, **kwargs)
elif op_version < 11:
    neg_dist = OnnxMul(dist, np.array(
        [-1], dtype=dtype), op_version=op_version)
    node = OnnxTopK_10(neg_dist, np.array([k], dtype=np.int64),
                       op_version=10, **kwargs)
else:
    # TopK-11 supports largest=0 to select the smallest values directly.
    node = OnnxTopK_11(dist, np.array([k], dtype=np.int64),
                       largest=0, sorted=1,
                       op_version=11, **kwargs)
if keep_distances:
    return (node[1], OnnxMul(node[0], np.array(
        [-1], dtype=dtype), op_version=op_version))
return node[1]
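Before opset 11, TopK could only return the largest values, hence the multiplication by -1; opset 11 added largest=0. A numpy sketch of the negation trick (illustrative; values chosen distinct so tie-breaking does not matter):

import numpy as np

dist = np.array([[3., 1., 2.], [.5, 4., 1.5]])
k = 2

def topk_largest(x, k):
    # emulates ONNX TopK, which returns the k largest values first
    idx = np.argsort(-x, axis=1)[:, :k]
    return np.take_along_axis(x, idx, axis=1), idx

# opset < 11: run TopK(largest) on the negated distances
neg_vals, idx_old = topk_largest(-dist, k)
# opset >= 11: largest=0 selects the k smallest distances directly
idx_new = np.argsort(dist, axis=1)[:, :k]
assert np.array_equal(idx_old, idx_new)
# multiplying the values by -1 recovers the distances (keep_distances branch)
assert np.allclose(-neg_vals, np.take_along_axis(dist, idx_new, axis=1))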
opv = container.target_opset
C = op.cluster_centers_
input_name = X
if isinstance(X.type, Int64TensorType):
    # Cast integer inputs to float before computing distances.
    x_cast = OnnxCast(X, to=onnx_proto.TensorProto.FLOAT, op_version=opv)
    input_name = x_cast
C2 = row_norms(C, squared=True)
rs = OnnxReduceSumSquare(input_name, axes=[1], keepdims=1, op_version=opv)
N = X.type.shape[0]
if isinstance(N, int):
    zeros = np.zeros((N, ))
else:
    zeros = OnnxMul(rs, np.array([0], dtype=np.float32), op_version=opv)
z = OnnxAdd(rs, OnnxGemm(input_name, C, zeros, alpha=-2.,
                         transB=1, op_version=opv),
            op_version=opv)
y2 = OnnxAdd(C2, z, op_version=opv)
ll = OnnxArgMin(y2, axis=1, keepdims=0, output_names=out[:1],
                op_version=opv)
y2s = OnnxSqrt(y2, output_names=out[1:], op_version=opv)
ll.add_to(scope, container)
y2s.add_to(scope, container)
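As in the first snippet, ArgMin over y2 plays the role of KMeans.predict and Sqrt(y2) the role of KMeans.transform. A numpy check against scikit-learn (illustration only; X_np is a hypothetical input):

import numpy as np
from sklearn.cluster import KMeans

X_np = np.random.rand(20, 3)
km = KMeans(n_clusters=4, n_init=10, random_state=0).fit(X_np)
C = km.cluster_centers_
y2 = (X_np ** 2).sum(axis=1, keepdims=True) - 2 * X_np @ C.T \
    + (C ** 2).sum(axis=1)
assert np.array_equal(y2.argmin(axis=1), km.predict(X_np))
assert np.allclose(np.sqrt(np.maximum(y2, 0)), km.transform(X_np), atol=1e-6)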
if optim is None:
    dist = onnx_cdist(X_scaled, x_train_scaled,
                      metric='sqeuclidean',
                      dtype=dtype, op_version=op_version)
elif optim == 'cdist':
    dist = OnnxCDist(X_scaled, x_train_scaled,
                     metric='sqeuclidean',
                     op_version=op_version)
else:
    raise ValueError("Unknown optimization '{}'.".format(optim))
tensor_value = py_make_float_array(-0.5, dtype=dtype, as_tensor=True)
cst5 = OnnxConstantOfShape(
    OnnxShape(zerov, op_version=op_version),
    value=tensor_value, op_version=op_version)
# K = np.exp(-.5 * dists)
exp = OnnxExp(OnnxMul(dist, cst5, op_version=op_version),
              output_names=output_names, op_version=op_version)
# This should not be needed.
# K = squareform(K)
# np.fill_diagonal(K, 1)
return exp
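The Exp(Mul(dist, -0.5)) pair reproduces K = exp(-0.5 * d^2) on squared euclidean distances, i.e. an RBF kernel with unit length scale. A numpy sketch (illustration only; X_scaled and x_train_scaled stand for pre-scaled arrays):

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.gaussian_process.kernels import RBF

X_scaled = np.random.rand(5, 2)
x_train_scaled = np.random.rand(7, 2)
d2 = cdist(X_scaled, x_train_scaled, metric='sqeuclidean')
K = np.exp(-.5 * d2)
assert np.allclose(K, RBF(length_scale=1.0)(X_scaled, x_train_scaled))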
if isinstance(kernel, ExpSineSquared):
    if not isinstance(kernel.length_scale, (float, int)):
        raise NotImplementedError(
            "length_scale should be float not {}.".format(
                type(kernel.length_scale)))
    return _convert_exp_sine_squared(
        X, Y=X if x_train is None else x_train,
        length_scale=kernel.length_scale,
        periodicity=kernel.periodicity,
        dtype=dtype, optim=optim, op_version=op_version)
raise ValueError("Unknown optimisation '{}'.".format(optim))
if op_version < 10:
neg_dist = OnnxMul(dist, np.array(
[-1], dtype=dtype), op_version=op_version)
node = OnnxTopK_1(neg_dist, k=k, op_version=1, **kwargs)
elif op_version < 11:
neg_dist = OnnxMul(dist, np.array(
[-1], dtype=dtype), op_version=op_version)
node = OnnxTopK_10(neg_dist, np.array([k], dtype=np.int64),
op_version=10, **kwargs)
else:
node = OnnxTopK_11(dist, np.array([k], dtype=np.int64),
largest=0, sorted=1,
op_version=11, **kwargs)
if keep_distances:
return (node[1], OnnxMul(node[0], np.array(
[-1], dtype=dtype), op_version=op_version))
return node[1]