def fit(self, X, y):
    n_samples, n_features = X.shape

    # Gram matrix
    K = np.zeros((n_samples, n_samples))
    for i in range(n_samples):
        for j in range(n_samples):
            K[i, j] = self.kernel(X[i], X[j])

    # Dual SVM as a QP: minimize (1/2) a^T P a + q^T a  s.t.  G a <= h, A a = b
    P = cvxopt.matrix(np.outer(y, y) * K)
    q = cvxopt.matrix(np.ones(n_samples) * -1)
    A = cvxopt.matrix(y.astype(np.double), (1, n_samples))  # cvxopt needs a float ('d') row vector here
    b = cvxopt.matrix(0.0)

    if self.C is None:
        # Hard margin: a_i >= 0
        G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
        h = cvxopt.matrix(np.zeros(n_samples))
    else:
        # Soft margin: 0 <= a_i <= C
        tmp1 = np.diag(np.ones(n_samples) * -1)
        tmp2 = np.identity(n_samples)
        G = cvxopt.matrix(np.vstack((tmp1, tmp2)))
        tmp1 = np.zeros(n_samples)
        tmp2 = np.ones(n_samples) * self.C
        h = cvxopt.matrix(np.hstack((tmp1, tmp2)))

    # Solve the QP problem
    solution = cvxopt.solvers.qp(P, q, G, h, A, b)
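A hedged follow-up sketch (not part of the original snippet) showing how the multipliers returned by the solver are commonly turned into support vectors; the names a_sv, X_sv, y_sv and the 1e-5 threshold are illustrative assumptions.

    # Hedged sketch: threshold the Lagrange multipliers to pick out support vectors.
    a = np.ravel(solution['x'])   # one multiplier per training sample
    support = a > 1e-5            # non-negligible multipliers mark support vectors
    a_sv, X_sv, y_sv = a[support], X[support], y[support]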
def eval_coeff(coeff, params, rows):
    # Evaluate a coefficient object into a numeric cvxopt matrix of the given size.
    v = coeff.constant_value()
    if v:
        return o.matrix(v, (rows, 1))
    else:
        value = 0
        for k, v in coeff.coeff_dict.items():
            if k == '1':
                value += o.matrix(v, (rows, 1))
            elif not ismultiply(k):
                p = params[k.rstrip('\'')]
                if istranspose(k):
                    value += v * p.T
                else:
                    value += v * p
            else:
                # Product of several parameters: evaluate right-to-left.
                keys = k.split('*')
                keys.reverse()
                mult = 1
                for k1 in keys:
                    p = params[k1.rstrip('\'')]
                    if istranspose(k1):
                        mult = p.T * mult
                    else:
                        mult = p * mult
                value += v * mult
        return value
    ## Create the +1/-1 sign-pattern rows
    Gaux = np.ones((N_constraints, n))
    for i in range(n):
        period = [1] * (2 ** i)
        period.extend([-1] * (2 ** i))
        Gaux[:, i] = np.tile(period, 2 ** (n - i - 1))
    G = opt.matrix(Gaux)
    h = opt.matrix(1.0, (N_constraints, 1))  # every signed sum of weights is bounded by 1
else:
    print("Unrecognized portfolio kind: %s" % kind)
    return -1

#################
#### Ax = b #####
# Here we place the equality constraint on the weights
A = opt.matrix(1.0, (1, n))  # the weights sum to 1
b = opt.matrix(1.0)
if kind == "Lintner":
    # Lintner: drop the sum-to-one constraint
    # (the replacement 0.1 * w_0 = 0 simply pins the first weight to zero)
    A = opt.matrix(0.0, (1, n))
    A[0, 0] = 0.1
    b = opt.matrix(0.0)
def _solve_1_slack_qp(self, constraints, n_samples):
    C = float(self.C) * n_samples  # this is how libsvm/svmstruct scale C
    joint_features = [c[0] for c in constraints]
    losses = [c[1] for c in constraints]

    joint_feature_matrix = np.vstack(joint_features)
    n_constraints = len(joint_features)
    P = cvxopt.matrix(np.dot(joint_feature_matrix, joint_feature_matrix.T))
    # q contains the loss from margin-rescaling
    q = cvxopt.matrix(-np.array(losses, dtype=float))

    # constraints: all alpha must be non-negative
    idy = np.identity(n_constraints)
    tmp1 = np.zeros(n_constraints)

    # positivity constraints:
    if self.negativity_constraint is None:
        # empty constraints
        zero_constr = np.zeros(0)
        joint_features_constr = np.zeros((0, n_constraints))
    else:
        joint_features_constr = joint_feature_matrix.T[self.negativity_constraint]
        zero_constr = np.zeros(len(self.negativity_constraint))

    # put together
    G = cvxopt.sparse(cvxopt.matrix(np.vstack((-idy, joint_features_constr))))
return optimal, allocations

solvers.options['show_progress'] = False

## Number of symbols and their returns
n = len(self.pf.symbols.keys())
returns = self.get_Returns()

## Number of sampled standard deviations for which
## we will calculate the highest return.
# N = 100  # Exponential search over the risk-aversion parameter mu.
mus = [10 ** (5.0 * t / N - 1.0) for t in range(N)]

# Convert to cvxopt matrices
S = opt.matrix(self.get_covMatrix())         # covariance matrix
pbar = opt.matrix(np.mean(returns, axis=0))  # expected returns

####### Create constraint matrices
###### Gx <= h #######
G = -opt.matrix(np.eye(n))  # negative n x n identity matrix
if kind == "Markowitz":
    h = opt.matrix(0.0, (n, 1))    # weights cannot be smaller than 0
elif kind == "Normal":
    h = opt.matrix(100.0, (n, 1))  # no real constraint on the weights (bound is just very large)
elif kind == "Lintner":
    N_constraints = 2 ** n
    G = opt.matrix(1.0, (N_constraints, n))  # will hold the +/-1 sign patterns (one row per pattern)
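A hedged sketch of how these pieces are typically used once G, h, A and b are all assembled: sweep the risk-aversion values in mus and solve one QP per value, which is the standard cvxopt risk-return recipe rather than necessarily this library's exact code.

from cvxopt import blas, solvers

# Hedged sketch: trace the efficient frontier by solving one QP per mu.
portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus]
frontier_returns = [blas.dot(pbar, x) for x in portfolios]            # expected return per portfolio
frontier_risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios]    # standard deviation per portfolio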
if not np.all(KXX >= 0):
    raise ValueError('Non-positive distance in source kernel.')
if not np.all(KXZ >= 0):
    raise ValueError('Non-positive distance in source-target kernel.')

# Compute kernels
if self.kernel_type == 'rbf':
    # Radial basis functions
    KXX = np.exp(-KXX / (2 * self.bandwidth ** 2))
    KXZ = np.exp(-KXZ / (2 * self.bandwidth ** 2))

# Collapse second kernel and normalize
KXZ = N / M * np.sum(KXZ, axis=1)

# Prepare for CVXOPT
Q = matrix(KXX, tc='d')
p = matrix(KXZ, tc='d')
G = matrix(np.concatenate((np.ones((1, N)), -1 * np.ones((1, N)),
                           -1. * np.eye(N)), axis=0), tc='d')
h = matrix(np.concatenate((np.array([N / np.sqrt(N) + N], ndmin=2),
                           np.array([N / np.sqrt(N) - N], ndmin=2),
                           np.zeros((N, 1))), axis=0), tc='d')

# Call quadratic program solver
sol = solvers.qp(Q, p, G, h)

# Return optimal coefficients as importance weights
return np.array(sol['x'])[:, 0]
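A hedged usage sketch for the importance weights returned above; kmm_weights, X_source, Z_target and y_source are hypothetical names standing in for the routine shown and its inputs, not part of the original.

from sklearn.linear_model import LogisticRegression

# Hedged sketch: reweight source samples by the estimated importance weights.
weights = kmm_weights(X_source, Z_target)   # hypothetical wrapper around the routine above
clf = LogisticRegression().fit(X_source, y_source, sample_weight=weights)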
if self.tol:
    solvers.options['reltol'] = self.tol
if self.eps == 0:
    # Quadratic part of the objective
    gram = matrix(gram)
    # Linear part of the objective
    q_lin = matrix(-kron(targets, ones(n_quantiles)))
    # LHS of the inequality constraint
    g_lhs = matrix(r_[eye(n_coefs), -eye(n_coefs)])
    # RHS of the inequality
    h_rhs = matrix(r_[self.reg_c_ * kronprobs,
                      self.reg_c_ * (1 - kronprobs)])
    # LHS of the equality constraint
    lhs_eqc = matrix(kron(ones(n_samples), eye(n_quantiles)))
    # Solve the dual optimization problem
    self.sol_ = solvers.qp(gram, q_lin, g_lhs, h_rhs, lhs_eqc,
                           matrix(zeros(n_quantiles)))
    # Set coefs
    coefs = asarray(self.sol_['x'])
else:
    def build_lhs(m, p):
        # m: n_samples
        # p: n_quantiles
        n = m * p  # n_variables
        # Get the norm bounds (m last variables)
        A = zeros(p + 1)
        A[0] = -1
import numpy as np
import torch
from cvxopt import matrix, spdiag, solvers


def _margin_cvxopt(K, Y, init_vals=None, max_iter=-1, tol=1e-6):
    '''Margin optimization with CVXOPT.'''
    n = Y.size()[0]
    YY = spdiag(Y.numpy().tolist())        # diagonal matrix of the +/-1 labels
    P = 2 * (YY * matrix(K.numpy()) * YY)  # quadratic term of the QP
    p = matrix([0.0] * n)
    G = -spdiag([1.0] * n)                 # gamma >= 0
    h = matrix([0.0] * n)
    # The coefficients of each class must sum to one
    A = matrix([[1.0 if Y[i] == +1 else 0 for i in range(n)],
                [1.0 if Y[j] == -1 else 0 for j in range(n)]]).T
    b = matrix([[1.0], [1.0]], (2, 1))
    solvers.options['show_progress'] = False
    if max_iter > 0:
        solvers.options['maxiters'] = max_iter
    solvers.options['abstol'] = tol
    sol = solvers.qp(P, p, G, h, A, b, initvals=init_vals)
    gamma = torch.Tensor(np.array(sol['x'])).double().T[0]
    return sol['primal objective'] ** .5, gamma
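A hedged usage sketch with a toy precomputed kernel; the identity kernel and balanced labels below are illustrative, not from the original code.

# Hedged sketch: a 4-sample toy problem with an identity kernel and +/-1 labels.
K_toy = torch.eye(4).double()
Y_toy = torch.tensor([1.0, 1.0, -1.0, -1.0]).double()
margin, gamma = _margin_cvxopt(K_toy, Y_toy)
print(margin, gamma)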
import cvxopt
from cvxopt import glpk

member_insts, member_inds = get_region_memberships(x, model=model,
                                                   instance_indexes=instance_indexes,
                                                   region_indexes=region_indexes)
nvars = member_inds.shape[1]

glpk.options['msg_lev'] = 'GLP_MSG_OFF'
c = cvxopt.matrix([float(v ** p) for v in volumes], tc='d')  # minimize total volume**p
# below states that each anomaly should be included in at least one region
G = cvxopt.matrix(-member_inds, tc='d')
h = cvxopt.matrix([-1] * member_inds.shape[0], tc='d')
bin_vars = [i for i in range(nvars)]
(status, soln) = cvxopt.glpk.ilp(c, G, h, B=set(bin_vars))
if soln is not None:
    soln = np.reshape(np.array(soln), newshape=(nvars,))
    idxs = np.where(soln == 1)[0]
    if False:
        logger.debug("\nregion_indexes: %d\n%s\nmember_insts: %d\n%s" %
                     (len(idxs), str(list(region_indexes[idxs])),
                      len(member_insts), str(list(member_insts))))
    return region_indexes[idxs]
else:
    return None
def projection_in_norm(self, x, M):
    """Projection of x onto the simplex induced by matrix M. Uses quadratic programming."""
    m = M.shape[0]

    # min_w (w - x)^T M (w - x)  s.t.  w >= 0, sum(w) = 1
    P = matrix(2 * M)
    q = matrix(-2 * M * x)
    G = matrix(-np.eye(m))
    h = matrix(np.zeros((m, 1)))
    A = matrix(np.ones((1, m)))
    b = matrix(1.)

    sol = solvers.qp(P, q, G, h, A, b)
    return np.squeeze(sol['x'])
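A hedged usage sketch: with M set to the identity this reduces to the Euclidean projection of x onto the probability simplex; the np.matrix inputs mirror how the method multiplies M and x, and 'algo' is a hypothetical instance of the owning class.

# Hedged sketch: project an unnormalized weight vector back onto the simplex.
x = np.matrix([[0.6], [0.9], [-0.2]])
M = np.matrix(np.eye(3))
w = algo.projection_in_norm(x, M)   # 'algo' stands in for the object defining the method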