How to use the cvxpy.sum_squares function in cvxpy

To help you get started, we've selected a few cvxpy.sum_squares examples, based on popular ways the function is used in public projects.

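Before looking at the real-world examples below, here is a minimal, self-contained sketch of the function itself: cp.sum_squares(expr) returns the sum of the squared entries of expr, which makes it the natural way to express a least-squares objective. The data here is randomly generated purely for illustration and does not come from any of the projects below.

import cvxpy as cp
import numpy as np

# Illustrative least-squares data.
np.random.seed(0)
m, n = 20, 10
A = np.random.randn(m, n)
b = np.random.randn(m)

# sum_squares(A @ x - b) is the squared Euclidean norm ||A @ x - b||_2^2.
x = cp.Variable(n)
prob = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b)))
prob.solve()

print("optimal objective:", prob.value)

Because sum_squares is a convex atom, it composes freely with other convex terms, such as the l1 and ridge penalties that appear in the examples below.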

github cvxgrp / cvxpylayers / cvxpylayers / torch / test_cvxpylayer.py
def test_not_enough_parameters_at_call_time(self):
    x = cp.Variable(1)
    lam = cp.Parameter(1, nonneg=True)
    lam2 = cp.Parameter(1, nonneg=True)
    objective = lam * cp.norm(x, 1) + lam2 * cp.sum_squares(x)
    prob = cp.Problem(cp.Minimize(objective))
    layer = CvxpyLayer(prob, [lam, lam2], [x])  # noqa: F841
    with self.assertRaisesRegex(
            ValueError,
            'A tensor must be provided for each CVXPY parameter.*'):
        layer(lam)

github cvxgrp / cvxpylayers / cvxpylayers / torch / test_cvxpylayer.py
def test_entropy_maximization(self):
    set_seed(243)
    n, m, p = 5, 3, 2

    tmp = np.random.rand(n)
    A_np = np.random.randn(m, n)
    b_np = A_np.dot(tmp)
    F_np = np.random.randn(p, n)
    g_np = F_np.dot(tmp) + np.random.rand(p)

    x = cp.Variable(n)
    A = cp.Parameter((m, n))
    b = cp.Parameter(m)
    F = cp.Parameter((p, n))
    g = cp.Parameter(p)
    obj = cp.Maximize(cp.sum(cp.entr(x)) - .01 * cp.sum_squares(x))
    constraints = [A @ x == b,
                   F @ x <= g]
    prob = cp.Problem(obj, constraints)
    layer = CvxpyLayer(prob, [A, b, F, g], [x])

    A_tch, b_tch, F_tch, g_tch = map(
        lambda x: torch.from_numpy(x).requires_grad_(True), [
            A_np, b_np, F_np, g_np])
    torch.autograd.gradcheck(
        lambda *x: layer(*x, solver_args={"eps": 1e-12,
                                          "max_iters": 10000}),
        (A_tch,
         b_tch,
         F_tch,
         g_tch),
        eps=1e-4,
        atol=1e-3,
        rtol=1e-3)

github cvxgrp / diffcp / tests.py
def test_proj_psd(self):
    np.random.seed(0)
    n = 10
    for _ in range(15):
        x = np.random.randn(n, n)
        x = x + x.T
        x_vec = cone_lib.vec_symm(x)
        z = cp.Variable((n, n), PSD=True)
        objective = cp.Minimize(cp.sum_squares(z - x))
        prob = cp.Problem(objective)
        prob.solve(solver="SCS", eps=1e-10)
        p = cone_lib.unvec_symm(
            cone_lib._proj(x_vec, cone_lib.PSD, dual=False), n)
        np.testing.assert_allclose(p, z.value, atol=1e-5, rtol=1e-5)
        np.testing.assert_allclose(p, cone_lib.unvec_symm(
            cone_lib._proj(x_vec, cone_lib.PSD, dual=True), n))

github cvxgrp / cvxpylayers / cvxpylayers / torch / test_cvxpylayer.py
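# Note: this is an excerpt from the middle of a test; X_np, y_np, N, and n
# are defined earlier in the test.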
X_tch = torch.from_numpy(X_np)
X_tch.requires_grad_(True)
lam_tch = 0.1 * torch.ones(1, requires_grad=True, dtype=torch.double)

a = cp.Variable((n, 1))
X = cp.Parameter((N, n))
lam = cp.Parameter(1, nonneg=True)
y = y_np

log_likelihood = cp.sum(
    cp.multiply(y, X @ a) -
    cp.log_sum_exp(cp.hstack([np.zeros((N, 1)), X @ a]).T, axis=0,
                   keepdims=True).T
)
prob = cp.Problem(
    cp.Minimize(-log_likelihood + lam * cp.sum_squares(a)))

fit_logreg = CvxpyLayer(prob, [X, lam], [a])

def layer_eps(*x):
    return fit_logreg(*x, solver_args={"eps": 1e-12})

torch.autograd.gradcheck(layer_eps,
                         (X_tch,
                          lam_tch),
                         eps=1e-4,
                         atol=1e-3,
                         rtol=1e-3)

github cvxgrp / cvxpylayers / cvxpylayers / tensorflow / test_cvxpylayer.py
def test_not_enough_parameters(self):
    x = cp.Variable(1)
    lam = cp.Parameter(1, nonneg=True)
    lam2 = cp.Parameter(1, nonneg=True)
    objective = lam * cp.norm(x, 1) + lam2 * cp.sum_squares(x)
    prob = cp.Problem(cp.Minimize(objective))
    with self.assertRaisesRegex(ValueError, "The layer's parameters.*"):
        CvxpyLayer(prob, [lam], [x])  # noqa: F841

github cvxgrp / cvxpylayers / cvxpylayers / torch / test_cvxpylayer.py
def test_simple_batch_socp(self):
    set_seed(243)
    n = 5
    m = 1
    batch_size = 4

    P_sqrt = cp.Parameter((n, n), name='P_sqrt')
    q = cp.Parameter((n, 1), name='q')
    A = cp.Parameter((m, n), name='A')
    b = cp.Parameter((m, 1), name='b')

    x = cp.Variable((n, 1), name='x')

    objective = 0.5 * cp.sum_squares(P_sqrt @ x) + q.T @ x
    constraints = [A @ x == b, cp.norm(x) <= 1]
    prob = cp.Problem(cp.Minimize(objective), constraints)

    prob_tch = CvxpyLayer(prob, [P_sqrt, q, A, b], [x])

    P_sqrt_tch = torch.randn(batch_size, n, n, requires_grad=True)
    q_tch = torch.randn(batch_size, n, 1, requires_grad=True)
    A_tch = torch.randn(batch_size, m, n, requires_grad=True)
    b_tch = torch.randn(batch_size, m, 1, requires_grad=True)

    torch.autograd.gradcheck(prob_tch, (P_sqrt_tch, q_tch, A_tch, b_tch))

github alexlee-gk / visual_dynamics / visual_dynamics / algorithms / fqi.py
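# Note: this is an excerpt from the middle of a method; S_p, A_p, R, phi,
# batch_size, and the self.* attributes are defined earlier.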
        phi_p = self.servoing_pol.phi(S_p, A_p, preprocessed=True)
        V_p = phi_p.dot(self.theta) + self.bias
        Q_sample = R + self.gamma * V_p
    toc("\tQ_sample")

tic()
import cvxpy
theta_var = cvxpy.Variable(self.theta.shape[0])
bias_var = cvxpy.Variable(1)
assert len(phi) == batch_size
scale = 1.0
solved = False
while not solved:
    if self.opt_fit_bias:
        objective = cvxpy.Minimize(
            (1 / 2.) * cvxpy.sum_squares(
                (phi * np.sqrt(scale / batch_size)) @ theta_var + bias_var * np.sqrt(scale / batch_size) -
                ((Q_sample + (bias_var - self.bias) * self.gamma) * np.sqrt(scale / batch_size))) +
            (self.l2_reg / 2.) * scale * cvxpy.sum_squares(theta_var))  # no regularization on bias
    else:
        objective = cvxpy.Minimize(
            (1 / 2.) * cvxpy.sum_squares(
                (phi * np.sqrt(scale / batch_size)) @ theta_var + bias_var * np.sqrt(scale / batch_size) -
                (Q_sample * np.sqrt(scale / batch_size))) +
            (self.l2_reg / 2.) * scale * cvxpy.sum_squares(theta_var))  # no regularization on bias
    constraints = [0 <= theta_var]  # no constraint on bias

    if self.eps is not None:
        constraints.append(cvxpy.sum_squares(theta_var - self.theta) <= (len(self.theta) * self.eps))

    prob = cvxpy.Problem(objective, constraints)
    for solver in [None, cvxpy.GUROBI, cvxpy.CVXOPT]:

github AthenaEPI / dmipy / microstruktur / signal_models / three_dimensional_models.py
def _cvx_fit_linear_parameters(self, data, phi):
    fe = cvxpy.Variable(phi.shape[1])
    constraints = [cvxpy.sum(fe) == 1,
                   fe >= 0.011,
                   fe <= 0.89]
    obj = cvxpy.Minimize(cvxpy.sum_squares(phi @ fe - data))
    prob = cvxpy.Problem(obj, constraints)
    prob.solve()
    return np.array(fe.value).squeeze()

github ustunb / dcptree / dcptree / baselines.py
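# Note: this is an excerpt; X, y, x_sensitive, d, n, group_labels, and
# coefficient_idx are defined earlier in the method.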
### setup optimization problem
obj = 0
np.random.seed(self.random_state)  # set the seed before initializing the values of w

if self.train_multiple:
    w = {}
    for k in group_labels:
        idx = x_sensitive == k

        # setup coefficients and initialize as uniform distribution over [0,1]
        w[k] = cp.Variable(d)
        w[k].value = np.random.rand(d)

        # first term in w is the intercept, so no need to regularize that
        obj += cp.sum_squares(w[k][coefficient_idx]) * self.lam[k]

        # setup
        X_k, y_k = X[idx], y[idx]

        if self.sparse_formulation:
            XY = np.concatenate((X_k, y_k[:, np.newaxis]), axis=1)
            UY, counts = np.unique(XY, return_counts=True, axis=0)
            pos_idx = np.greater(UY[:, -1], 0)
            neg_idx = np.logical_not(pos_idx)
            U = UY[:, 0:d]
            obj_weights_pos = counts[pos_idx] / float(n)
            Z_pos = -U[pos_idx, :]

            obj_weights_neg = counts[neg_idx] / float(n)
            Z_neg = U[neg_idx, :]