How to use the casadi.mtimes function in CasADi

To help you get started, we've selected a few casadi examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.

github helgeanl / GP-MPC / GP / prediction.py View on Github external
for a in range(E):
        ell = hyper[a, :D]
        w = 1 / ell**2
        sf2 = ca.MX(hyper[a, D]**2)
        iK = ca.MX(invK[a])
        alpha = ca.mtimes(iK, F[:, a])
        kss = sf2
        ks = ca.MX.zeros(n, 1)

        for i in range(n):
            ks[i] = covSEard2(X[i, :], inputmean, ell, sf2)

        invKks = ca.mtimes(iK, ks)
        mean[a] = ca.mtimes(ks.T, alpha)
        var[a] = kss - ca.mtimes(ks.T, invKks)
        d_mean[a] = ca.mtimes(ca.transpose(w[a] * v[:, a] * ks), alpha)

    for d in range(E):
        for e in range(E):
            dd_var1a = ca.mtimes(ca.transpose(v[:, d] * ks), iK)
            dd_var1b = ca.mtimes(dd_var1a, v[e] * ks)
            dd_var2 = ca.mtimes(ca.transpose(v[d] * v[e] * ks), invKks)
            dd_var[d, e] = -2 * w[d] * w[e] * (dd_var1b + dd_var2)
            if d == e:
                dd_var[d, e] = dd_var[d, e] + 2 * w[d] * (kss - var[d])

    # covariance with noisy input
    for a in range(E):
        covar1 = ca.mtimes(d_mean, d_mean.T)
        covar[0, 0] = inputcov[a]
        covariance[a, a] = var[a] + ca.trace(ca.mtimes(covar, .5 * dd_var + covar1))
github helgeanl / GP-MPC / GP / prediction.py View on Github external
d_mean = ca.MX.zeros(E, 1)
    dd_var = ca.MX.zeros(E, E)

    for a in range(E):
        ell = hyper[a, :D]
        w = 1 / ell**2
        sf2 = ca.MX(hyper[a, D]**2)
        iK = ca.MX(invK[a])
        alpha = ca.mtimes(iK, F[:, a])
        kss = sf2
        ks = ca.MX.zeros(n, 1)

        for i in range(n):
            ks[i] = covSEard2(X[i, :], inputmean, ell, sf2)

        invKks = ca.mtimes(iK, ks)
        mean[a] = ca.mtimes(ks.T, alpha)
        var[a] = kss - ca.mtimes(ks.T, invKks)
        d_mean[a] = ca.mtimes(ca.transpose(w[a] * v[:, a] * ks), alpha)

    for d in range(E):
        for e in range(E):
            dd_var1a = ca.mtimes(ca.transpose(v[:, d] * ks), iK)
            dd_var1b = ca.mtimes(dd_var1a, v[e] * ks)
            dd_var2 = ca.mtimes(ca.transpose(v[d] * v[e] * ks), invKks)
            dd_var[d, e] = -2 * w[d] * w[e] * (dd_var1b + dd_var2)
            if d == e:
                dd_var[d, e] = dd_var[d, e] + 2 * w[d] * (kss - var[d])

    # covariance with noisy input
    for a in range(E):
        covar1 = ca.mtimes(d_mean, d_mean.T)
github meco-group / omg-tools / omgtools / basics / spline_extra.py View on Github external
def shift_knot1_fwd(cfs, basis, t_shift):
    """Shift the first knot of `basis` forward by `t_shift` and transform
    the spline coefficients `cfs` accordingly.

    Numeric coefficients are transformed with a plain matrix product;
    symbolic (SX/MX) coefficients are handled by wrapping the
    transformation in an expanded CasADi Function and evaluating it on
    the given inputs.
    """
    if not isinstance(cfs, (SX, MX)):
        # Numeric path: apply the knot-shift transformation matrix directly.
        T = shiftfirstknot_T(basis, t_shift)
        return T.dot(cfs)
    # Symbolic path: build the transformation on fresh symbols, then wrap
    # it in a Function so it can be evaluated on the symbolic arguments.
    coeffs_sym = MX.sym('cfs', cfs.shape)
    shift_sym = MX.sym('t_shift')
    transform = shiftfirstknot_T(basis, shift_sym)
    shifted_sym = mtimes(transform, coeffs_sym)
    shift_fun = Function('fun', [coeffs_sym, shift_sym], [shifted_sym]).expand()
    return shift_fun(cfs, t_shift)
github helgeanl / GP-MPC / GP / prediction.py View on Github external
alpha = ca.mtimes(iK, F[:, a])
        kss = sf2
        ks = ca.MX.zeros(n, 1)

        for i in range(n):
            ks[i] = covSEard2(X[i, :], inputmean, ell, sf2)

        invKks = ca.mtimes(iK, ks)
        mean[a] = ca.mtimes(ks.T, alpha)
        var[a] = kss - ca.mtimes(ks.T, invKks)
        d_mean[a] = ca.mtimes(ca.transpose(w[a] * v[:, a] * ks), alpha)

    for d in range(E):
        for e in range(E):
            dd_var1a = ca.mtimes(ca.transpose(v[:, d] * ks), iK)
            dd_var1b = ca.mtimes(dd_var1a, v[e] * ks)
            dd_var2 = ca.mtimes(ca.transpose(v[d] * v[e] * ks), invKks)
            dd_var[d, e] = -2 * w[d] * w[e] * (dd_var1b + dd_var2)
            if d == e:
                dd_var[d, e] = dd_var[d, e] + 2 * w[d] * (kss - var[d])

    # covariance with noisy input
    for a in range(E):
        covar1 = ca.mtimes(d_mean, d_mean.T)
        covar[0, 0] = inputcov[a]
        covariance[a, a] = var[a] + ca.trace(ca.mtimes(covar, .5 * dd_var + covar1))
        #covariance[a] = var[a] + ca.trace(ca.mtimes(inputcov, .5 * dd_var + covar1))
    return [mean, covariance]
github uuvsimulator / uuv_simulator / uuv_control / uuv_trajectory_control / src / uuv_control_interfaces / sym_vehicle.py View on Github external
if CASADI_IMPORTED:
            # Declaring state variables
            ## Generalized position vector
            self.eta = casadi.SX.sym('eta', 6)
            ## Generalized velocity vector
            self.nu = casadi.SX.sym('nu', 6)

            # Build the Coriolis matrix
            self.CMatrix = casadi.SX.zeros(6, 6)

            S_12 = - cross_product_operator(
                casadi.mtimes(self._Mtotal[0:3, 0:3], self.nu[0:3]) +
                casadi.mtimes(self._Mtotal[0:3, 3:6], self.nu[3:6]))
            S_22 = - cross_product_operator(
                casadi.mtimes(self._Mtotal[3:6, 0:3], self.nu[0:3]) +
                casadi.mtimes(self._Mtotal[3:6, 3:6], self.nu[3:6]))

            self.CMatrix[0:3, 3:6] = S_12
            self.CMatrix[3:6, 0:3] = S_12
            self.CMatrix[3:6, 3:6] = S_22

            # Build the damping matrix (linear and nonlinear elements)
            self.DMatrix = - casadi.diag(self._linear_damping)        
            self.DMatrix -= casadi.diag(self._linear_damping_forward_speed)
            self.DMatrix -= casadi.diag(self._quad_damping * self.nu)      

            # Build the restoring forces vectors wrt the BODY frame
            Rx = np.array([[1, 0, 0],
                        [0, casadi.cos(self.eta[3]), -1 * casadi.sin(self.eta[3])],
                        [0, casadi.sin(self.eta[3]), casadi.cos(self.eta[3])]])
            Ry = np.array([[casadi.cos(self.eta[4]), 0, casadi.sin(self.eta[4])],
                        [0, 1, 0],
github helgeanl / GP-MPC / GP / noisyGP.py View on Github external
E     = len(invK)
    n     = ca.MX.size(F[:, 1])[0]
    mean  = ca.MX.zeros(E, 1)
    beta  = ca.MX.zeros(n, E)
    log_k = ca.MX.zeros(n, E)
    v     = X - ca.repmat(inputmean, n, 1)

    #invK = MX(invK)
    covariance = ca.MX.zeros(E, E)

    A = ca.SX.sym('A', inputcov.shape)
    [Q, R2] = ca.qr(A)
    determinant = ca.Function('determinant', [A], [ca.exp(ca.trace(ca.log(R2)))])

    for a in range(E):
        beta[:, a] = ca.mtimes(invK[a], F[:, a])
        iLambda   = ca.diag(ca.exp(-2 * hyper[a, :D]))
        R  = inputcov + ca.diag(ca.exp(2 * hyper[a, :D]))
        iR = ca.mtimes(iLambda, (ca.MX.eye(D) - ca.solve((ca.MX.eye(D) + ca.mtimes(inputcov, iLambda)), (ca.mtimes(inputcov, iLambda)))))
        T  = ca.mtimes(v, iR)
        c  = ca.exp(2 * hyper[a, D]) / ca.sqrt(determinant(R)) * ca.exp(ca.sum2(hyper[a, :D]))
        q2 = c * ca.exp(-ca.sum2(T * v) * 0.5)
        qb = q2 * beta[:, a]
        mean[a] = ca.sum1(qb)
        t  = ca.repmat(ca.exp(hyper[a, :D]), n, 1)
        v1 = v / t
        log_k[:, a] = 2 * hyper[a, D] - ca.sum2(v1 * v1) * 0.5

    # covariance with noisy input
    for a in range(E):
        ii = v / ca.repmat(ca.exp(2 * hyper[a, :D]), n, 1)
        for b in range(a + 1):
github helgeanl / GP-MPC / gp_mpc / mpc_class.py View on Github external
"""
        Q_s = ca.SX.sym('Q', ca.MX.size(Q))
        R_s = ca.SX.sym('R', ca.MX.size(R))
        x_s = ca.SX.sym('x', ca.MX.size(x))
        u_s = ca.SX.sym('u', ca.MX.size(u))
        covar_x_s = ca.SX.sym('covar_x', ca.MX.size(covar_x))
        covar_u_s = ca.SX.sym('covar_u', ca.MX.size(R))

        sqnorm_x = ca.Function('sqnorm_x', [x_s, Q_s],
                               [ca.mtimes(x_s.T, ca.mtimes(Q_s, x_s))])
        sqnorm_u = ca.Function('sqnorm_u', [u_s, R_s],
                               [ca.mtimes(u_s.T, ca.mtimes(R_s, u_s))])
        trace_u  = ca.Function('trace_u', [R_s, covar_u_s],
                               [s * ca.trace(ca.mtimes(R_s, covar_u_s))])
        trace_x  = ca.Function('trace_x', [Q_s, covar_x_s],
                               [s * ca.trace(ca.mtimes(Q_s, covar_x_s))])

        return sqnorm_x(x - x_ref, Q) + sqnorm_u(u, R) + sqnorm_u(delta_u, S) \
                + trace_x(Q, covar_x)  + trace_u(R, covar_u)
github helgeanl / GP-MPC / GP / prediction.py View on Github external
dd_var = ca.MX.zeros(E, E)

    for a in range(E):
        ell = hyper[a, :D]
        w = 1 / ell**2
        sf2 = ca.MX(hyper[a, D]**2)
        iK = ca.MX(invK[a])
        alpha = ca.mtimes(iK, F[:, a])
        kss = sf2
        ks = ca.MX.zeros(n, 1)

        for i in range(n):
            ks[i] = covSEard2(X[i, :], inputmean, ell, sf2)

        invKks = ca.mtimes(iK, ks)
        mean[a] = ca.mtimes(ks.T, alpha)
        var[a] = kss - ca.mtimes(ks.T, invKks)
        d_mean[a] = ca.mtimes(ca.transpose(w[a] * v[:, a] * ks), alpha)

    for d in range(E):
        for e in range(E):
            dd_var1a = ca.mtimes(ca.transpose(v[:, d] * ks), iK)
            dd_var1b = ca.mtimes(dd_var1a, v[e] * ks)
            dd_var2 = ca.mtimes(ca.transpose(v[d] * v[e] * ks), invKks)
            dd_var[d, e] = -2 * w[d] * w[e] * (dd_var1b + dd_var2)
            if d == e:
                dd_var[d, e] = dd_var[d, e] + 2 * w[d] * (kss - var[d])

    # covariance with noisy input
    for a in range(E):
        covar1 = ca.mtimes(d_mean, d_mean.T)
        covar[0, 0] = inputcov[a]
github TUMFTM / global_racetrajectory_optimization / opt_mintime_traj / src / opt_mintime.py View on Github external
# path constraint: limited energy consumption
    if pars["optim_opts"]["limit_energy"]:
        g.append(ca.sum1(ca.vertcat(*ec_opt)) / 3600000.0)
        lbg.append([0])
        ubg.append([pars["optim_opts"]["energy_limit"]])

    # formulate differentiation matrix (for regularization)
    diff_matrix = np.eye(N)
    for i in range(N - 1):
        diff_matrix[i, i + 1] = -1.0
    diff_matrix[N - 1, 0] = -1.0

    # regularization (delta)
    delta_p = ca.vertcat(*delta_p)
    Jp_delta = ca.mtimes(ca.MX(diff_matrix), delta_p)
    Jp_delta = ca.dot(Jp_delta, Jp_delta)

    # regularization (f_drive + f_brake)
    F_p = ca.vertcat(*F_p)
    Jp_f = ca.mtimes(ca.MX(diff_matrix), F_p)
    Jp_f = ca.dot(Jp_f, Jp_f)

    # formulate objective
    J = J + pars["optim_opts"]["penalty_F"] * Jp_f + pars["optim_opts"]["penalty_delta"] * Jp_delta

    # concatenate NLP vectors
    w = ca.vertcat(*w)
    g = ca.vertcat(*g)
    w0 = np.concatenate(w0)
    lbw = np.concatenate(lbw)
    ubw = np.concatenate(ubw)