How to use the casadi.vertcat function in casadi

To help you get started, we’ve selected a few casadi.vertcat examples based on popular ways it is used in public projects.
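
Before the project snippets, here is a minimal sketch of the call itself, assuming CasADi 3.x (the symbol names below are purely illustrative): vertcat stacks its arguments vertically into a single column expression, and a Python list of expressions can be stacked by unpacking it with *.

import casadi as ca

x = ca.MX.sym("x", 3)           # 3x1 symbolic vector
u = ca.MX.sym("u", 2)           # 2x1 symbolic vector

z = ca.vertcat(x, u)            # stack vertically -> 5x1 column
print(z.shape)                  # (5, 1)

parts = [x, u, ca.DM([1.0])]    # a Python list of expressions...
z2 = ca.vertcat(*parts)         # ...is stacked by unpacking it with *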

github adbuerger / casiopeia / test / test_integration_test_ode_multi_setups.py
    def setUp(self):

        self.x = ca.MX.sym("x", 4)
        self.p = ca.MX.sym("p", 6)
        self.u = ca.MX.sym("u", 2)

        self.f = ca.vertcat(
            self.x[3] * np.cos(self.x[2] + self.p[0] * self.u[0]),
            self.x[3] * np.sin(self.x[2] + self.p[0] * self.u[0]),
            self.x[3] * self.u[0] * self.p[1],
            self.p[2] * self.u[1]
                - self.p[3] * self.u[1] * self.x[3]
                - self.p[4] * self.x[3]**2
                - self.p[5]
                - (self.x[3] * self.u[0])**2 * self.p[1] * self.p[0])

        self.phi = self.x

        data = np.array(np.loadtxt("test/data_2d_vehicle_pe.dat", \
github Pyomo / pyomo / pyomo / dae / simulator.py
        # Old way (10 times faster, but can't incorporate time
        # varying parameters/controls)
        xalltemp = [self._templatemap[i] for i in self._diffvars]
        xall = casadi.vertcat(*xalltemp)

        odealltemp = [convert_pyomo2casadi(self._rhsdict[i])
                      for i in self._derivlist]
        odeall = casadi.vertcat(*odealltemp)
        dae = {'x': xall, 'ode': odeall}

        if len(self._algvars) != 0:
            zalltemp = [self._templatemap[i] for i in self._simalgvars]
            zall = casadi.vertcat(*zalltemp)

            algalltemp = [convert_pyomo2casadi(i) for i in self._alglist]
            algall = casadi.vertcat(*algalltemp)
            dae['z'] = zall
            dae['alg'] = algall

        integrator_options['grid'] = tsim
        integrator_options['output_t0'] = True
        F = casadi.integrator('F', integrator, dae, integrator_options)
        sol = F(x0=initcon)
        profile = sol['xf'].full().T

        if len(self._algvars) != 0:
            algprofile = sol['zf'].full().T
            profile = np.concatenate((profile, algprofile), axis=1)

        return [tsim, profile]
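
The vertcat calls above follow the usual CasADi 3.x pattern of collecting expressions in Python lists and unpacking them with *. A minimal, self-contained sketch of that assembly step, with made-up symbols and dynamics standing in for the Pyomo-generated ones:

import casadi as ca

x1 = ca.MX.sym("x1")
x2 = ca.MX.sym("x2")

xalltemp = [x1, x2]
xall = ca.vertcat(*xalltemp)         # 2x1 state vector

odealltemp = [-x1, x1 - 0.5 * x2]
odeall = ca.vertcat(*odealltemp)     # 2x1 right-hand side

dae = {'x': xall, 'ode': odeall}     # ready to hand to casadi.integrator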
github helgeanl / GP-MPC / gp_mpc / gp_class.py
    def set_method(self, gp_method='TA'):
        """ Select wich GP function to use """

        x = ca.MX.sym('x', self.__Ny)
        covar_s = ca.MX.sym('covar', self.__Nx, self.__Nx)
        u = ca.MX.sym('u', self.__Nu)
        self.__gp_method = gp_method

        if gp_method == 'ME':
            self.__predict = ca.Function('gp_mean', [x, u, covar_s],
                                [self.__mean(ca.vertcat(x,u)),
                                 self.__covar(ca.vertcat(x,u))])
        elif gp_method == 'TA':
            self.__predict = ca.Function('gp_taylor', [x, u, covar_s],
                                [self.__mean(ca.vertcat(x,u)),
                                 self.__TA_covar(ca.vertcat(x,u), covar_s)])
        elif gp_method == 'EM':
            self.__predict = ca.Function('gp_exact_moment', [x, u, covar_s],
                                gp_exact_moment(self.__invK, ca.MX(self.__X),
                                        ca.MX(self.__Y), ca.MX(self.__hyper),
                                        ca.vertcat(x, u).T, covar_s))
        elif gp_method == 'old_ME':
            self.__predict = ca.Function('gp_mean', [x, u, covar_s],
                                gp(self.__invK, ca.MX(self.__X), ca.MX(self.__Y),
                                   ca.MX(self.__hyper),
                                   ca.vertcat(x, u).T, meanFunc=self.__mean_func))
        elif gp_method == 'old_TA':
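
The recurring idiom in this snippet is to keep x and u as separate Function inputs and fuse them with vertcat before passing them to the GP expressions. A minimal sketch of that wrapping, with a made-up mean function standing in for self.__mean (illustration only):

import casadi as ca

Ny, Nu = 3, 2
x = ca.MX.sym("x", Ny)
u = ca.MX.sym("u", Nu)

def mean(z):                         # stand-in for the GP mean, not the real one
    return ca.sum1(z**2)

predict = ca.Function("gp_mean", [x, u], [mean(ca.vertcat(x, u))])
out = predict([1, 2, 3], [0.5, 0.5])   # evaluates mean([x; u]) numerically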
github adbuerger / casiopeia / examples / quarter_vehicle.py
    rk4 = ca.Integrator("cvodes", "rk", ffcn, {"t0": 0, "tf": dt}) #, \
        #"number_of_finite_elements": 1})

    P = ca.MX.sym("P", 3)
    EPS_U = ca.MX.sym("EPS_U", N)
    X0 = ca.MX.sym("X0", 4)

    V = ca.vertcat([P, EPS_U, X0])

    x_end = X0
    obj = [x_end - ydata_noise[0,:].T]

    for k in range(N):

        x_end = rk4(x0 = x_end, p = ca.vertcat([udata[k], EPS_U[k], P]))["xf"]
        obj.append(x_end - ydata_noise[k+1, :].T)

    r = ca.vertcat([ca.vertcat(obj), EPS_U])

    Sigma_y_inv = ca.diag(ca.vec(wv))
    Sigma_u_inv = ca.diag(weps_u)
    Z = ca.DMatrix(pl.zeros((Sigma_y_inv.shape[0], Sigma_u_inv.shape[1])))

    Sigma = ca.blockcat(Sigma_y_inv, Z, Z.T, Sigma_u_inv)

    nlp = ca.MXFunction("nlp", ca.nlpIn(x = V), \
        ca.nlpOut(f = 0.5 * ca.mul([r.T, Sigma, r])))

    nlpsolver = ca.NlpSolver("nlpsolver", "ipopt", nlp)

    V0 = ca.vertcat([
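
The snippet is cut off here; note that it targets the older CasADi 2.x interface (ca.Integrator, ca.MXFunction, ca.NlpSolver, ca.DMatrix), in which vertcat accepted a single Python list. Under CasADi 3.x the same stacking is written with separate arguments or list unpacking; a sketch with illustrative sizes:

import casadi as ca

P = ca.MX.sym("P", 3)
EPS_U = ca.MX.sym("EPS_U", 10)        # N = 10 chosen arbitrarily for this sketch
X0 = ca.MX.sym("X0", 4)

V = ca.vertcat(P, EPS_U, X0)          # CasADi 3.x: pass the pieces directly...
V_alt = ca.vertcat(*[P, EPS_U, X0])   # ...or unpack a list with *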
github helgeanl / GP-MPC / simulation / four_tank.py
    a3 = 0.127
    a4 = 0.127
    A1 = 50.27
    A2 = 50.27
    A3 = 28.27
    A4 = 28.27
    gamma1 = 0.4
    gamma2 = 0.4

    # Differential states
    xd = ca.SX.sym("xd", ndstate)  # vector of states [h1,h2,h3,h4]

    # Initial conditions
    xDi = x0

    ode = ca.vertcat(
        -a1 / A1 * ca.sqrt(2 * g * xd[0]) + a3 / A1 
        * ca.sqrt(2 * g * xd[2]) + gamma1 / A1 * u[0],
        -a2 / A2 * ca.sqrt(2 * g * xd[1]) + a4 / A2 
        * ca.sqrt(2 * g * xd[3]) + gamma2 / A2 * u[1],
        -a3 / A3 * ca.sqrt(2 * g * xd[2]) + (1 - gamma2) / A3 * u[1],
        -a4 / A4 * ca.sqrt(2 * g * xd[3]) + (1 - gamma1) / A4 * u[0])

    dae = {'x': xd, 'ode': ode}

    # Create a DAE system solver
    opts = {}
    opts['abstol'] = 1e-10  # abs. tolerance
    opts['reltol'] = 1e-10  # rel. tolerance
    opts['t0'] = t0
    opts['tf'] = tf
    Sim = ca.integrator('Sim', 'idas', dae, opts)
github pymoca / pymoca / src / pymoca / backends / casadi / generator.py
    def exitEquation(self, tree):
        logger.debug('exitEquation')

        if isinstance(tree.left, list):
            src_left = ca.vertcat(*[self.get_mx(c) for c in tree.left])
        else:
            src_left = self.get_mx(tree.left)

        if isinstance(tree.right, list):
            src_right = ca.vertcat(*[self.get_mx(c) for c in tree.right])
        else:
            src_right = self.get_mx(tree.right)

        src_left = ca.MX(src_left)
        src_right = ca.MX(src_right)

        # According to the Modelica spec,
        # "It is possible to omit left hand side component references and/or truncate the left hand side list in order to discard outputs from a function call."
        if isinstance(tree.right, ast.Expression) and tree.right.operator in self.root.classes:
            if src_left.size1() < src_right.size1():
                src_right = src_right[0:src_left.size1()]
        if isinstance(tree.left, ast.Expression) and tree.left.operator in self.root.classes:
            if src_left.size1() > src_right.size1():
                src_left = src_left[0:src_right.size1()]

        # If dimensions between the lhs and rhs do not match, but the dimensions of lhs
github acados / acados / experimental / examples / python / broken / robin_ocp_nlp.py
import matplotlib.pyplot as plt
from numpy import array, diag, eye, zeros
from scipy.linalg import block_diag

from acados import ocp_nlp
from casadi import SX, Function, vertcat

nx, nu = 2, 1

x = SX.sym('x', nx)
u = SX.sym('u', nu)

ode_fun = Function('ode_fun', [x, u], [vertcat(x[1], u)], ['x', 'u'], ['rhs'])

N = 15

nlp = ocp_nlp(N, nx, nu)
nlp.set_dynamics(ode_fun, {'integrator': 'rk4', 'step': 0.1})

q, r = 1, 1
P = eye(nx)

nlp.set_stage_cost(eye(nx+nu), zeros(nx+nu), diag([q, q, r]))
nlp.set_terminal_cost(eye(nx), zeros(nx), P)

x0 = array([1, 1])
nlp.set_field("lbx", 0, x0)
nlp.set_field("ubx", 0, x0)
github pymoca / pymoca / src / pymoca / backends / casadi / model.py
                    elif expr.name() in alg_states:
                        # This algebraic state must now become a differentiated state.
                        states[expr.name()] = alg_states.pop(expr.name())
                        der_sym = ca.MX.sym('der({})'.format(expr.name()))
                        der_states[expr.name()] = Variable(der_sym, float)
                        return der_sym
                    else:
                        return 0.0
                else:
                    # Differentiate using CasADi and chain rule
                    orig_deps = ca.symvar(expr)
                    deps = ca.vertcat(*orig_deps)
                    J = ca.Function('J', [deps], [ca.jacobian(expr, deps)])
                    J_sparsity = J.sparsity_out(0)
                    der_deps = [get_derivative(dep) if J_sparsity.has_nz(0, j) else ca.DM.zeros(dep.size()) for j, dep in enumerate(orig_deps)]
                    return ca.mtimes(J(deps), ca.vertcat(*der_deps))
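
The pattern above stacks an expression's dependencies with vertcat, takes a Jacobian with respect to that stack, and multiplies by the stacked derivatives, i.e. the chain rule. A small self-contained sketch of the same idea, using SX symbols and a made-up expression:

import casadi as ca

x = ca.SX.sym("x")
y = ca.SX.sym("y")
xdot = ca.SX.sym("xdot")
ydot = ca.SX.sym("ydot")

expr = ca.sin(x) * y                  # expression to differentiate in time
deps = ca.vertcat(x, y)               # stacked dependencies
der_deps = ca.vertcat(xdot, ydot)     # their time derivatives

# Chain rule: d(expr)/dt = (d expr / d deps) * d(deps)/dt
der_expr = ca.mtimes(ca.jacobian(expr, deps), der_deps)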
github Pyomo / pyomo / pyomo / dae / simulator.py
for i in self._derivlist]
        odeall = casadi.vertcat(*odealltemp)

        # Time-varying inputs
        ptemp = [self._templatemap[i]
                 for i in self._siminputvars.values()]
        pall = casadi.vertcat(time, *ptemp)

        dae = {'x': xall, 'p': pall, 'ode': odeall}

        if len(self._algvars) != 0:
            zalltemp = [self._templatemap[i] for i in self._simalgvars]
            zall = casadi.vertcat(*zalltemp)
            # Need to do anything special with time scaling??
            algalltemp = [convert_pyomo2casadi(i) for i in self._alglist]
            algall = casadi.vertcat(*algalltemp)
            dae['z'] = zall
            dae['alg'] = algall

        integrator_options['tf'] = 1.0
        F = casadi.integrator('F', integrator, dae, integrator_options)
        N = len(tsim)

        # This approach removes the time scaling from tsim so must
        # create an array with the time step between consecutive
        # time points
        tsimtemp = np.hstack([0, tsim[1:] - tsim[0:-1]])
        tsimtemp.shape = (1, len(tsimtemp))

        palltemp = [casadi.DM(tsimtemp)]

        # Need a similar np array for each time-varying input
github pymoca / pymoca / src / pymoca / backends / casadi / model.py
logger.warning('Not all constants have been eliminated.  As a result, the affine DAE expression will use a symbolic matrix, as opposed to a numerical sparse matrix.')
                    if len(self.parameters) == 0 or not ca.depends_on(equations, parameters):
                        parameters = 0
                    else:
                        logger.warning('Not all parameters have been eliminated.  As a result, the affine DAE expression will use a symbolic matrix, as opposed to a numerical sparse matrix.')

                    A = Af(0, constants, parameters)
                    b = bf(0, constants, parameters)

                    # Replace veccat'ed states with brand new state vectors so as to avoid the value copy operations induced by veccat.
                    self._states_vector = ca.MX.sym('states_vector', sum([s.numel() for s in self._symbols(self.states)]))
                    self._der_states_vector = ca.MX.sym('der_states_vector', sum([s.numel() for s in self._symbols(self.der_states)]))
                    self._alg_states_vector = ca.MX.sym('alg_states_vector', sum([s.numel() for s in self._symbols(self.alg_states)]))
                    self._inputs_vector = ca.MX.sym('inputs_vector', sum([s.numel() for s in self._symbols(self.inputs)]))

                    states_vector = ca.vertcat(self._states_vector, self._der_states_vector, self._alg_states_vector, self._inputs_vector)
                    equations = [ca.reshape(ca.mtimes(A, states_vector), equations.shape) + b]
                    setattr(self, equation_list, equations)

        if options['expand_mx']:
            logger.info("Expanded MX functions will be returned")
            self._expand_mx_func = lambda x: x.expand()

        logger.info("Finished model simplification")