How to use the quantecon.markov.core.MarkovChain class in quantecon

To help you get started, we've selected a few quantecon examples based on popular ways MarkovChain is used in public projects.
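
Before the project excerpts, here is a minimal self-contained sketch of constructing a MarkovChain directly; the class is re-exported at the package top level, so qe.MarkovChain and quantecon.markov.core.MarkovChain refer to the same object:

import numpy as np
import quantecon as qe

P = np.array([[0.9, 0.1],
              [0.4, 0.6]])
mc = qe.MarkovChain(P, state_values=('low', 'high'))
mc.is_irreducible            # True: every state is reachable from every other
mc.stationary_distributions  # array([[0.8, 0.2]])
mc.simulate(ts_length=5, random_state=0)  # a short sample path over the state values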


From QuantEcon/QuantEcon.py: quantecon/markov/approximation.py (the tail of the tauchen routine)
    # discretized state space for demeaned y_t
    x = np.linspace(x_min, x_max, n)

    step = (x_max - x_min) / (n - 1)
    half_step = 0.5 * step
    P = np.empty((n, n))

    # approximate Markov transition matrix for
    # demeaned y_t
    _fill_tauchen(x, P, n, rho, sigma_u, half_step)

    # shifts the state values by the long run mean of y_t
    mu = b / (1 - rho)

    mc = MarkovChain(P, state_values=x+mu)

    return mc
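
A minimal usage sketch for this excerpt, assuming the signature tauchen(rho, sigma_u, b=0., m=3, n=7) suggested by the variables above (newer quantecon releases reorder these arguments):

import quantecon as qe

# Discretize y_{t+1} = b + rho * y_t + sigma_u * eps_{t+1}
mc = qe.tauchen(rho=0.9, sigma_u=0.1, b=0.5, m=3, n=7)  # signature assumed from the excerpt
mc.P.shape       # (7, 7) approximate transition matrix
mc.state_values  # grid shifted by the long-run mean b / (1 - rho) = 5.0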
From QuantEcon/QuantEcon.py: quantecon/markov/core.py
def mc_compute_stationary(P):
    """
    Computes stationary distributions of P, one for each recurrent
    class. Any stationary distribution is written as a convex
    combination of these distributions.

    Returns
    -------
    stationary_dists : array_like(float, ndim=2)
        Array containing the stationary distributions as its rows.

    """
    return MarkovChain(P).stationary_distributions
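
A quick sketch of what "one distribution per recurrent class" means in practice, using a reducible chain whose recurrent classes are {0} and {1}:

import numpy as np
import quantecon as qe

P = np.array([[1.0, 0.0],
              [0.0, 1.0]])  # two absorbing states, hence two recurrent classes
qe.MarkovChain(P).stationary_distributions
# array([[1., 0.],
#        [0., 1.]])
# Any convex combination a*[1, 0] + (1 - a)*[0, 1] is also stationary.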
From QuantEcon/QuantEcon.py: quantecon/markov/ddp.py (the DiscreteDP.controlled_mc method)
"""
        Returns the controlled Markov chain for a given policy `sigma`.

        Parameters
        ----------
        sigma : array_like(int, ndim=1)
            Policy vector, of length n.

        Returns
        -------
        mc : MarkovChain
            Controlled Markov chain.

        """
        _, Q_sigma = self.RQ_sigma(sigma)
        return MarkovChain(Q_sigma)
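
To see this method in use, here is a sketch assuming the dense DiscreteDP(R, Q, beta) formulation, on a toy two-state, two-action problem:

import numpy as np
import quantecon as qe

R = np.array([[5.0, 10.0],
              [-1.0, -np.inf]])  # R[s, a]: reward in state s under action a; -inf marks infeasible
Q = np.zeros((2, 2, 2))          # Q[s, a, s']: transition probabilities
Q[0, 0] = [0.5, 0.5]
Q[0, 1] = [0.0, 1.0]
Q[1, 0] = [0.0, 1.0]
Q[1, 1] = [0.5, 0.5]             # never used: action 1 is infeasible in state 1
ddp = qe.DiscreteDP(R, Q, beta=0.95)

sigma = np.array([1, 0])         # a policy: action 1 in state 0, action 0 in state 1
mc = ddp.controlled_mc(sigma)    # MarkovChain with P[s, :] = Q[s, sigma[s], :]
mc.P
# array([[0., 1.],
#        [0., 1.]])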
From QuantEcon/QuantEcon.py: quantecon/markov/random.py (end of the random_markov_chain docstring and body)
    Examples
    --------
    >>> mc = qe.markov.random_markov_chain(3, random_state=1234)
    >>> mc.P
    array([[ 0.19151945,  0.43058932,  0.37789123],
           [ 0.43772774,  0.34763084,  0.21464142],
           [ 0.27259261,  0.5073832 ,  0.22002419]])
    >>> mc = qe.markov.random_markov_chain(3, k=2, random_state=1234)
    >>> mc.P
    array([[ 0.19151945,  0.80848055,  0.        ],
           [ 0.        ,  0.62210877,  0.37789123],
           [ 0.56227226,  0.        ,  0.43772774]])

    """
    P = random_stochastic_matrix(n, k, sparse, format='csr',
                                 random_state=random_state)
    mc = MarkovChain(P)
    return mc
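
Since the body passes sparse straight through to random_stochastic_matrix, the same call scales to large sparse chains; a small sketch assuming the sparse keyword visible above:

import quantecon as qe

mc = qe.markov.random_markov_chain(100, k=3, sparse=True, random_state=1234)
mc.P              # 100 x 100 scipy.sparse CSR matrix with 3 nonzeros per row
mc.P.sum(axis=1)  # every row still sums to 1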
From QuantEcon/QuantEcon.py: quantecon/markov/core.py (from mc_sample_path)
    Returns
    -------
    X : array_like(int, ndim=1)
        The simulation of states.

    """
    random_state = check_random_state(random_state)

    if isinstance(init, numbers.Integral):
        X_0 = init
    else:
        cdf0 = np.cumsum(init)
        u_0 = random_state.random_sample()
        X_0 = searchsorted(cdf0, u_0)

    mc = MarkovChain(P)
    return mc.simulate(ts_length=sample_size, init=X_0,
                       random_state=random_state)
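
For reference, a small sketch of calling the enclosing mc_sample_path helper, which is exported at the package top level:

import numpy as np
import quantecon as qe

P = np.array([[0.4, 0.6],
              [0.2, 0.8]])
X = qe.mc_sample_path(P, init=0, sample_size=10, random_state=42)
X.shape  # (10,): a length-10 path of state indices starting from state 0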
From QuantEcon/QuantEcon.py: quantecon/markov/approximation.py (the tail of the rouwenhorst routine and its inner helper row_build_mat)
            p4[1:, 1:] = q * new_mat

            theta = p1 + p2 + p3 + p4
            theta[1:n - 1, :] = theta[1:n - 1, :] / 2

        else:
            raise ValueError("The number of states must be positive " +
                             "and greater than or equal to 2")

        return theta

    theta = row_build_mat(n, p, q)

    bar += ybar / (1 - rho)

    return MarkovChain(theta, bar)
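
A usage sketch for this excerpt, with the argument order assumed from the variables above (newer quantecon releases use a different signature):

import quantecon as qe

# Discretize y_t = ybar + rho * y_{t-1} + eps_t, eps_t ~ N(0, sigma^2)
mc = qe.rouwenhorst(n=5, ybar=0.0, sigma=0.1, rho=0.9)  # argument order assumed
mc.P.sum(axis=1)  # each row of the 5 x 5 transition matrix sums to 1
mc.state_values   # grid centered on the long-run mean ybar / (1 - rho)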