How to use the quantecon.lqcontrol.LQ class in quantecon

To help you get started, we've selected a few quantecon examples based on popular ways LQ is used in public projects.

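All of the snippets below follow the same basic pattern: build an LQ problem from cost matrices Q, R and dynamics A, B, then call stationary_values. Here is a minimal, self-contained sketch of that pattern; the matrix values are placeholders for illustration, not taken from any of the projects below.

import numpy as np
from quantecon.lqcontrol import LQ

# Minimal scalar LQ problem (placeholder values):
# minimize E[ sum_t beta^t (x_t' R x_t + u_t' Q u_t) ]
# subject to x_{t+1} = A x_t + B u_t
Q = np.array([[1.0]])   # control cost
R = np.array([[1.0]])   # state cost
A = np.array([[0.95]])  # state transition
B = np.array([[1.0]])   # control loading

lq = LQ(Q, R, A, B, beta=0.95)
P, F, d = lq.stationary_values()  # value matrix P, policy F, constant d
# The optimal policy is u_t = -F x_t
print(F)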

github QuantEcon / QuantEcon.py / quantecon / robustlq.py
    def K_to_F(self, K, method='doubling'):
        """
        Compute the policy F and value function P, given K.

        Parameters
        ----------
        K : array_like(float, ndim=2)
            The worst-case shock matrix, where :math:`w_{t+1} = K x_t`
        method : str, optional(default='doubling')
            Solution method used in solving the associated Riccati
            equation, str in {'doubling', 'qz'}.

        Returns
        -------
        F : array_like(float, ndim=2)
            The policy function for a given K
        P : array_like(float, ndim=2)
            The value function for a given K

        """
        A1 = self.A + dot(self.C, K)
        B1 = self.B
        Q1 = self.Q
        R1 = self.R - self.beta * self.theta * dot(K.T, K)
        lq = LQ(Q1, R1, A1, B1, beta=self.beta)
        P, F, d = lq.stationary_values(method=method)

        return F, P
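K_to_F is a method of the RBLQ class in quantecon.robustlq. A minimal usage sketch follows; the matrices and the candidate K are placeholder values, not taken from the project.

import numpy as np
from quantecon import RBLQ

# Placeholder scalar robust-control problem, for illustration only
A, B, C = np.array([[1.0]]), np.array([[1.0]]), np.array([[0.5]])
Q, R = np.array([[1.0]]), np.array([[1.0]])
rblq = RBLQ(Q, R, A, B, C, beta=0.95, theta=50.0)

K_cand = np.array([[0.01]])      # a candidate worst-case shock matrix (j x n)
F, P = rblq.K_to_F(K_cand)       # best-response policy and value function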
github QuantEcon / QuantEcon.py / solutions / stand_alone_programs / solution_ree_ex4.py
"""
Filename: solution_ree_ex4.py
Authors: Chase Coleman, Spencer Lyon, Thomas Sargent, John Stachurski
Solves an exercise from the rational expectations module
"""

from __future__ import print_function
import numpy as np
from quantecon.lqcontrol import LQ
from solution_ree_ex1 import a0, a1, beta, gamma

A = np.array([[1, 0], [0, 1]])
B = np.array([[1], [0]])
R = - np.array([[a1, -a0 / 2], [-a0 / 2, 0]])
Q = - gamma / 2

lq = LQ(Q, R, A, B, beta=beta)
P, F, d = lq.stationary_values()

F = F.flatten()
m0, m1 = -F[1], 1 - F[0]
print(m0, m1)
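As a quick sanity check, here is a sketch that reuses F, m0 and m1 from above, assuming the state ordering x_t = (Y_t, 1) implied by the A and B matrices.

import numpy as np

Y = 50.0                                # arbitrary starting point
for _ in range(5):
    u = -(F[0] * Y + F[1])              # u_t = -F x_t
    # With Y_{t+1} = Y_t + u_t (per B above), this matches m0 + m1 * Y_t
    assert np.isclose(Y + u, m0 + m1 * Y)
    Y = Y + u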
github QuantEcon / QuantEcon.py / examples / robust_monopolist.py
    for theta in thetas:
        df.loc[theta] = evaluate_policy(theta, F)
        if df.loc[theta, 'entropy'] >= emax:
            break

    df = df.dropna(how='any')
    return df


#-----------------------------------------------------------------------------#
#                                    Main
#-----------------------------------------------------------------------------#


# == Compute the optimal rule == #
optimal_lq = qe.lqcontrol.LQ(Q, R, A, B, C, beta)
Po, Fo, do = optimal_lq.stationary_values()

# == Compute a robust rule given theta == #
baseline_robust = qe.robustlq.RBLQ(Q, R, A, B, C, beta, theta)
Fb, Kb, Pb = baseline_robust.robust_rule()

# == Check the positive definiteness of worst-case covariance matrix to == #
# == ensure that theta exceeds the breakdown point == #
test_matrix = np.identity(Pb.shape[0]) - np.dot(C.T, Pb.dot(C)) / theta
eigenvals, eigenvecs = eig(test_matrix)
assert (eigenvals >= 0).all(), 'theta below breakdown point.'


emax = 1.6e6

optimal_best_case = value_and_entropy(emax, Fo, 'best')
github QuantEcon / QuantEcon.py / quantecon / robustlq.py
    def F_to_K(self, F, method='doubling'):
        """
        Compute the worst-case response K and value function P, given F.

        Parameters
        ----------
        F : array_like(float, ndim=2)
            A policy matrix such that :math:`u_t = -F x_t`
        method : str, optional(default='doubling')
            Solution method used in solving the associated Riccati
            equation, str in {'doubling', 'qz'}.

        Returns
        -------
        K : array_like(float, ndim=2)
            Agent's best cost minimizing response for a given F
        P : array_like(float, ndim=2)
            The value function for a given F

        """
        Q2 = self.beta * self.theta
        R2 = - self.R - dot(F.T, dot(self.Q, F))
        A2 = self.A - dot(self.B, F)
        B2 = self.C
        lq = LQ(Q2, R2, A2, B2, beta=self.beta)
        neg_P, neg_K, d = lq.stationary_values(method=method)

        return -neg_K, -neg_P
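At the robust rule itself, F and the worst-case K are mutual best responses, so applying F_to_K to the robust F should recover the robust K. A hedged sketch, again with placeholder scalar matrices:

import numpy as np
from quantecon import RBLQ

# Placeholder scalar problem, for illustration only
A, B, C = np.array([[1.0]]), np.array([[1.0]]), np.array([[0.3]])
Q, R = np.array([[1.0]]), np.array([[1.0]])
rblq = RBLQ(Q, R, A, B, C, beta=0.95, theta=50.0)

F, K, P = rblq.robust_rule()
K2, P2 = rblq.F_to_K(F)                # worst-case response to the robust F
print(np.allclose(K, K2, atol=1e-5))   # expected: True, up to solver tolerance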
github QuantEcon / QuantEcon.py / quantecon / robustlq.py
    def robust_rule(self, method='doubling'):
        """
        Solves the robust control problem by converting it to a
        stacked LQ problem.

        Returns
        -------
        F : array_like(float, ndim=2)
            The optimal control matrix :math:`u_t = -F x_t`
        P : array_like(float, ndim=2)
            The positive semi-definite matrix defining the value
            function
        K : array_like(float, ndim=2)
            the worst-case shock matrix K, where
            :math:`w_{t+1} = K x_t` is the worst case shock

        """
        # == Simplify names == #
        A, B, C, Q, R = self.A, self.B, self.C, self.Q, self.R
        beta, theta = self.beta, self.theta
        k, j = self.k, self.j
        # == Set up LQ version == #
        I = identity(j)
        Z = np.zeros((k, j))

        if self.pure_forecasting:
            lq = LQ(-beta*I*theta, R, A, C, beta=beta)

            # == Solve and convert back to robust problem == #
            P, f, d = lq.stationary_values(method=method)
            F = np.zeros((self.k, self.n))
            K = -f[:k, :]

        else:
            Ba = hstack([B, C])
            Qa = vstack([hstack([Q, Z]), hstack([Z.T, -beta*I*theta])])
            lq = LQ(Qa, R, A, Ba, beta=beta)

            # == Solve and convert back to robust problem == #
            P, f, d = lq.stationary_values(method=method)
            F = f[:k, :]
            K = -f[k:f.shape[0], :]

        return F, K, P
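The else branch above treats the control u_t and the shock w_{t+1} as a single augmented control. A small dimensional sketch of that stacking, using hypothetical sizes and standalone numpy only:

import numpy as np

n, k, j = 3, 2, 1                 # hypothetical state/control/shock sizes
B = np.zeros((n, k))
C = np.zeros((n, j))
Q = np.eye(k)
beta, theta = 0.95, 50.0

I = np.identity(j)
Z = np.zeros((k, j))
Ba = np.hstack([B, C])            # n x (k + j): loads the stacked [u_t; w_{t+1}]
Qa = np.vstack([np.hstack([Q, Z]),
                np.hstack([Z.T, -beta * I * theta])])
# Qa is block diagonal; the -beta*theta*I block prices the shock, so the
# LQ minimization over the augmented control embeds the malevolent choice of w
assert Ba.shape == (n, k + j) and Qa.shape == (k + j, k + j)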
github QuantEcon / QuantEcon.lectures.code / robustness / robust_monopolist.py
    for θ in θs:
        df.loc[θ] = evaluate_policy(θ, F)
        if df.loc[θ, 'entropy'] >= emax:
            break

    df = df.dropna(how='any')
    return df


# -------------------------------------------------------------------------- #
#                                    Main
# -------------------------------------------------------------------------- #


# == Compute the optimal rule == #
optimal_lq = qe.lqcontrol.LQ(Q, R, A, B, C, β)
Po, Fo, do = optimal_lq.stationary_values()

# == Compute a robust rule given θ == #
baseline_robust = qe.robustlq.RBLQ(Q, R, A, B, C, β, θ)
Fb, Kb, Pb = baseline_robust.robust_rule()

# == Check the positive definiteness of worst-case covariance matrix to == #
# == ensure that θ exceeds the breakdown point == #
test_matrix = np.identity(Pb.shape[0]) - (C.T @ Pb @ C) / θ
eigenvals, eigenvecs = eig(test_matrix)
assert (eigenvals >= 0).all(), 'θ below breakdown point.'


emax = 1.6e6

optimal_best_case = value_and_entropy(emax, Fo, 'best')
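As an aside, an interpretive sketch that is not part of the original script: in the robustness literature the worst-case shock covariance is proportional to the inverse of exactly this test matrix, which is why positive definiteness matters. Reusing test_matrix from the snippet above:

from numpy.linalg import inv

# Interpretive sketch: inv(I - C' Pb C / θ) scales the worst-case shock
# covariance; the eigenvalue check above guarantees the inverse exists
worst_case_cov_scale = inv(test_matrix)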
github QuantEcon / QuantEcon.py / solutions / stand_alone_programs / solution_ree_ex1.py
# == Beliefs == #

kappa0  = 95.5
kappa1  = 0.95

# == Formulate the LQ problem == #

A = np.array([[1, 0, 0], [0, kappa1, kappa0], [0, 0, 1]])
B = np.array([1, 0, 0])
B.shape = 3, 1
R = np.array([[0, -a1/2, a0/2], [-a1/2, 0, 0], [a0/2, 0, 0]])
Q = -0.5 * gamma

# == Solve for the optimal policy == #

lq = LQ(Q, R, A, B, beta=beta)
P, F, d = lq.stationary_values()
F = F.flatten()
out1 = "F = [{0:.3f}, {1:.3f}, {2:.3f}]".format(F[0], F[1], F[2])
h0, h1, h2 = -F[2], 1 - F[0], -F[1]
out2 = "(h0, h1, h2) = ({0:.3f}, {1:.3f}, {2:.3f})".format(h0, h1, h2)

print(out1)
print(out2)
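As a quick sanity check, here is a sketch that reuses F and the h's computed above, assuming the state ordering x_t = (y_t, Y_t, 1) implied by the A and B matrices.

import numpy as np

y, Y = 10.0, 90.0                       # arbitrary test point
u = -(F[0] * y + F[1] * Y + F[2])       # u_t = -F x_t
# With y_{t+1} = y_t + u_t (per B above), this matches h0 + h1*y + h2*Y
assert np.isclose(y + u, h0 + h1 * y + h2 * Y)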