# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
"""
Simulate job / career paths and compute the waiting time to permanent job /
career. In reading the code, recall that optimal_policy[i, j] = policy at
(theta_i, epsilon_j) = either 1, 2 or 3; meaning 'stay put', 'new job' and
'new life'.
"""
import matplotlib.pyplot as plt
import numpy as np
from quantecon import DiscreteRV, compute_fixed_point, CareerWorkerProblem
wp = CareerWorkerProblem()
# Start value iteration from a constant guess of 100 at every state.
v_init = np.ones((wp.N, wp.N))*100
v = compute_fixed_point(wp.bellman,v_init)
# optimal_policy[i, j] in {1, 2, 3}: 'stay put', 'new job', 'new life'.
optimal_policy = wp.get_greedy(v)
# Samplers over career (theta, via F) and job (epsilon, via G) indices.
F = DiscreteRV(wp.F_probs)
G = DiscreteRV(wp.G_probs)
def gen_path(T=20):
    """Simulate T periods of (theta, epsilon) indices under the optimal
    policy, starting from state (0, 0).

    Returns the arrays of theta values and epsilon values visited, in
    period order.
    """
    theta_idx, eps_idx = 0, 0
    thetas, epsilons = [], []
    for _ in range(T):
        action = optimal_policy[theta_idx, eps_idx]
        if action == 2:        # New job: redraw epsilon only
            eps_idx = int(G.draw())
        elif action != 1:      # New life: redraw both indices
            theta_idx, eps_idx = int(F.draw()), int(G.draw())
        # action == 1 ('stay put') leaves both indices unchanged
        thetas.append(theta_idx)
        epsilons.append(eps_idx)
    return wp.theta[thetas], wp.epsilon[epsilons]
import matplotlib.pyplot as plt
import numpy as np
from quantecon import DiscreteRV, CareerWorkerProblem
from quantecon import compute_fixed_point
wp = CareerWorkerProblem()
# Initial guess: constant value of 100 at every (theta, epsilon) state.
v_init = np.ones((wp.N, wp.N))*100
v = compute_fixed_point(wp.bellman, v_init)
# optimal_policy[i, j] in {1, 2, 3}: 'stay put', 'new job', 'new life'.
optimal_policy = wp.get_greedy(v)
# Samplers over career (theta, via F) and job (epsilon, via G) indices.
F = DiscreteRV(wp.F_probs)
G = DiscreteRV(wp.G_probs)
def gen_first_passage_time():
    """Return the number of periods until the worker first 'stays put'.

    Starting from state (0, 0), follow the optimal policy and count the
    periods elapsed until it prescribes action 1 (stay put), i.e. until
    a permanent job / career is reached.

    Fix: removed the unused `theta_index` / `epsilon_index` accumulators
    (dead locals copied from gen_path) and replaced `while 1` with the
    idiomatic `while True`.
    """
    t = 0
    i = j = 0
    while True:
        if optimal_policy[i, j] == 1:     # Stay put: passage complete
            return t
        elif optimal_policy[i, j] == 2:   # New job: redraw epsilon
            j = int(G.draw())
        else:                             # New life: redraw both
            i, j = int(F.draw()), int(G.draw())
        t += 1
M = 25000 # Number of Monte Carlo samples for the first-passage-time simulation
def simulate(self, g1, l1, T):
    """
    Given a policy for consumption (g1) and a policy for continuation
    values (l1), simulate for T periods.

    Returns the consumption, continuation-value and income sequences
    as a (c, w, y) tuple.
    """
    # Unpack the model data used below
    ybar, pi_y = self.ybar, self.pi_y
    ns = self.ybar.size

    # iid income realizations for every period
    income_rv = qe.DiscreteRV(pi_y)
    y_indexes = income_rv.draw(T)

    # Policy indexes must be weakly increasing over time: any value
    # below the running maximum is bumped up to it (done by the
    # fix_indexes helper, defined elsewhere in the project).
    pol_indexes = np.empty(T, dtype=int)
    fix_indexes(ns, T, y_indexes, pol_indexes)

    # Read off the simulated sequences via fancy indexing
    return g1[pol_indexes], l1[pol_indexes], ybar[y_indexes]
import matplotlib.pyplot as plt
import numpy as np
from quantecon import DiscreteRV, CareerWorkerProblem
from quantecon import compute_fixed_point
wp = CareerWorkerProblem()
# Value iteration from a flat initial guess of 100 everywhere.
v_init = np.ones((wp.N, wp.N))*100
v = compute_fixed_point(wp.bellman, v_init)
# optimal_policy[i, j] in {1, 2, 3}: 'stay put', 'new job', 'new life'.
optimal_policy = wp.get_greedy(v)
# Samplers over career (theta, via F) and job (epsilon, via G) indices.
F = DiscreteRV(wp.F_probs)
G = DiscreteRV(wp.G_probs)
def gen_first_passage_time():
    """Return the number of periods until the worker first 'stays put'.

    Starting from state (0, 0), follow the optimal policy and count the
    periods elapsed until it prescribes action 1 (stay put), i.e. until
    a permanent job / career is reached.

    Fix: removed the unused `theta_index` / `epsilon_index` accumulators
    (dead locals copied from gen_path) and replaced `while 1` with the
    idiomatic `while True`.
    """
    t = 0
    i = j = 0
    while True:
        if optimal_policy[i, j] == 1:     # Stay put: passage complete
            return t
        elif optimal_policy[i, j] == 2:   # New job: redraw epsilon
            j = int(G.draw())
        else:                             # New life: redraw both
            i, j = int(F.draw()), int(G.draw())
        t += 1
"""
Simulate job / career paths and compute the waiting time to permanent job /
career. In reading the code, recall that optimal_policy[i, j] = policy at
(theta_i, epsilon_j) = either 1, 2 or 3; meaning 'stay put', 'new job' and
'new life'.
"""
import matplotlib.pyplot as plt
import numpy as np
from quantecon import DiscreteRV, compute_fixed_point, CareerWorkerProblem
wp = CareerWorkerProblem()
# Start value iteration from a constant guess of 100 at every state.
v_init = np.ones((wp.N, wp.N))*100
v = compute_fixed_point(wp.bellman,v_init)
# optimal_policy[i, j] in {1, 2, 3}: 'stay put', 'new job', 'new life'.
optimal_policy = wp.get_greedy(v)
# Samplers over career (theta, via F) and job (epsilon, via G) indices.
F = DiscreteRV(wp.F_probs)
G = DiscreteRV(wp.G_probs)
def gen_path(T=20):
    """Simulate T periods of (theta, epsilon) indices under the optimal
    policy, starting from state (0, 0).

    Returns the arrays of theta values and epsilon values visited, in
    period order.
    """
    i = j = 0
    theta_index = []
    epsilon_index = []
    for t in range(T):
        if optimal_policy[i, j] == 1: # Stay put: state unchanged this period
            pass
        elif optimal_policy[i, j] == 2: # New job: redraw epsilon only
            j = int(G.draw())
        else: # New life: redraw both career and job indices
            i, j = int(F.draw()), int(G.draw())
        theta_index.append(i)
        epsilon_index.append(j)
    # Fancy-indexing maps the recorded indices to actual grid values
    return wp.theta[theta_index], wp.epsilon[epsilon_index]