Merge pull request PSLmodels#84 from talumbau/triv_test
modify package to allow run of trivial test
talumbau committed Jul 13, 2015
2 parents 4cf1cf8 + 3ae892a commit 231096e
Showing 9 changed files with 314 additions and 93 deletions.
53 changes: 32 additions & 21 deletions Python/dynamic/SS.py
@@ -81,7 +81,9 @@
------------------------------------------------------------------------
'''

from dynamic.parameters import *
from .parameters import get_parameters
globals().update(get_parameters())
from .parameters import DATASET

'''
------------------------------------------------------------------------
@@ -248,7 +250,12 @@ def function_to_minimize(chi_params_scalars, chi_params_init, params, iterative_
# labor calibration euler
lab_data_dict = pickle.load(open("OUTPUT/Saved_moments/labor_data_moments.pkl", "r"))
labor_sim = (n_new.reshape(S, J)*lambdas.reshape(1, J)).sum(axis=1)
error6 = list(utils.perc_dif_func(labor_sim, lab_data_dict['labor_dist_data']))
if DATASET == 'SMALL':
lab_dist_data = lab_data_dict['labor_dist_data'][:S]
else:
lab_dist_data = lab_data_dict['labor_dist_data']

error6 = list(utils.perc_dif_func(labor_sim, lab_dist_data))
# combine eulers
output = np.array(error5 + error6)
# Constraints
@@ -284,23 +291,28 @@ def run_steady_state(ss_parameters, iterative_params, get_baseline=False):
if get_baseline:
# Generate initial guesses for chi^b_j and chi^n_s
chi_params = np.zeros(S+J)
chi_params[0:J] = np.array([2, 10, 90, 350, 1700, 22000, 120000])
chi_n_guess = np.array([47.12000874 , 22.22762421 , 14.34842241 , 10.67954008 , 8.41097278
, 7.15059004 , 6.46771332 , 5.85495452 , 5.46242013 , 5.00364263
, 4.57322063 , 4.53371545 , 4.29828515 , 4.10144524 , 3.8617942 , 3.57282
, 3.47473172 , 3.31111347 , 3.04137299 , 2.92616951 , 2.58517969
, 2.48761429 , 2.21744847 , 1.9577682 , 1.66931057 , 1.6878927
, 1.63107201 , 1.63390543 , 1.5901486 , 1.58143606 , 1.58005578
, 1.59073213 , 1.60190899 , 1.60001831 , 1.67763741 , 1.70451784
, 1.85430468 , 1.97291208 , 1.97017228 , 2.25518398 , 2.43969757
, 3.21870602 , 4.18334822 , 4.97772026 , 6.37663164 , 8.65075992
, 9.46944758 , 10.51634777 , 12.13353793 , 11.89186997 , 12.07083882
, 13.2992811 , 14.07987878 , 14.19951571 , 14.97943562 , 16.05601334
, 16.42979341 , 16.91576867 , 17.62775142 , 18.4885405 , 19.10609921
, 20.03988031 , 20.86564363 , 21.73645892 , 22.6208256 , 23.37786072
, 24.38166073 , 25.22395387 , 26.21419653 , 27.05246704 , 27.86896121
, 28.90029708 , 29.83586775 , 30.87563699 , 31.91207845 , 33.07449767
, 34.27919965 , 35.57195873 , 36.95045988 , 38.62308152])
if DATASET == 'REAL':
chi_params[0:J] = np.array([2, 10, 90, 350, 1700, 22000, 120000])
chi_n_guess = np.array([47.12000874 , 22.22762421 , 14.34842241 , 10.67954008 , 8.41097278
, 7.15059004 , 6.46771332 , 5.85495452 , 5.46242013 , 5.00364263
, 4.57322063 , 4.53371545 , 4.29828515 , 4.10144524 , 3.8617942 , 3.57282
, 3.47473172 , 3.31111347 , 3.04137299 , 2.92616951 , 2.58517969
, 2.48761429 , 2.21744847 , 1.9577682 , 1.66931057 , 1.6878927
, 1.63107201 , 1.63390543 , 1.5901486 , 1.58143606 , 1.58005578
, 1.59073213 , 1.60190899 , 1.60001831 , 1.67763741 , 1.70451784
, 1.85430468 , 1.97291208 , 1.97017228 , 2.25518398 , 2.43969757
, 3.21870602 , 4.18334822 , 4.97772026 , 6.37663164 , 8.65075992
, 9.46944758 , 10.51634777 , 12.13353793 , 11.89186997 , 12.07083882
, 13.2992811 , 14.07987878 , 14.19951571 , 14.97943562 , 16.05601334
, 16.42979341 , 16.91576867 , 17.62775142 , 18.4885405 , 19.10609921
, 20.03988031 , 20.86564363 , 21.73645892 , 22.6208256 , 23.37786072
, 24.38166073 , 25.22395387 , 26.21419653 , 27.05246704 , 27.86896121
, 28.90029708 , 29.83586775 , 30.87563699 , 31.91207845 , 33.07449767
, 34.27919965 , 35.57195873 , 36.95045988 , 38.62308152])
elif DATASET == 'SMALL':
chi_params[0:J] = np.array([1, 100000])
chi_n_guess = np.array([5, 6, 7, 8, 9, 10, 11, 12, 13, 14])

chi_params[J:] = chi_n_guess
chi_params = list(chi_params)
# First run SS simulation with guesses at initial values for b, n, w, r, etc
@@ -322,8 +334,7 @@ def run_steady_state(ss_parameters, iterative_params, get_baseline=False):
# minimizer perturbs that value by 1e-8, the % difference will be extremely small, outside of the tolerance of the
# minimizer, and it will not change that parameter.
chi_params_scalars = np.ones(S+J)
chi_params_scalars = opt.minimize(function_to_minimize_X, chi_params_scalars, method='TNC', tol=1e-14, bounds=bnds, options={'maxiter': 1}).x
# chi_params_scalars = opt.minimize(function_to_minimize_X, chi_params_scalars, method='TNC', tol=1e-14, bounds=bnds).x
chi_params_scalars = opt.minimize(function_to_minimize_X, chi_params_scalars, method='TNC', tol=MINIMIZER_TOL, bounds=bnds, options={'maxiter': 1}).x
chi_params *= chi_params_scalars
print 'The final scaling params', chi_params_scalars
print 'The final bequest parameter values:', chi_params
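
The star import at the top of SS.py is replaced by get_parameters(), whose returned dictionary is pushed into the module namespace with globals().update(), so the rest of the file keeps using bare names such as S, J and lambdas while the parameter set itself can be chosen at runtime via DATASET. A minimal, self-contained sketch of the pattern (S and beta are illustrative stand-ins, not the repo's full parameter list):

def get_parameters():
    S = 10                        # number of model periods
    beta = 0.96                   # discount factor
    return dict(locals())         # every local binding, as a name -> value dict

globals().update(get_parameters())    # S and beta become module-level names

print S * beta                        # downstream code keeps using bare names
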
16 changes: 9 additions & 7 deletions Python/dynamic/TPI.py
@@ -91,7 +91,8 @@
------------------------------------------------------------------------
'''

from dynamic.parameters import *
from .parameters import get_parameters
globals().update(get_parameters())

def create_tpi_params(a_tax_income, b_tax_income, c_tax_income,
d_tax_income,
@@ -301,12 +302,13 @@ def run_time_path_iteration(Kss, Lss, Yss, BQss, theta, income_tax_params, wealt
Kpath_TPI = list(Kinit) + list(np.ones(10)*Kss)
Lpath_TPI = list(Linit) + list(np.ones(10)*Lss)
# Plot TPI for K for each iteration, so we can see if there is a problem
plt.figure()
plt.axhline(
y=Kss, color='black', linewidth=2, label=r"Steady State $\hat{K}$", ls='--')
plt.plot(np.arange(
T+10), Kpath_TPI[:T+10], 'b', linewidth=2, label=r"TPI time path $\hat{K}_t$")
plt.savefig("OUTPUT/TPI_K")
if PLOT_TPI == True:
plt.figure()
plt.axhline(
y=Kss, color='black', linewidth=2, label=r"Steady State $\hat{K}$", ls='--')
plt.plot(np.arange(
T+10), Kpath_TPI[:T+10], 'b', linewidth=2, label=r"TPI time path $\hat{K}_t$")
plt.savefig("OUTPUT/TPI_K")
# Uncomment the following print statements to make sure all euler equations are converging
for j in xrange(J):
b_mat[1, -1, j], n_mat[0, -1, j] = np.array(opt.fsolve(SS_TPI_firstdoughnutring, [guesses_b[1, -1, j], guesses_n[0, -1, j]],
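
The plotting block in run_time_path_iteration is now gated on the new PLOT_TPI flag, which the reduced parameter set switches off so a test run stays fast and needs no display. Not part of this commit, but a common companion when plotting is optional is forcing a non-interactive matplotlib backend so any remaining savefig calls also work on headless machines; a hedged sketch:

import matplotlib
matplotlib.use('Agg')              # must run before pyplot is imported
import matplotlib.pyplot as plt

plt.figure()
plt.plot([0, 1, 2], [1, 2, 4])
plt.savefig('example_plot.png')    # writes the figure without an X server
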
3 changes: 1 addition & 2 deletions Python/dynamic/__init__.py
@@ -1,3 +1,2 @@
import wealth
import dynamic
import parameters
import wealth
8 changes: 6 additions & 2 deletions Python/dynamic/household.py
@@ -47,8 +47,12 @@ def marg_ut_labor(n, chi_n, params):
Returns: Marginal Utility of Labor
'''
J, S, T, beta, sigma, alpha, Z, delta, ltilde, nu, g_y, tau_payroll, retire, mean_income_data, a_tax_income, b_tax_income, c_tax_income, d_tax_income, h_wealth, p_wealth, m_wealth, b_ellipse, upsilon = params
deriv = b_ellipse * (1/ltilde) * ((1 - (n / ltilde) ** upsilon) ** (
(1/upsilon)-1)) * (n / ltilde) ** (upsilon - 1)
try:
deriv = b_ellipse * (1/ltilde) * ((1 - (n / ltilde) ** upsilon) ** (
(1/upsilon)-1)) * (n / ltilde) ** (upsilon - 1)
except ValueError as ve:
deriv = 1e-5

output = chi_n * deriv
return output

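
For reference, the quantity guarded by the new try/except in marg_ut_labor is the derivative of the elliptical disutility-of-labor term; written out, the expression computed above is

\frac{\partial v(n)}{\partial n} = \frac{b}{\tilde{l}} \left(1 - \left(\frac{n}{\tilde{l}}\right)^{\upsilon}\right)^{\frac{1}{\upsilon} - 1} \left(\frac{n}{\tilde{l}}\right)^{\upsilon - 1}

The fractional exponents are undefined for a trial n outside [0, ltilde], which is presumably the ValueError the except branch catches; the commit substitutes a small positive derivative (1e-5) so the optimizer can keep iterating instead of crashing.
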
1 change: 0 additions & 1 deletion Python/dynamic/income.py
@@ -190,4 +190,3 @@ def get_e(S, J, starting_age, ending_age, bin_weights, omega_SS, flag_graphs):
graph_income(S, J, e_final, starting_age, ending_age, bin_weights)
e_final /= (e_final * omega_SS).sum()
return e_final

180 changes: 128 additions & 52 deletions Python/dynamic/parameters.py
@@ -2,55 +2,131 @@
from demographics import get_omega
from income import get_e

# Parameters
S = 80
J = 7
T = int(2 * S)
lambdas = np.array([.25, .25, .2, .1, .1, .09, .01])
starting_age = 20
ending_age = 100
E = int(starting_age * (S / float(ending_age-starting_age)))
beta_annual = .96
beta = beta_annual ** (float(ending_age-starting_age) / S)
sigma = 3.0
alpha = .35
Z = 1.0
delta_annual = .05
delta = 1 - ((1-delta_annual) ** (float(ending_age-starting_age) / S))
ltilde = 1.0
g_y_annual = 0.03
g_y = (1 + g_y_annual)**(float(ending_age-starting_age)/S) - 1
# TPI parameters
maxiter = 250
mindist_SS = 1e-9
mindist_TPI = 1e-6
nu = .40
# Ellipse parameters
b_ellipse = 25.6594
k_ellipse = -26.4902
upsilon = 3.0542
# Tax parameters:
mean_income_data = 84377.0
a_tax_income = 3.03452713268985e-06
b_tax_income = .222
c_tax_income = 133261.0
d_tax_income = .219
retire = np.round(9.0 * S / 16.0) - 1
# Wealth tax params
# These won't be used for the wealth tax; h and m just need
# to be nonzero to avoid errors
h_wealth = 0.1
m_wealth = 1.0
p_wealth = 0.0
# Tax parameters that are zeroed out for SS
# Initial taxes below
tau_bq = np.zeros(J)
tau_payroll = 0.15
# Flag to prevent graphing from occurring in demographic, income, wealth, and labor files
flag_graphs = False
# Generate Income and Demographic parameters
omega, g_n, omega_SS, surv_rate = get_omega(
S, J, T, lambdas, starting_age, ending_age, E, flag_graphs)
e = get_e(S, J, starting_age, ending_age, lambdas, omega_SS, flag_graphs)
rho = 1-surv_rate
rho[-1] = 1.0
DATASET = 'REAL'

def get_parameters():
if DATASET == 'REAL':
return get_full_parameters()
elif DATASET == 'SMALL':
return get_reduced_parameters()
else:
raise ValueError("Unknown value {0}".format(DATASET))


def get_reduced_parameters():
# Parameters
MINIMIZER_TOL = 1e-3
PLOT_TPI = False
S = 10
J = 2
T = int(2 * S)
#lambdas = np.array([.25, .25, .2, .1, .1, .09, .01])
lambdas = np.array([.50, .50])
starting_age = 40
ending_age = 50
E = int(starting_age * (S / float(ending_age-starting_age)))
beta_annual = .96
beta = beta_annual ** (float(ending_age-starting_age) / S)
sigma = 3.0
alpha = .35
Z = 1.0
delta_annual = .05
delta = 1 - ((1-delta_annual) ** (float(ending_age-starting_age) / S))
ltilde = 1.0
g_y_annual = 0.03
g_y = (1 + g_y_annual)**(float(ending_age-starting_age)/S) - 1
# TPI parameters
maxiter = 10
mindist_SS = 1e-3
mindist_TPI = 1e-6
nu = .40
# Ellipse parameters
b_ellipse = 25.6594
k_ellipse = -26.4902
upsilon = 3.0542
# Tax parameters:
mean_income_data = 84377.0
a_tax_income = 3.03452713268985e-06
b_tax_income = .222
c_tax_income = 133261.0
d_tax_income = .219
retire = np.round(9.0 * S / 16.0) - 1
# Wealth tax params
# These won't be used for the wealth tax; h and m just need
# to be nonzero to avoid errors
h_wealth = 0.1
m_wealth = 1.0
p_wealth = 0.0
# Tax parameters that are zeroed out for SS
# Initial taxes below
tau_bq = np.zeros(J)
tau_payroll = 0.15
# Flag to prevent graphing from occurring in demographic, income, wealth, and labor files
flag_graphs = False
# Generate Income and Demographic parameters
omega, g_n, omega_SS, surv_rate = get_omega(
S, J, T, lambdas, starting_age, ending_age, E, flag_graphs)
e = np.array([[0.25, 1.25]] * 10)
rho = 1-surv_rate
rho[-1] = 1.0
allvars = dict(locals())
return allvars

def get_full_parameters():
# Parameters
MINIMIZER_TOL = 1e-14
PLOT_TPI = True
S = 80
J = 7
T = int(2 * S)
lambdas = np.array([.25, .25, .2, .1, .1, .09, .01])
starting_age = 20
ending_age = 100
E = int(starting_age * (S / float(ending_age-starting_age)))
beta_annual = .96
beta = beta_annual ** (float(ending_age-starting_age) / S)
sigma = 3.0
alpha = .35
Z = 1.0
delta_annual = .05
delta = 1 - ((1-delta_annual) ** (float(ending_age-starting_age) / S))
ltilde = 1.0
g_y_annual = 0.03
g_y = (1 + g_y_annual)**(float(ending_age-starting_age)/S) - 1
# TPI parameters
maxiter = 250
mindist_SS = 1e-9
mindist_TPI = 1e-6
nu = .40
# Ellipse parameters
b_ellipse = 25.6594
k_ellipse = -26.4902
upsilon = 3.0542
# Tax parameters:
mean_income_data = 84377.0
a_tax_income = 3.03452713268985e-06
b_tax_income = .222
c_tax_income = 133261.0
d_tax_income = .219
retire = np.round(9.0 * S / 16.0) - 1
# Wealth tax params
# These won't be used for the wealth tax; h and m just need
# to be nonzero to avoid errors
h_wealth = 0.1
m_wealth = 1.0
p_wealth = 0.0
# Tax parameters that are zeroed out for SS
# Initial taxes below
tau_bq = np.zeros(J)
tau_payroll = 0.15
# Flag to prevent graphing from occurring in demographic, income, wealth, and labor files
flag_graphs = False
# Generate Income and Demographic parameters
omega, g_n, omega_SS, surv_rate = get_omega(
S, J, T, lambdas, starting_age, ending_age, E, flag_graphs)
e = get_e(S, J, starting_age, ending_age, lambdas, omega_SS, flag_graphs)
rho = 1-surv_rate
rho[-1] = 1.0
allvars = dict(locals())
return allvars
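
The module-level constants are now wrapped in two factory functions selected by the DATASET flag, and each returns dict(locals()), so a caller gets every parameter (including derived arrays such as omega, e and rho) in a single dictionary. A minimal sketch of how a caller drives the switch, following run_simulation.py and the new test; DATASET has to be set before dynamic.SS or dynamic.TPI are imported, because those modules call get_parameters() at import time:

import dynamic
dynamic.parameters.DATASET = 'SMALL'     # pick the reduced calibration first

import dynamic.SS                        # reads DATASET while importing
import dynamic.TPI

params = dynamic.parameters.get_parameters()
print params['S'], params['J']           # 10 2 for the reduced problem
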

3 changes: 3 additions & 0 deletions Python/dynamic/tests/test_basic.py
@@ -6,3 +6,6 @@
def test_import_ok():
import dynamic

def test_run_small():
from run_small import runner
runner()
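
test_run_small imports runner from a run_small module whose diff is not displayed on this page. A hypothetical sketch of the minimum such a module needs, consistent with the import-order note above (everything beyond the runner name is an assumption, not taken from the commit):

# run_small.py -- hypothetical sketch, not the file from this commit
import dynamic
dynamic.parameters.DATASET = 'SMALL'     # select the 10x2 calibration up front

def runner():
    # Import the solver only after DATASET is set so it picks up the small set.
    from dynamic import SS
    params = dynamic.parameters.get_parameters()
    assert params['S'] == 10 and params['J'] == 2
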
15 changes: 7 additions & 8 deletions Python/run_simulation.py
@@ -17,18 +17,18 @@
------------------------------------------------------------------------
'''

import numpy as np
import cPickle as pickle
import os
from glob import glob
from subprocess import call
from dynamic import parameters, wealth, labor, demographics, income, SS, TPI
import numpy as np

#import income_polynomials as income
#import demographics
#import wealth_data
#import labor_data
import dynamic
dynamic.parameters.DATASET = 'REAL'

import dynamic.SS
import dynamic.TPI
from dynamic import parameters, wealth, labor, demographics, income, SS, TPI

'''
------------------------------------------------------------------------
@@ -88,14 +88,13 @@
------------------------------------------------------------------------
'''

from dynamic.parameters import *
globals().update(dynamic.parameters.get_parameters())

# Flag to prevent graphing from occurring in demographic, income, wealth, and labor files
flag_graphs = False
# Generate Income and Demographic parameters
omega, g_n, omega_SS, surv_rate = demographics.get_omega(S, J, T, lambdas, starting_age, ending_age, E, flag_graphs)
# S, J, T, lambdas, starting_age, ending_age, E, flag_graphs)
#e = income.get_e()
e = income.get_e(S, J, starting_age, ending_age, lambdas, omega_SS, flag_graphs)
rho = 1-surv_rate
rho[-1] = 1.0
(The diff for the ninth changed file is not displayed on this page.)
