
Merge pull request #401 from jdebacker/test_demog
Tests for demographics.py
jdebacker authored Nov 8, 2018
2 parents 36177c1 + 8543da9 commit 8e7110f
Showing 9 changed files with 88 additions and 31 deletions.
4 changes: 2 additions & 2 deletions ogusa/SS.py
@@ -366,7 +366,7 @@ def inner_loop(outer_loop_vars, params, baseline,
args=euler_params,
xtol=MINIMIZER_TOL,
full_output=True))
- results = compute(*lazy_values, get=dask.multiprocessing.get,
+ results = compute(*lazy_values, scheduler=dask.multiprocessing.get,
num_workers=num_workers)

# for j, result in results.items():
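The change above (repeated in TPI.py, get_micro_data.py, and txfunc.py below) swaps dask's deprecated get= keyword for scheduler=. A minimal, self-contained sketch of the same delayed/compute pattern, assuming a dask release that also accepts the scheduler by name; square() and its inputs are placeholders, not ogusa code:

from dask import compute, delayed

def square(x):
    # stand-in for the per-j work that inner_loop farms out to worker processes
    return x * x

if __name__ == '__main__':
    lazy_values = [delayed(square)(i) for i in range(8)]
    results = compute(*lazy_values, scheduler='processes', num_workers=4)
    print(results)  # (0, 1, 4, 9, 16, 25, 36, 49)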
@@ -1105,7 +1105,7 @@ def run_SS(income_tax_params, ss_params, iterative_params, chi_params,
# For initial guesses of w, r, T_H, and factor, we use values that
# are close to some steady state values.
if baseline:
- rguess = 0.04 # 0.01 + delta
+ rguess = 0.06 # 0.01 + delta
# wguess = 1.2
T_Hguess = 0.12
factorguess = 70000
4 changes: 2 additions & 2 deletions ogusa/TPI.py
@@ -733,7 +733,7 @@ def run_TPI(income_tax_params, tpi_params, iterative_params,
lazy_values.append(
delayed(inner_loop)(guesses, outer_loop_vars,
inner_loop_params, j))
- results = compute(*lazy_values, get=dask.multiprocessing.get,
+ results = compute(*lazy_values, scheduler=dask.multiprocessing.get,
num_workers=num_workers)
for j, result in enumerate(results):
euler_errors[:, :, j], b_mat[:, :, j], n_mat[:, :, j] = result
@@ -924,7 +924,7 @@ def run_TPI(income_tax_params, tpi_params, iterative_params,
lazy_values.append(
delayed(inner_loop)(guesses, outer_loop_vars,
inner_loop_params, j))
- results = compute(*lazy_values, get=dask.multiprocessing.get,
+ results = compute(*lazy_values, scheduler=dask.multiprocessing.get,
num_workers=num_workers)
for j, result in enumerate(results):
euler_errors[:, :, j], b_mat[:, :, j], n_mat[:, :, j] = result
2 changes: 1 addition & 1 deletion ogusa/get_micro_data.py
@@ -170,7 +170,7 @@ def get_data(baseline=False, start_year=DEFAULT_START_YEAR, reform={},
for year in range(start_year + 1, TC_LAST_YEAR + 1):
lazy_values.append(
delayed(taxcalc_advance)(calc1, year, length))
- results = compute(*lazy_values, get=dask.multiprocessing.get,
+ results = compute(*lazy_values, scheduler=dask.multiprocessing.get,
num_workers=num_workers)
# for i, result in results.items():
for i, result in enumerate(results):
10 changes: 6 additions & 4 deletions ogusa/macro_output.py
@@ -23,6 +23,8 @@
except ImportError:
import pickle
import os
+ from ogusa.utils import safe_read_pickle


def dump_diff_output(baseline_dir, policy_dir):
'''
@@ -72,9 +74,9 @@ def dump_diff_output(baseline_dir, policy_dir):
if not os.path.exists(tpi_policy_dir):
os.mkdir(tpi_policy_dir)
tpi_macro_vars_policy_path = os.path.join(tpi_policy_dir, "TPI_vars.pkl")
- tpi_macro_vars_policy = pickle.load(open(tpi_macro_vars_policy_path, "rb"))
+ tpi_macro_vars_policy = safe_read_pickle(tpi_macro_vars_policy_path)
tpi_macro_vars_baseline_path = os.path.join(tpi_baseline_dir, "TPI_vars.pkl")
- tpi_macro_vars_baseline = pickle.load(open(tpi_macro_vars_baseline_path, "rb"))
+ tpi_macro_vars_baseline = safe_read_pickle(tpi_macro_vars_baseline_path)

T = len(tpi_macro_vars_baseline['C'])
baseline_macros = np.zeros((7,T))
@@ -103,9 +105,9 @@ def dump_diff_output(baseline_dir, policy_dir):

## Load SS results
ss_policy_path = os.path.join(policy_dir, "SS", "SS_vars.pkl")
- ss_policy = pickle.load(open( ss_policy_path, "rb" ))
+ ss_policy = safe_read_pickle(ss_policy_path)
ss_baseline_path = os.path.join(baseline_dir, "SS", "SS_vars.pkl")
- ss_baseline = pickle.load(open( ss_baseline_path, "rb" ) )
+ ss_baseline = safe_read_pickle( ss_baseline_path)
# pct changes in macro aggregates in SS
pct_changes[0,11] = (ss_policy['Yss']-ss_baseline['Yss'])/ss_baseline['Yss']
pct_changes[1,11] = (ss_policy['Css']-ss_baseline['Css'])/ss_baseline['Css']
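Throughout this file the bare pickle.load(open(...)) calls are replaced with ogusa.utils.safe_read_pickle, imported at the top of the module. The real helper lives in ogusa/utils.py and may differ; the sketch below is only a hypothetical illustration of what such a wrapper typically does, namely owning the file handle and tolerating pickles written under Python 2:

import pickle

def read_pickle_compat(path):
    # hypothetical stand-in for safe_read_pickle; not the ogusa implementation
    with open(path, 'rb') as f:
        try:
            return pickle.load(f)
        except UnicodeDecodeError:
            # fall back for pickles written under Python 2
            f.seek(0)
            return pickle.load(f, encoding='latin1')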
2 changes: 1 addition & 1 deletion ogusa/scripts/execute.py
@@ -21,7 +21,7 @@


def runner(output_base, baseline_dir, test=False, time_path=True,
- baseline=False, constant_rates=True, tax_func_type='DEP',
+ baseline=False, constant_rates=False, tax_func_type='DEP',
analytical_mtrs=False, age_specific=False, reform={},
user_params={}, guid='', run_micro=True, small_open=False,
budget_balance=False, baseline_spending=False, data=None,
54 changes: 54 additions & 0 deletions ogusa/tests/test_demographics.py
@@ -0,0 +1,54 @@
import numpy as np
import pytest
from ogusa import demographics


def test_get_pop_objs():
"""
Test that omega_SS and the last period of omega_path_S are
close to each other.
"""
E = 20
S = 80
T = int(round(4.0 * S))
start_year = 2018

(omega, g_n_ss, omega_SS, surv_rate, rho, g_n_vector, imm_rates,
omega_S_preTP) = demographics.get_pop_objs(E, S, T, 1, 100,
start_year, False)

assert (np.allclose(omega_SS, omega[-1, :]))


def test_pop_smooth():
"""
Test that population growth rates evolve smoothly.
"""
E = 20
S = 80
T = int(round(4.0 * S))
start_year = 2018

(omega, g_n_ss, omega_SS, surv_rate, rho, g_n_vector, imm_rates,
omega_S_preTP) = demographics.get_pop_objs(E, S, T, 1, 100,
start_year, False)

assert (np.any(np.absolute(omega[:-1, :] - omega[1:, :]) < 0.0001))
assert (np.any(np.absolute(g_n_vector[:-1] - g_n_vector[1:]) < 0.0001))


def test_imm_smooth():
"""
Test that immigration rates evolve smoothly.
"""
E = 20
S = 80
T = int(round(4.0 * S))
start_year = 2018

(omega, g_n_ss, omega_SS, surv_rate, rho, g_n_vector, imm_rates,
omega_S_preTP) = demographics.get_pop_objs(E, S, T, 1, 100,
start_year, False)

assert (np.any(np.absolute(imm_rates[:-1, :] - imm_rates[1:, :]) <
0.0001))
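The three new tests share one pattern: call demographics.get_pop_objs and assert that adjacent periods of the returned paths differ by less than 1e-4. A toy illustration of that adjacent-period check with a made-up series in place of the model output (to run just this module, assuming pytest is installed: pytest ogusa/tests/test_demographics.py -q):

import numpy as np

path = np.linspace(0.010, 0.012, 100)      # made-up, smoothly evolving series
diffs = np.absolute(path[:-1] - path[1:])  # period-to-period changes
assert np.any(diffs < 0.0001)              # same check as the tests above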
2 changes: 1 addition & 1 deletion ogusa/txfunc.py
@@ -1555,7 +1555,7 @@ def tax_func_estimate(BW, S, starting_age, ending_age,
analytical_mtrs, desc_data,
graph_data, graph_est, output_dir,
numparams, tpers))
- results = compute(*lazy_values, get=dask.multiprocessing.get,
+ results = compute(*lazy_values, scheduler=dask.multiprocessing.get,
num_workers=num_workers)

# for i, result in results.items():
27 changes: 14 additions & 13 deletions regression/run_reg_reforms.py
@@ -112,14 +112,15 @@ def run_micro_macro(user_params, reform=None, baseline_dir=BASELINE_DIR,
------------------------------------------------------------------------
'''
print('path exists', not os.path.exists(baseline_dir), ok_to_run_baseline)
- if not os.path.exists(baseline_dir) and ok_to_run_baseline:
+ # if not os.path.exists(baseline_dir) and ok_to_run_baseline:
+ if ok_to_run_baseline:
output_base = baseline_dir
input_dir = baseline_dir
kwargs={'output_base':baseline_dir, 'baseline_dir':baseline_dir,
'test':False, 'time_path':True, 'baseline':True,
'analytical_mtrs':False, 'age_specific':True,
'user_params':user_params,'guid':'baseline',
- 'run_micro':True, 'small_open': False, 'budget_balance':False,
+ 'run_micro':False, 'small_open': False, 'budget_balance':False,
'baseline_spending':False, 'data': data}
#p1 = Process(target=runner, kwargs=kwargs)
#p1.start()
@@ -143,21 +144,21 @@ def run_reforms(ref_idxs=REF_IDXS, path_prefix="", cpu_count=CPU_COUNT,

ans = postprocess.create_diff(baseline_dir=baseline_dir, policy_dir=reform_dir)

print "total time was ", (time.time() - start_time)
print 'Percentage changes in aggregates:', ans
print("total time was ", (time.time() - start_time))
print('Percentage changes in aggregates:', ans)


def run_reforms(ref_idxs=REF_IDXS, path_prefix="", cpu_count=CPU_COUNT,
data=DATA):
# make sure we have a baseline result before other reforms are run
- # ok_to_run_baseline = True
- # run_micro_macro({},
- # reforms[0],
- # "./{0}OUTPUT_BASELINE".format(path_prefix),
- # "./{0}OUTPUT_REFORM_{1}".format(path_prefix, 0),
- # str(0),
- # data,
- # ok_to_run_baseline,)
+ ok_to_run_baseline = True
+ run_micro_macro({},
+ reforms[0],
+ "./{0}OUTPUT_BASELINE".format(path_prefix),
+ "./{0}OUTPUT_REFORM_{1}".format(path_prefix, 0),
+ str(0),
+ data,
+ ok_to_run_baseline,)
# run reforms in parallel
pool = Pool(processes=cpu_count)
# results = []
@@ -185,7 +186,7 @@ def run_reforms(ref_idxs=REF_IDXS, path_prefix="", cpu_count=CPU_COUNT,
results = []

ok_to_run_baseline = False
- for i in range(0, len(reforms)):
+ for i in range(1, len(reforms)):
args = ({},
reforms[i],
"./{0}OUTPUT_BASELINE".format(path_prefix),
14 changes: 7 additions & 7 deletions regression/test_regression.py
@@ -1,4 +1,5 @@
from ogusa.macro_output import dump_diff_output
+ from ogusa.utils import safe_read_pickle
import matplotlib.pyplot as plt
import numpy as np
import pickle
@@ -9,7 +10,8 @@
CURDIR = os.path.abspath(os.path.dirname(__file__))
REG_BASELINE = os.path.join(CURDIR, 'regression_results/REG_OUTPUT_BASELINE')
REG_REFORM = os.path.join(CURDIR, 'regression_results/REG_OUTPUT_REFORM_{ref_idx}')
- REF_IDXS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ # REF_IDXS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ REF_IDXS = [0, 1, 2, 3, 4, 5, 6, 7]

BASELINE = os.path.join(CURDIR, 'OUTPUT_BASELINE')
REFORM = os.path.join(CURDIR, 'OUTPUT_REFORM_{ref_idx}')
@@ -63,8 +65,7 @@ def test_macro_output(macro_outputs, macro_var_idx):
@pytest.fixture(scope="module", params=REF_IDXS + ["baseline"])
def tpi_output(request):
def get_tpi_output(path):
with open(path + "/TPI/TPI_vars.pkl", 'rb') as f:
return pickle.load(f)
return safe_read_pickle(path + "/TPI/TPI_vars.pkl")

ref_idx = request.param
if ref_idx == "baseline":
@@ -97,8 +98,7 @@ def test_tpi_vars(tpi_output, tpi_var):
@pytest.fixture(scope="module", params=REF_IDXS + ["baseline"])
def txfunc_output(request):
def get_txfunc_output(path):
- with open(path, 'rb') as f:
- return pickle.load(f)
+ return safe_read_pickle(path)

ref_idx = request.param
if ref_idx == "baseline":
@@ -109,9 +109,9 @@ def get_txfunc_output(path):

else:
reg_path = (REG_REFORM.format(ref_idx=ref_idx) +
"/TxFuncEst_{idx}.pkl".format(idx=ref_idx))
"/TxFuncEst_policy{idx}.pkl".format(idx=ref_idx))
path = (REFORM.format(ref_idx=ref_idx) +
"/TxFuncEst_{idx}.pkl".format(idx=ref_idx))
"/TxFuncEst_policy{idx}.pkl".format(idx=ref_idx))

return (get_txfunc_output(reg_path), get_txfunc_output(path))
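Both fixtures above use pytest's module-scoped, parametrized fixture pattern: each test that requests the fixture runs once per REF_IDXS entry plus once for "baseline". A self-contained sketch of that pattern with illustrative params and payloads (none of these names come from the repo):

import pytest

@pytest.fixture(scope="module", params=[0, 1, "baseline"])
def pair_of_outputs(request):
    # built once per param value and shared by every test in the module
    idx = 0 if request.param == "baseline" else request.param
    return (idx * idx, idx ** 2)  # stand-ins for (regression result, new result)

def test_outputs_match(pair_of_outputs):
    expected, actual = pair_of_outputs
    assert expected == actual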

