From 4f644bf1a22362ae5fd1b20570ecad4074456edb Mon Sep 17 00:00:00 2001 From: Mridul Seth Date: Sat, 5 Aug 2023 17:09:33 +0400 Subject: [PATCH] Remove jupytext sync .py files from examples (#1335) --- examples/Calibration/Income_calibrations.py | 98 --- examples/Calibration/Life_Cycle_example.py | 118 --- examples/Calibration/SCF_distributions.py | 59 -- .../Sabelhaus_Song_var_profiles.py | 72 -- examples/Calibration/US_SSA_life_tables.py | 73 -- .../example_AccidentalBequest.py | 128 --- .../example_ConsIndShockComp.py | 39 - .../example_TerminalBequest.py | 128 --- .../ConsIndShockModel/Finite Cyclical Test.py | 95 --- .../ConsIndShockModel/IndShockConsumerType.py | 283 ------- .../IndShockConsumerType_Jacobian_Example.py | 180 ---- ...kConsumerType_Transition_Matrix_Example.py | 771 ----------------- .../ConsIndShockModel/KinkedRconsumerType.py | 230 ----- .../PerfForesightConsumerType.py | 298 ------- .../example_ConsPortfolioModel.py | 431 ---------- .../example_ConsRiskyAssetModel.py | 312 ------- .../example_ConsSequentialPortfolioModel.py | 171 ---- .../example_ConsAggShockModel.py | 298 ------- .../example_ConsGenIncProcessModel.py | 204 ----- .../ConsumptionSaving/example_ConsIndShock.py | 198 ----- .../example_ConsLaborModel.py | 302 ------- .../example_ConsMarkovModel.py | 352 -------- .../ConsumptionSaving/example_ConsMedModel.py | 143 ---- .../example_ConsPrefShockModel.py | 206 ----- .../example_ConsRepAgentModel.py | 127 --- .../example_ConsRiskyContribModel.py | 303 ------- .../example_TractableBufferStockModel.py | 159 ---- .../DiscreteDistributionLabeled.py | 148 ---- examples/Distributions/ExpectedValue.py | 202 ----- .../FrameAgentType/FrameAgentType Demo.py | 258 ------ examples/FrameAgentType/FrameModels.py | 503 ----------- .../GenIncProcessModel/GenIncProcessModel.py | 421 --------- examples/Gentle-Intro/Gentle-Intro-To-HARK.py | 316 ------- .../HowWeSolveIndShockConsumerType.py | 204 ----- examples/Interpolation/CubicInterp.py | 85 -- examples/Interpolation/DecayInterp.py | 237 ------ examples/Journeys/AzureMachineLearning.py | 132 --- .../Journey-Engineering-Background.py | 93 -- examples/Journeys/Journey-PhD.py | 389 --------- examples/Journeys/Journey-Policymaker.py | 281 ------ examples/Journeys/Journeys-into-HARK.py | 41 - .../Quickstart_tutorial/Jounery_1_param.py | 157 ---- .../Quick_start_with_solution.py | 799 ------------------ examples/LabeledModels/LabeledModels.py | 644 -------------- examples/LifecycleModel/Cycles_tutorial.py | 262 ------ examples/LifecycleModel/LifecycleModel.py | 184 ---- 46 files changed, 11134 deletions(-) delete mode 100644 examples/Calibration/Income_calibrations.py delete mode 100644 examples/Calibration/Life_Cycle_example.py delete mode 100644 examples/Calibration/SCF_distributions.py delete mode 100644 examples/Calibration/Sabelhaus_Song_var_profiles.py delete mode 100644 examples/Calibration/US_SSA_life_tables.py delete mode 100644 examples/ConsBequestModel/example_AccidentalBequest.py delete mode 100644 examples/ConsBequestModel/example_ConsIndShockComp.py delete mode 100644 examples/ConsBequestModel/example_TerminalBequest.py delete mode 100644 examples/ConsIndShockModel/Finite Cyclical Test.py delete mode 100644 examples/ConsIndShockModel/IndShockConsumerType.py delete mode 100644 examples/ConsIndShockModel/IndShockConsumerType_Jacobian_Example.py delete mode 100644 examples/ConsIndShockModel/IndShockConsumerType_Transition_Matrix_Example.py delete mode 100644 examples/ConsIndShockModel/KinkedRconsumerType.py delete mode 
100644 examples/ConsIndShockModel/PerfForesightConsumerType.py delete mode 100644 examples/ConsPortfolioModel/example_ConsPortfolioModel.py delete mode 100644 examples/ConsPortfolioModel/example_ConsRiskyAssetModel.py delete mode 100644 examples/ConsPortfolioModel/example_ConsSequentialPortfolioModel.py delete mode 100644 examples/ConsumptionSaving/example_ConsAggShockModel.py delete mode 100644 examples/ConsumptionSaving/example_ConsGenIncProcessModel.py delete mode 100644 examples/ConsumptionSaving/example_ConsIndShock.py delete mode 100644 examples/ConsumptionSaving/example_ConsLaborModel.py delete mode 100644 examples/ConsumptionSaving/example_ConsMarkovModel.py delete mode 100644 examples/ConsumptionSaving/example_ConsMedModel.py delete mode 100644 examples/ConsumptionSaving/example_ConsPrefShockModel.py delete mode 100644 examples/ConsumptionSaving/example_ConsRepAgentModel.py delete mode 100644 examples/ConsumptionSaving/example_ConsRiskyContribModel.py delete mode 100644 examples/ConsumptionSaving/example_TractableBufferStockModel.py delete mode 100644 examples/Distributions/DiscreteDistributionLabeled.py delete mode 100644 examples/Distributions/ExpectedValue.py delete mode 100644 examples/FrameAgentType/FrameAgentType Demo.py delete mode 100644 examples/FrameAgentType/FrameModels.py delete mode 100644 examples/GenIncProcessModel/GenIncProcessModel.py delete mode 100644 examples/Gentle-Intro/Gentle-Intro-To-HARK.py delete mode 100644 examples/HowWeSolveIndShockConsumerType/HowWeSolveIndShockConsumerType.py delete mode 100644 examples/Interpolation/CubicInterp.py delete mode 100644 examples/Interpolation/DecayInterp.py delete mode 100644 examples/Journeys/AzureMachineLearning.py delete mode 100644 examples/Journeys/Journey-Engineering-Background.py delete mode 100644 examples/Journeys/Journey-PhD.py delete mode 100644 examples/Journeys/Journey-Policymaker.py delete mode 100644 examples/Journeys/Journeys-into-HARK.py delete mode 100644 examples/Journeys/Quickstart_tutorial/Jounery_1_param.py delete mode 100644 examples/Journeys/Quickstart_tutorial/Quick_start_with_solution.py delete mode 100644 examples/LabeledModels/LabeledModels.py delete mode 100644 examples/LifecycleModel/Cycles_tutorial.py delete mode 100644 examples/LifecycleModel/LifecycleModel.py diff --git a/examples/Calibration/Income_calibrations.py b/examples/Calibration/Income_calibrations.py deleted file mode 100644 index 34ab06b9f..000000000 --- a/examples/Calibration/Income_calibrations.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: title,-all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -""" -Created on Sun Jan 3 10:50:02 2021 - -@author: Mateo -""" - -from HARK.Calibration.Income.IncomeTools import ( - parse_income_spec, - find_profile, - Cagetti_income, - CGM_income, -) - -import numpy as np -import matplotlib.pyplot as plt - -# What year to use as the base monetary year? 
-# (pick 1992 as it is used by both of the papers we are comparing) -adjust_infl_to = 1992 - -# %% Cocco, Gomes, Maenhout (2005) calibration -age_min = 21 -age_max = 100 - -ages = np.arange(age_min, age_max + 1) - -plt.figure() -for spec in CGM_income.items(): - label = spec[0] - - params = parse_income_spec( - age_min=age_min, age_max=age_max, adjust_infl_to=adjust_infl_to, **spec[1] - ) - MeanY = find_profile(params["PermGroFac"], params["P0"]) - - plt.plot(ages, MeanY, label=label) - -plt.title( - "Mean paths of permanent income calibrations in\n" - + "Cocco, Gomes & Maenhout (2005)" -) -plt.xlabel("Age") -plt.ylabel( - "Mean Permanent Income,\n" + "Thousands of {} U.S. dollars".format(adjust_infl_to) -) -plt.legend() -plt.show() - -# %% Cagetti (2003) calibration - -age_min = 25 -age_max = 91 -# Cagetti has a year trend in his specification, so we have to specify the -# year in which agents enter the model. -start_year = 1980 - -ages = np.arange(age_min, age_max + 1) - -plt.figure() -for spec in Cagetti_income.items(): - label = spec[0] - - params = parse_income_spec( - age_min=age_min, - age_max=age_max, - adjust_infl_to=adjust_infl_to, - start_year=start_year, - **spec[1] - ) - MeanY = find_profile(params["PermGroFac"], params["P0"]) - - plt.plot(ages, MeanY, label=label) - -plt.title("Mean paths of permanent income calibrations in\n" + "Cagetti (2003)") -plt.xlabel("Age") -plt.ylabel( - "Mean Permanent Income,\n" + "Thousands of {} U.S. dollars".format(adjust_infl_to) -) -plt.legend() -plt.show() diff --git a/examples/Calibration/Life_Cycle_example.py b/examples/Calibration/Life_Cycle_example.py deleted file mode 100644 index d0dc93130..000000000 --- a/examples/Calibration/Life_Cycle_example.py +++ /dev/null @@ -1,118 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -from HARK.ConsumptionSaving.ConsIndShockModel import ( - IndShockConsumerType, - init_lifecycle, -) - -from HARK.Calibration.Income.IncomeTools import ( - parse_income_spec, - parse_time_params, - CGM_income, -) - -from HARK.datasets.life_tables.us_ssa.SSATools import parse_ssa_life_table -from HARK.datasets.SCF.WealthIncomeDist.SCFDistTools import income_wealth_dists_from_scf -import matplotlib.pyplot as plt -import pandas as pd -from copy import copy -from HARK.utilities import plot_funcs - -# %% Alter calibration -birth_age = 25 -death_age = 90 -adjust_infl_to = 1992 -income_calib = CGM_income -education = "College" - -# Income specification -income_params = parse_income_spec( - age_min=birth_age, - age_max=death_age, - adjust_infl_to=adjust_infl_to, - **income_calib[education], - SabelhausSong=True -) - -# Initial distribution of wealth and permanent income -dist_params = income_wealth_dists_from_scf( - base_year=adjust_infl_to, age=birth_age, education=education, wave=1995 -) - -# We need survival probabilities only up to death_age-1, because the agent -# dies with certainty upon reaching death_age.
-liv_prb = parse_ssa_life_table( - female=True, cross_sec=True, year=2004, min_age=birth_age, max_age=death_age - 1 -) - -# Parameters related to the number of periods implied by the calibration -time_params = parse_time_params(age_birth=birth_age, age_death=death_age) - -# Update all the new parameters -params = copy(init_lifecycle) -params.update(time_params) -params.update(dist_params) -params.update(income_params) -params.update({"LivPrb": liv_prb}) - -# %% Create and solve agent -Agent = IndShockConsumerType(**params) -Agent.solve() - -# %% -Agent.unpack("cFunc") -# Plot the consumption functions -print("Consumption functions") -plot_funcs(Agent.cFunc, 0, 5) - -# %% Simulation -# Number of agents and periods in the simulation. -Agent.AgentCount = 500 -Agent.T_sim = 200 - -# Set up the variables we want to keep track of. -Agent.track_vars = ["aNrm", "cNrm", "pLvl", "t_age", "mNrm"] - -# Run the simulations -Agent.initialize_sim() -Agent.simulate() - -# %% Extract and format simulation results -raw_data = { - "Age": Agent.history["t_age"].flatten() + birth_age - 1, - "pIncome": Agent.history["pLvl"].flatten(), - "nrmM": Agent.history["mNrm"].flatten(), - "nrmC": Agent.history["cNrm"].flatten(), -} - -Data = pd.DataFrame(raw_data) -Data["Cons"] = Data.nrmC * Data.pIncome -Data["M"] = Data.nrmM * Data.pIncome - -# %% Plots -# Find the median of each variable at every age -AgeMeans = Data.groupby(["Age"]).median().reset_index() - -plt.figure() -plt.plot(AgeMeans.Age, AgeMeans.pIncome, label="Permanent Income") -plt.plot(AgeMeans.Age, AgeMeans.M, label="Market resources") -plt.plot(AgeMeans.Age, AgeMeans.Cons, label="Consumption") -plt.legend() -plt.xlabel("Age") -plt.ylabel("Thousands of {} USD".format(adjust_infl_to)) -plt.title("Variable Medians Conditional on Survival") -plt.grid() diff --git a/examples/Calibration/SCF_distributions.py b/examples/Calibration/SCF_distributions.py deleted file mode 100644 index 5fef14250..000000000 --- a/examples/Calibration/SCF_distributions.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: title,-all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -""" -Created on Mon Jan 18 13:57:50 2021 - -@author: Mateo -""" - -from HARK.datasets.SCF.WealthIncomeDist.SCFDistTools import income_wealth_dists_from_scf -import seaborn as sns -from itertools import product, starmap -import pandas as pd - -# List the education levels and years -educ_lvls = ["NoHS", "HS", "College"] -years = list(range(1995, 2022, 3)) - -age = 25 -base_year = 1992 - -# %% Get the distribution of aNrm and pLvl at each year x education -params = list(product([base_year], [age], educ_lvls, years)) -base_year, age, education, year = list(zip(*params)) - -frame = pd.DataFrame( - {"base_year": base_year, "age": age, "education": education, "wave": year} -) - -results = list(starmap(income_wealth_dists_from_scf, params)) -frame = pd.concat([frame, pd.DataFrame(results)], axis=1) - -# %% Plot time trends at different education levels.
- -# Formatting -frame = frame.melt(id_vars=["base_year", "age", "education", "wave"]) -aux = frame["variable"].str.split("(Mean|Std)", n=1, expand=True) -frame["variable"] = aux[0] -frame["stat"] = aux[1] - -# Plot -g = sns.FacetGrid(frame, col="stat", row="variable", hue="education", sharey=True) -g.map(sns.scatterplot, "wave", "value", alpha=0.7) -g.add_legend() diff --git a/examples/Calibration/Sabelhaus_Song_var_profiles.py b/examples/Calibration/Sabelhaus_Song_var_profiles.py deleted file mode 100644 index 908048df8..000000000 --- a/examples/Calibration/Sabelhaus_Song_var_profiles.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: title,-all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -""" -Created on Thu Jan 14 16:44:09 2021 - -@author: Mateo - -This short script demonstrates how to use the module for computing -[1] Sabelhaus & Song (2010) age profiles of income volatility. -It does so by replicating the results from the original paper (Figure 6 in [1]) - -[1] Sabelhaus, J., & Song, J. (2010). The great moderation in micro labor - earnings. Journal of Monetary Economics, 57(4), 391-403. - -""" - -import matplotlib.pyplot as plt -from HARK.Calibration.Income.IncomeTools import sabelhaus_song_var_profile -import numpy as np - -# Set up ages and cohorts at which we will get the variances -age_min = 27 -age_max = 54 -cohorts = [1940, 1965, None] - -# Find volatility profiles using the module -variances = [ - sabelhaus_song_var_profile(age_min=age_min, age_max=age_max, cohort=c) - for c in cohorts -] - -# %% Plots - -# Plot transitory shock variances -plt.figure() -for i in range(len(cohorts)): - coh_label = "aggregate" if cohorts[i] is None else cohorts[i] - plt.plot( - variances[i]["Age"], - np.power(variances[i]["TranShkStd"], 2), - label="Tran. {} cohort".format(coh_label), - ) - -plt.legend() - -# Plot permanent shock variances -plt.figure() -for i in range(len(cohorts)): - coh_label = "aggregate" if cohorts[i] is None else cohorts[i] - plt.plot( - variances[i]["Age"], - np.power(variances[i]["PermShkStd"], 2), - label="Perm. 
{} cohort".format(coh_label), - ) - -plt.legend() diff --git a/examples/Calibration/US_SSA_life_tables.py b/examples/Calibration/US_SSA_life_tables.py deleted file mode 100644 index 18a55fc40..000000000 --- a/examples/Calibration/US_SSA_life_tables.py +++ /dev/null @@ -1,73 +0,0 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: title,-all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -from HARK.datasets.life_tables.us_ssa.SSATools import ( - parse_ssa_life_table, - get_ssa_life_tables, -) - -import numpy as np -import matplotlib.pyplot as plt - -# %% Inspect lifetables - -tables = get_ssa_life_tables() -print(tables.head) - -# %% Survival probabilities from the SSA - -# We will find 1-year survival probabilities from ages 21 to 100 -min_age = 21 -max_age = 100 -ages = np.arange(min_age, max_age + 1) - -# In the years 1900 and 1950 -years = [1900, 1950] - -# %% - -# First, the "longitudinal method", which gives us the probabilities -# experienced by agents born in "year" throughout their lived -plt.figure() -for cohort in years: - for s in ["male", "female"]: - fem = s == "female" - LivPrb = parse_ssa_life_table( - female=fem, cohort=cohort, min_age=min_age, max_age=max_age - ) - - plt.plot(ages, LivPrb, label=s + " born in " + str(cohort)) - -plt.legend() -plt.title("Longitudinal survival probabilities") - -# %% - -# Second, the "cross-sectional method", which gives us the probabilities of -# survivals of individuals of differnet ages that are alive in the given year. -plt.figure() -for year in years: - for s in ["male", "female"]: - fem = s == "female" - LivPrb = parse_ssa_life_table( - female=fem, year=year, cross_sec=True, min_age=min_age, max_age=max_age - ) - - plt.plot(ages, LivPrb, label=s + "s in " + str(year)) - -plt.legend() -plt.title("Cross-sectional survival probabilities") diff --git a/examples/ConsBequestModel/example_AccidentalBequest.py b/examples/ConsBequestModel/example_AccidentalBequest.py deleted file mode 100644 index 9fa8c2441..000000000 --- a/examples/ConsBequestModel/example_AccidentalBequest.py +++ /dev/null @@ -1,128 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -from copy import copy -from time import time - -import matplotlib.pyplot as plt -import pandas as pd - -from HARK.Calibration.Income.IncomeTools import ( - CGM_income, - parse_income_spec, - parse_time_params, -) -from HARK.ConsumptionSaving.ConsBequestModel import ( - BequestWarmGlowConsumerType, - init_accidental_bequest, -) -from HARK.datasets.life_tables.us_ssa.SSATools import parse_ssa_life_table -from HARK.datasets.SCF.WealthIncomeDist.SCFDistTools import income_wealth_dists_from_scf -from HARK.utilities import plot_funcs - -# %% Alter calibration -birth_age = 25 -death_age = 90 -adjust_infl_to = 1992 -income_calib = CGM_income -education = "College" - -# Income specification -income_params = parse_income_spec( - age_min=birth_age, - age_max=death_age, - adjust_infl_to=adjust_infl_to, - **income_calib[education], - SabelhausSong=True, -) - -# Initial distribution of wealth and permanent income -dist_params = income_wealth_dists_from_scf( - 
base_year=adjust_infl_to, age=birth_age, education=education, wave=1995 -) - -# We need survival probabilities only up to death_age-1, because the agent -# dies with certainty upon reaching death_age. -liv_prb = parse_ssa_life_table( - female=True, cross_sec=True, year=2004, min_age=birth_age, max_age=death_age - 1 -) - -# Parameters related to the number of periods implied by the calibration -time_params = parse_time_params(age_birth=birth_age, age_death=death_age) - -# Update all the new parameters -params = copy(init_accidental_bequest) -params.update(time_params) -params.update(dist_params) -params.update(income_params) -params.update({"LivPrb": liv_prb}) - -# %% Create and solve agent -# Make and solve an idiosyncratic shocks consumer with a finite lifecycle -Agent = BequestWarmGlowConsumerType(**params) -# Make this consumer live a sequence of periods exactly once -Agent.cycles = 1 - -# %% -start_time = time() -Agent.solve() -end_time = time() -print(f"Solving a lifecycle consumer took {end_time - start_time} seconds.") -Agent.unpack("cFunc") - -# %% -# Plot the consumption functions -print("Consumption functions") -plot_funcs(Agent.cFunc, 0, 5) - -# %% Simulation -# Number of agents and periods in the simulation. -Agent.AgentCount = 500 -Agent.T_sim = 200 - -# Set up the variables we want to keep track of. -Agent.track_vars = ["aNrm", "cNrm", "pLvl", "t_age", "mNrm"] - -# Run the simulations -Agent.initialize_sim() -Agent.simulate() - - -# %% Extract and format simulation results -raw_data = { - "Age": Agent.history["t_age"].flatten() + birth_age - 1, - "pIncome": Agent.history["pLvl"].flatten(), - "nrmM": Agent.history["mNrm"].flatten(), - "nrmC": Agent.history["cNrm"].flatten(), -} - -Data = pd.DataFrame(raw_data) -Data["Cons"] = Data.nrmC * Data.pIncome -Data["M"] = Data.nrmM * Data.pIncome - -# %% Plots -# Find the median of each variable at every age -AgeMeans = Data.groupby(["Age"]).median().reset_index() - -plt.figure() -plt.plot(AgeMeans.Age, AgeMeans.pIncome, label="Permanent Income") -plt.plot(AgeMeans.Age, AgeMeans.M, label="Market resources") -plt.plot(AgeMeans.Age, AgeMeans.Cons, label="Consumption") -plt.legend() -plt.xlabel("Age") -plt.ylabel("Thousands of {} USD".format(adjust_infl_to)) -plt.title("Variable Medians Conditional on Survival") -plt.grid() diff --git a/examples/ConsBequestModel/example_ConsIndShockComp.py b/examples/ConsBequestModel/example_ConsIndShockComp.py deleted file mode 100644 index 09bd1bbcc..000000000 --- a/examples/ConsBequestModel/example_ConsIndShockComp.py +++ /dev/null @@ -1,39 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: hark-dev -# language: python -# name: python3 -# --- - -# %% -from HARK.ConsumptionSaving.ConsBequestModel import BequestWarmGlowConsumerType -from HARK.ConsumptionSaving.ConsIndShockModel import ( - IndShockConsumerType, - init_idiosyncratic_shocks, -) -from HARK.utilities import plot_funcs - -# %% -beq_agent = BequestWarmGlowConsumerType( - **init_idiosyncratic_shocks, TermBeqFac=0.0, BeqFac=0.0 -) -beq_agent.cycles = 0 -beq_agent.solve() - -# %% -ind_agent = IndShockConsumerType(**init_idiosyncratic_shocks) -ind_agent.cycles = 0 -ind_agent.solve() - -# %% -plot_funcs([beq_agent.solution[0].cFunc, ind_agent.solution[0].cFunc], 0, 10) - -# %% diff --git a/examples/ConsBequestModel/example_TerminalBequest.py
b/examples/ConsBequestModel/example_TerminalBequest.py deleted file mode 100644 index 9aae978ab..000000000 --- a/examples/ConsBequestModel/example_TerminalBequest.py +++ /dev/null @@ -1,128 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -from copy import copy -from time import time - -import matplotlib.pyplot as plt -import pandas as pd - -from HARK.Calibration.Income.IncomeTools import ( - CGM_income, - parse_income_spec, - parse_time_params, -) -from HARK.ConsumptionSaving.ConsBequestModel import ( - BequestWarmGlowConsumerType, - init_warm_glow, -) -from HARK.datasets.life_tables.us_ssa.SSATools import parse_ssa_life_table -from HARK.datasets.SCF.WealthIncomeDist.SCFDistTools import income_wealth_dists_from_scf -from HARK.utilities import plot_funcs - -# %% Alter calibration -birth_age = 25 -death_age = 90 -adjust_infl_to = 1992 -income_calib = CGM_income -education = "College" - -# Income specification -income_params = parse_income_spec( - age_min=birth_age, - age_max=death_age, - adjust_infl_to=adjust_infl_to, - **income_calib[education], - SabelhausSong=True, -) - -# Initial distribution of wealth and permanent income -dist_params = income_wealth_dists_from_scf( - base_year=adjust_infl_to, age=birth_age, education=education, wave=1995 -) - -# We need survival probabilities only up to death_age-1, because the agent -# dies with certainty upon reaching death_age. -liv_prb = parse_ssa_life_table( - female=True, cross_sec=True, year=2004, min_age=birth_age, max_age=death_age - 1 -) - -# Parameters related to the number of periods implied by the calibration -time_params = parse_time_params(age_birth=birth_age, age_death=death_age) - -# Update all the new parameters -params = copy(init_warm_glow) -params.update(time_params) -params.update(dist_params) -params.update(income_params) -params.update({"LivPrb": [1.0] * len(liv_prb)}) - -# %% Create and solve agent -# Make and solve an idiosyncratic shocks consumer with a finite lifecycle -TerminalExample = BequestWarmGlowConsumerType(**params) -# Make this consumer live a sequence of periods exactly once -TerminalExample.cycles = 1 - -# %% -start_time = time() -TerminalExample.solve() -end_time = time() -print(f"Solving a lifecycle consumer took {end_time - start_time} seconds.") -TerminalExample.unpack("cFunc") - -# %% -# Plot the consumption functions -print("Consumption functions") -plot_funcs(TerminalExample.cFunc, 0, 5) - -# %% Simulation -# Number of agents and periods in the simulation. -TerminalExample.AgentCount = 500 -TerminalExample.T_sim = 200 - -# Set up the variables we want to keep track of.
-TerminalExample.track_vars = ["aNrm", "cNrm", "pLvl", "t_age", "mNrm"] - -# Run the simulations -TerminalExample.initialize_sim() -TerminalExample.simulate() - - -# %% Extract and format simulation results -raw_data = { - "Age": TerminalExample.history["t_age"].flatten() + birth_age - 1, - "pIncome": TerminalExample.history["pLvl"].flatten(), - "nrmM": TerminalExample.history["mNrm"].flatten(), - "nrmC": TerminalExample.history["cNrm"].flatten(), -} - -Data = pd.DataFrame(raw_data) -Data["Cons"] = Data.nrmC * Data.pIncome -Data["M"] = Data.nrmM * Data.pIncome - -# %% Plots -# Find the median of each variable at every age -AgeMeans = Data.groupby(["Age"]).median().reset_index() - -plt.figure() -plt.plot(AgeMeans.Age, AgeMeans.pIncome, label="Permanent Income") -plt.plot(AgeMeans.Age, AgeMeans.M, label="Market resources") -plt.plot(AgeMeans.Age, AgeMeans.Cons, label="Consumption") -plt.legend() -plt.xlabel("Age") -plt.ylabel("Thousands of {} USD".format(adjust_infl_to)) -plt.title("Variable Medians Conditional on Survival") -plt.grid() diff --git a/examples/ConsIndShockModel/Finite Cyclical Test.py b/examples/ConsIndShockModel/Finite Cyclical Test.py deleted file mode 100644 index 13cf05800..000000000 --- a/examples/ConsIndShockModel/Finite Cyclical Test.py +++ /dev/null @@ -1,95 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -# Initial imports and notebook setup, click arrow to show -from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType -from HARK.utilities import plot_funcs_der, plot_funcs -import matplotlib.pyplot as plt -import numpy as np - -mystr = lambda number: "{:.4f}".format(number) - -# %% -CyclicalDict = { # Click the arrow to expand this parameter dictionary - # Parameters shared with the perfect foresight model - "CRRA": 2.0, # Coefficient of relative risk aversion - "Rfree": 1.03, # Interest factor on assets - "DiscFac": 0.96, # Intertemporal discount factor - "LivPrb": 4 * [0.98], # Survival probability - "PermGroFac": [1.082251, 2.8, 0.3, 1.1], - # Parameters that specify the income distribution over the lifecycle - "PermShkStd": [0.1, 0.1, 0.1, 0.1], - "PermShkCount": 7, # Number of points in discrete approximation to permanent income shocks - "TranShkStd": [0.2, 0.2, 0.2, 0.2], - "TranShkCount": 7, # Number of points in discrete approximation to transitory income shocks - "UnempPrb": 0.05, # Probability of unemployment while working - "IncUnemp": 0.3, # Unemployment benefits replacement rate - "UnempPrbRet": 0.0005, # Probability of "unemployment" while retired - "IncUnempRet": 0.0, # "Unemployment" benefits when retired - "T_retire": 0, # Period of retirement (0 --> no retirement) - "tax_rate": 0.0, # Flat income tax rate (legacy parameter, will be removed in future) - # Parameters for constructing the "assets above minimum" grid - "aXtraMin": 0.001, # Minimum end-of-period "assets above minimum" value - "aXtraMax": 20, # Maximum end-of-period "assets above minimum" value - "aXtraCount": 48, # Number of points in the base grid of "assets above minimum" - "aXtraNestFac": 3, # Exponential nesting factor when constructing "assets above minimum" grid - "aXtraExtra": [None], # Additional values to add to aXtraGrid - # A few other parameters - "BoroCnstArt": 0.0, # Artificial borrowing constraint; imposed minimum
level of end-of-period assets - "vFuncBool": True, # Whether to calculate the value function during solution - "CubicBool": False, # Whether to use cubic spline interpolation for cFunc - "T_cycle": 4, # Number of periods in the cycle for this agent type - # Parameters only used in simulation - "AgentCount": 10000, # Number of agents of this type - "T_sim": 120, # Number of periods to simulate - "aNrmInitMean": -6.0, # Mean of log initial assets - "aNrmInitStd": 1.0, # Standard deviation of log initial assets - "pLvlInitMean": 0.0, # Mean of log initial permanent income - "pLvlInitStd": 0.0, # Standard deviation of log initial permanent income - "PermGroFacAgg": 1.0, # Aggregate permanent income growth factor - "T_age": None, # Age after which simulated agents are automatically killed -} - -# %% -CyclicalExample = IndShockConsumerType(**CyclicalDict) -CyclicalExample.cycles = 0 # Make this consumer type have an infinite horizon -CyclicalExample.solve() - -CyclicalExample.unpack_cFunc() -print("Quarterly consumption functions:") -mMin = min([X.mNrmMin for X in CyclicalExample.solution]) -plot_funcs(CyclicalExample.cFunc, mMin, 5) - -# %% -FiniteCyclicalDict = CyclicalDict.copy() -FiniteCyclicalDict["T_age"] = 60 - -FiniteCyclicalExample = IndShockConsumerType(**FiniteCyclicalDict) -# CyclicalExample.cycles = 0 # Make this consumer type have an infinite horizon -FiniteCyclicalExample.solve() - -FiniteCyclicalExample.unpack("cFunc") -print("Quarterly consumption functions:") -mMin = min([X.mNrmMin for X in FiniteCyclicalExample.solution]) -plot_funcs(FiniteCyclicalExample.cFunc, mMin, 5) - -# %% - - -# %% diff --git a/examples/ConsIndShockModel/IndShockConsumerType.py b/examples/ConsIndShockModel/IndShockConsumerType.py deleted file mode 100644 index 2b197528c..000000000 --- a/examples/ConsIndShockModel/IndShockConsumerType.py +++ /dev/null @@ -1,283 +0,0 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: collapsed,code_folding,heading_collapsed,hidden -# cell_metadata_json: true -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% [markdown] -# # IndShockConsumerType Documentation -# ## Consumption-Saving model with Idiosyncratic Income Shocks - -# %% {"code_folding": [0]} -# Initial imports and notebook setup, click arrow to show -from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType -from HARK.utilities import plot_funcs_der, plot_funcs -import matplotlib.pyplot as plt -import numpy as np - -mystr = lambda number: "{:.4f}".format(number) - -# %% [markdown] -# The module `HARK.ConsumptionSaving.ConsIndShockModel` concerns consumption-saving models with idiosyncratic shocks to (non-capital) income. All of the models assume CRRA utility with geometric discounting, no bequest motive, and income shocks that are either fully transitory or fully permanent. -# -# `ConsIndShockModel` includes: -# 1. A very basic "perfect foresight" model with no uncertainty. -# 2. A model with risk over transitory and permanent income shocks. -# 3. The model described in (2), with an interest rate for debt that differs from the interest rate for savings. -# -# This notebook provides documentation for the second of these models.
-# $\newcommand{\CRRA}{\rho}$ -# $\newcommand{\DiePrb}{\mathsf{D}}$ -# $\newcommand{\PermGroFac}{\Gamma}$ -# $\newcommand{\Rfree}{\mathsf{R}}$ -# $\newcommand{\DiscFac}{\beta}$ - -# %% [markdown] -# ## Statement of idiosyncratic income shocks model -# -# Suppose we want to solve a model like the one analyzed in [BufferStockTheory](https://www.econ2.jhu.edu/people/ccarroll/papers/BufferStockTheory/), which has all the same features as the perfect foresight consumer, plus idiosyncratic shocks to income each period. Agents with this kind of model are represented by the class `IndShockConsumerType`. -# -# Specifically, this type of consumer receives two income shocks at the beginning of each period: a completely transitory shock $\newcommand{\tShkEmp}{\theta}{\tShkEmp_t}$ and a completely permanent shock $\newcommand{\pShk}{\psi}{\pShk_t}$. Moreover, the agent is subject to a borrowing limit: the ratio of end-of-period assets $A_t$ to permanent income $P_t$ must be greater than $\underline{a}$. As with the perfect foresight problem, this model is stated in terms of *normalized* variables, dividing all real variables by $P_t$: -# -# \begin{eqnarray*} -# v_t(m_t) &=& \max_{c_t} {~} u(c_t) + \DiscFac (1-\DiePrb_{t+1}) \mathbb{E}_{t} \left[ (\PermGroFac_{t+1}\psi_{t+1})^{1-\CRRA} v_{t+1}(m_{t+1}) \right], \\ -# a_t &=& m_t - c_t, \\ -# a_t &\geq& \text{$\underline{a}$}, \\ -# m_{t+1} &=& \Rfree/(\PermGroFac_{t+1} \psi_{t+1}) a_t + \theta_{t+1}, \\ -# (\psi_{t+1},\theta_{t+1}) &\sim& F_{t+1}, \\ -# \mathbb{E}[\psi]=\mathbb{E}[\theta] &=& 1, \\ -# u(c) &=& \frac{c^{1-\rho}}{1-\rho}. -# \end{eqnarray*} - -# %% [markdown] -# ## Solution method for IndShockConsumerType -# -# With the introduction of (non-trivial) risk, the idiosyncratic income shocks model has no closed-form solution and must be solved numerically. The function `solveConsIndShock` solves the one period problem for the `IndShockConsumerType` class. To do so, HARK uses the original version of the endogenous grid method (EGM) first described [here](https://www.econ2.jhu.edu/people/ccarroll/EndogenousGridpoints.pdf); see also the [SolvingMicroDSOPs](https://www.econ2.jhu.edu/people/ccarroll/SolvingMicroDSOPs/) lecture notes. -# -# Briefly, the transition equation for $m_{t+1}$ can be substituted into the problem definition; the second term of the reformulated maximand represents "end of period value of assets" $\mathfrak{v}_t(a_t)$ ("Gothic v"): -# -# \begin{eqnarray*} -# v_t(m_t) &=& \max_{c_t} {~} u(c_t) + \underbrace{\DiscFac (1-\DiePrb_{t+1}) \mathbb{E}_{t} \left[ (\PermGroFac_{t+1}\psi_{t+1})^{1-\CRRA} v_{t+1}(\Rfree/(\PermGroFac_{t+1} \psi_{t+1}) a_t + \theta_{t+1}) \right]}_{\equiv \mathfrak{v}_t(a_t)}. -# \end{eqnarray*} -# -# The first order condition with respect to $c_t$ is thus simply: -# -# \begin{eqnarray*} -# u^{\prime}(c_t) - \mathfrak{v}'_t(a_t) = 0 \Longrightarrow c_t^{-\CRRA} = \mathfrak{v}'_t(a_t) \Longrightarrow c_t = \mathfrak{v}'_t(a_t)^{-1/\CRRA}, -# \end{eqnarray*} -# -# and the marginal value of end-of-period assets can be computed as: -# -# \begin{eqnarray*} -# \mathfrak{v}'_t(a_t) = \DiscFac (1-\DiePrb_{t+1}) \mathbb{E}_{t} \left[ \Rfree (\PermGroFac_{t+1}\psi_{t+1})^{-\CRRA} v'_{t+1}(\Rfree/(\PermGroFac_{t+1} \psi_{t+1}) a_t + \theta_{t+1}) \right].
-# \end{eqnarray*} -# -# To solve the model, we choose an exogenous grid of $a_t$ values that span the range of values that could plausibly be achieved, compute $\mathfrak{v}'_t(a_t)$ at each of these points, calculate the value of consumption $c_t$ whose marginal utility is consistent with the marginal value of assets, then find the endogenous $m_t$ gridpoint as $m_t = a_t + c_t$. The set of $(m_t,c_t)$ gridpoints is then interpolated to construct the consumption function. - -# %% [markdown] -# ## Example parameter values to construct an instance of IndShockConsumerType -# -# In order to create an instance of `IndShockConsumerType`, the user must specify parameters that characterize the (age-varying) distribution of income shocks $F_{t+1}$, the artificial borrowing constraint $\underline{a}$, and the exogenous grid of end-of-period assets-above-minimum for use by EGM, along with all of the parameters for the perfect foresight model. The table below presents the complete list of parameter values required to instantiate an `IndShockConsumerType`, along with example values. -# -# | Parameter | Description | Code | Example value | Time-varying? | -# | :---: | --- | --- | --- | --- | -# | $\DiscFac$ |Intertemporal discount factor | $\texttt{DiscFac}$ | $0.96$ | | -# | $\CRRA$|Coefficient of relative risk aversion | $\texttt{CRRA}$ | $2.0$ | | -# | $\Rfree$ | Risk free interest factor | $\texttt{Rfree}$ | $1.03$ | | -# | $1 - \DiePrb_{t+1}$ |Survival probability | $\texttt{LivPrb}$ | $[0.98]$ | $\surd$ | -# |$\PermGroFac_{t+1}$|Permanent income growth factor|$\texttt{PermGroFac}$| $[1.01]$ | $\surd$ | -# | $\sigma_\psi$| Standard deviation of log permanent income shocks | $\texttt{PermShkStd}$ | $[0.1]$ |$\surd$ | -# | $N_\psi$| Number of discrete permanent income shocks | $\texttt{PermShkCount}$ | $7$ | | -# | $\sigma_\theta$| Standard deviation of log transitory income shocks | $\texttt{TranShkStd}$ | $[0.2]$ | $\surd$ | -# | $N_\theta$| Number of discrete transitory income shocks | $\texttt{TranShkCount}$ | $7$ | | -# | $\mho$ | Probability of being unemployed and getting $\theta=\underline{\theta}$ | $\texttt{UnempPrb}$ | $0.05$ | | -# | $\underline{\theta}$| Transitory shock when unemployed | $\texttt{IncUnemp}$ | $0.3$ | | -# | $\mho^{Ret}$ | Probability of being "unemployed" when retired | $\texttt{UnempPrbRet}$ | $0.0005$ | | -# | $\underline{\theta}^{Ret}$| Transitory shock when "unemployed" and retired | $\texttt{IncUnempRet}$ | $0.0$ | | -# | $(none)$ | Period of the lifecycle model when retirement begins | $\texttt{T_retire}$ | $0$ | | -# | $(none)$ | Minimum value in assets-above-minimum grid | $\texttt{aXtraMin}$ | $0.001$ | | -# | $(none)$ | Maximum value in assets-above-minimum grid | $\texttt{aXtraMax}$ | $20.0$ | | -# | $(none)$ | Number of points in base assets-above-minimum grid | $\texttt{aXtraCount}$ | $48$ | | -# | $(none)$ | Exponential nesting factor for base assets-above-minimum grid | $\texttt{aXtraNestFac}$ | $3$ | | -# | $(none)$ | Additional values to add to assets-above-minimum grid | $\texttt{aXtraExtra}$ | $None$ | | -# | $\underline{a}$| Artificial borrowing constraint (normalized) | $\texttt{BoroCnstArt}$ | $0.0$ | | -# | $(none)$|Indicator for whether $\texttt{vFunc}$ should be computed | $\texttt{vFuncBool}$ | $True$ | | -# | $(none)$ |Indicator for whether $\texttt{cFunc}$ should use cubic splines | $\texttt{CubicBool}$ | $False$ | | -# |$T$| Number of periods in this type's "cycle" |$\texttt{T_cycle}$| $1$ | | -# |(none)| Number of times the "cycle" occurs
|$\texttt{cycles}$| $0$ | | - -# %% {"code_folding": [0]} -IdiosyncDict = { - # Parameters shared with the perfect foresight model - "CRRA": 2.0, # Coefficient of relative risk aversion - "Rfree": 1.03, # Interest factor on assets - "DiscFac": 0.96, # Intertemporal discount factor - "LivPrb": [0.98], # Survival probability - "PermGroFac": [1.01], # Permanent income growth factor - # Parameters that specify the income distribution over the lifecycle - "PermShkStd": [0.1], # Standard deviation of log permanent shocks to income - "PermShkCount": 7, # Number of points in discrete approximation to permanent income shocks - "TranShkStd": [0.2], # Standard deviation of log transitory shocks to income - "TranShkCount": 7, # Number of points in discrete approximation to transitory income shocks - "UnempPrb": 0.05, # Probability of unemployment while working - "IncUnemp": 0.3, # Unemployment benefits replacement rate - "UnempPrbRet": 0.0005, # Probability of "unemployment" while retired - "IncUnempRet": 0.0, # "Unemployment" benefits when retired - "T_retire": 0, # Period of retirement (0 --> no retirement) - "tax_rate": 0.0, # Flat income tax rate (legacy parameter, will be removed in future) - # Parameters for constructing the "assets above minimum" grid - "aXtraMin": 0.001, # Minimum end-of-period "assets above minimum" value - "aXtraMax": 20, # Maximum end-of-period "assets above minimum" value - "aXtraCount": 48, # Number of points in the base grid of "assets above minimum" - "aXtraNestFac": 3, # Exponential nesting factor when constructing "assets above minimum" grid - "aXtraExtra": [None], # Additional values to add to aXtraGrid - # A few other parameters - "BoroCnstArt": 0.0, # Artificial borrowing constraint; imposed minimum level of end-of-period assets - "vFuncBool": True, # Whether to calculate the value function during solution - "CubicBool": False, # Whether to use cubic spline interpolation for cFunc - "T_cycle": 1, # Number of periods in the cycle for this agent type - # Parameters only used in simulation - "AgentCount": 10000, # Number of agents of this type - "T_sim": 120, # Number of periods to simulate - "aNrmInitMean": -6.0, # Mean of log initial assets - "aNrmInitStd": 1.0, # Standard deviation of log initial assets - "pLvlInitMean": 0.0, # Mean of log initial permanent income - "pLvlInitStd": 0.0, # Standard deviation of log initial permanent income - "PermGroFacAgg": 1.0, # Aggregate permanent income growth factor - "T_age": None, # Age after which simulated agents are automatically killed -} - -# %% [markdown] -# The distribution of permanent income shocks is specified as mean one lognormal, with an age-varying (underlying) standard deviation. The distribution of transitory income shocks is also mean one lognormal, but with an additional point mass representing unemployment; the transitory shocks are adjusted so that the distribution is still mean one. The continuous distributions are discretized into equiprobable bins; a sketch of this construction is given below. -# -# Optionally, the user can specify the period when the individual retires and escapes essentially all income risk as `T_retire`; this can be turned off by setting the parameter to $0$. In retirement, all permanent income shocks are turned off, and the only transitory shock is an "unemployment" shock, likely with small probability; this prevents the retired problem from degenerating into a perfect foresight model.
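-# %% [markdown]
-# To make the discretization just described concrete, here is a hedged sketch of an
-# equiprobable discretization of the mean-one lognormal transitory shock with an
-# unemployment point mass. It is illustrative only -- it is not HARK's internal
-# code, and the helper name `discretize_tran_shk` is made up for this example.

-# %%
-from scipy.stats import norm
-
-
-def discretize_tran_shk(sigma, n_pts, unemp_prb, inc_unemp):
-    """Sketch: equiprobable discretization of a mean-one lognormal shock,
-    plus a point mass for unemployment."""
-    mu = -sigma**2 / 2.0  # log-mean that makes the lognormal mean equal one
-    edges = norm.ppf(np.linspace(0.0, 1.0, n_pts + 1), loc=mu, scale=sigma)
-    # Conditional mean of exp(Z) on each equiprobable bin, for Z ~ N(mu, sigma^2)
-    shifted_cdf = norm.cdf(edges, loc=mu + sigma**2, scale=sigma)
-    vals = np.exp(mu + sigma**2 / 2.0) * np.diff(shifted_cdf) * n_pts
-    # Rescale employed shocks so the overall mean (with unemployment) stays at one
-    scale = (1.0 - unemp_prb * inc_unemp) / (1.0 - unemp_prb)
-    vals = np.concatenate([[inc_unemp], scale * vals])
-    probs = np.concatenate([[unemp_prb], np.full(n_pts, (1.0 - unemp_prb) / n_pts)])
-    return vals, probs
-
-
-tran_vals, tran_probs = discretize_tran_shk(sigma=0.2, n_pts=7, unemp_prb=0.05, inc_unemp=0.3)
-print(np.dot(tran_vals, tran_probs))  # approximately 1.0

-# %% [markdown]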
-# -# The grid of assets above minimum $\texttt{aXtraGrid}$ is specified by its minimum and maximum level, the number of gridpoints, and the extent of exponential nesting. The greater the (integer) value of $\texttt{aXtraNestFac}$, the more dense the gridpoints will be at the bottom of the grid (and more sparse near the top); setting $\texttt{aXtraNestFac}$ to $0$ will generate an evenly spaced grid of $a_t$. -# -# The artificial borrowing constraint $\texttt{BoroCnstArt}$ can be set to `None` to turn it off. -# -# It is not necessary to compute the value function in this model, and it is not computationally free to do so. You can choose whether the value function should be calculated and returned as part of the solution of the model with $\texttt{vFuncBool}$. The consumption function will be constructed as a piecewise linear interpolation when $\texttt{CubicBool}$ is `False`, and will be a piecewise cubic spline interpolator if `True`. - -# %% [markdown] {"heading_collapsed": true} -# ## Solving and examining the solution of the idiosyncratic income shocks model -# -# The cell below creates an infinite horizon instance of `IndShockConsumerType` and solves its model by calling its `solve` method. - -# %% {"hidden": true} -IndShockExample = IndShockConsumerType(**IdiosyncDict) -IndShockExample.cycles = 0 # Make this type have an infinite horizon -IndShockExample.solve() - - -# %% [markdown] {"hidden": true} -# After solving the model, we can examine an element of this type's $\texttt{solution}$: - -# %% {"hidden": true} -print(vars(IndShockExample.solution[0])) - -# %% [markdown] {"hidden": true} -# The single-period solution to an idiosyncratic shocks consumer's problem has all of the same attributes as in the perfect foresight model, with a couple additions. The solution can include the marginal marginal value of market resources function $\texttt{vPPfunc}$, but this is only constructed if $\texttt{CubicBool}$ is `True`, so that the MPC can be accurately computed; when it is `False`, then $\texttt{vPPfunc}$ merely returns `NaN` everywhere. -# -# The `solveConsIndShock` function calculates steady state market resources and stores it in the attribute $\texttt{mNrmSS}$. This represents the steady state level of $m_t$ if *this period* were to occur indefinitely, but with income shocks turned off. This is relevant in a "one period infinite horizon" model like we've specified here, but is less useful in a lifecycle model. -# -# Let's take a look at the consumption function by plotting it, along with its derivative (the MPC): - -# %% {"hidden": true} -print("Consumption function for an idiosyncratic shocks consumer type:") -plot_funcs(IndShockExample.solution[0].cFunc, IndShockExample.solution[0].mNrmMin, 5) -print("Marginal propensity to consume for an idiosyncratic shocks consumer type:") -plot_funcs_der( - IndShockExample.solution[0].cFunc, IndShockExample.solution[0].mNrmMin, 5 -) - -# %% [markdown] {"hidden": true} -# The lower part of the consumption function is linear with a slope of 1, representing the *constrained* part of the consumption function where the consumer *would like* to consume more by borrowing-- his marginal utility of consumption exceeds the marginal value of assets-- but he is prevented from doing so by the artificial borrowing constraint. -# -# The MPC is a step function, as the $\texttt{cFunc}$ itself is a piecewise linear function; note the large jump in the MPC where the borrowing constraint begins to bind. 
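-# %% [markdown]
-# Before digging into the solution object further, the EGM step described in the
-# "Solution method" section above can be summarized in a few lines. The sketch
-# below is a simplified illustration under stated assumptions, not HARK's actual
-# implementation: it omits the borrowing-constraint handling, uses linear
-# interpolation of next period's policy, and takes the joint shock distribution
-# as parallel arrays of atoms (perm_shks, tran_shks) with probabilities shk_probs.

-# %%
-def egm_step(a_grid, m_next, c_next, Rfree, DiscFac, LivPrb, CRRA,
-             PermGroFac, perm_shks, tran_shks, shk_probs):
-    """One backward EGM iteration: map next period's policy (m_next, c_next)
-    into this period's endogenous gridpoints (m_now, c_now)."""
-    vP_end = np.zeros_like(a_grid)  # marginal value of end-of-period assets
-    for psi, theta, prob in zip(perm_shks, tran_shks, shk_probs):
-        G = PermGroFac * psi
-        m_tp1 = (Rfree / G) * a_grid + theta  # next period's market resources
-        c_tp1 = np.interp(m_tp1, m_next, c_next)  # next period's consumption
-        # Envelope condition: v'(m) = u'(c(m)) = c(m)**(-CRRA)
-        vP_end += prob * Rfree * G ** (-CRRA) * c_tp1 ** (-CRRA)
-    vP_end *= DiscFac * LivPrb
-    c_now = vP_end ** (-1.0 / CRRA)  # invert the first-order condition
-    m_now = a_grid + c_now  # endogenous market-resources gridpoints
-    return m_now, c_now

-# %% [markdown]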
-# -# If you want to look at the interpolation nodes for the consumption function, these can be found by "digging into" attributes of $\texttt{cFunc}$: - -# %% {"hidden": true} -print( - "mNrmGrid for unconstrained cFunc is ", - IndShockExample.solution[0].cFunc.functions[0].x_list, -) -print( - "cNrmGrid for unconstrained cFunc is ", - IndShockExample.solution[0].cFunc.functions[0].y_list, -) -print( - "mNrmGrid for borrowing constrained cFunc is ", - IndShockExample.solution[0].cFunc.functions[1].x_list, -) -print( - "cNrmGrid for borrowing constrained cFunc is ", - IndShockExample.solution[0].cFunc.functions[1].y_list, -) - -# %% [markdown] {"hidden": true} -# The consumption function in this model is an instance of `LowerEnvelope1D`, a class that takes an arbitrary number of 1D interpolants as arguments to its initialization method. When called, a `LowerEnvelope1D` evaluates each of its component functions and returns the lowest value. Here, the two component functions are the *unconstrained* consumption function-- how the agent would consume if the artificial borrowing constraint did not exist for *just this period*-- and the *borrowing constrained* consumption function-- how much he would consume if the artificial borrowing constraint were binding. -# -# The *actual* consumption function is the lower of these two functions, pointwise. We can see this by plotting the component functions on the same figure: - -# %% {"hidden": true} -plot_funcs(IndShockExample.solution[0].cFunc.functions, -0.25, 5.0) - -# %% [markdown] -# ## Simulating the idiosyncratic income shocks model -# -# In order to generate simulated data, an instance of `IndShockConsumerType` needs to know how many agents there are that share these particular parameters (and are thus *ex ante* homogeneous), the distribution of states for newly "born" agents, and how many periods to simulate. These simulation parameters are described in the table below, along with example values. -# -# | Description | Code | Example value | -# | :---: | --- | --- | -# | Number of consumers of this type | $\texttt{AgentCount}$ | $10000$ | -# | Number of periods to simulate | $\texttt{T_sim}$ | $120$ | -# | Mean of initial log (normalized) assets | $\texttt{aNrmInitMean}$ | $-6.0$ | -# | Stdev of initial log (normalized) assets | $\texttt{aNrmInitStd}$ | $1.0$ | -# | Mean of initial log permanent income | $\texttt{pLvlInitMean}$ | $0.0$ | -# | Stdev of initial log permanent income | $\texttt{pLvlInitStd}$ | $0.0$ | -# | Aggregate productivity growth factor | $\texttt{PermGroFacAgg}$ | $1.0$ | -# | Age after which consumers are automatically killed | $\texttt{T_age}$ | $None$ | -# -# Here, we will simulate 10,000 consumers for 120 periods. All newly born agents will start with permanent income of exactly $P_t = 1.0 = \exp(\texttt{pLvlInitMean})$, as $\texttt{pLvlInitStd}$ has been set to zero; they will have essentially zero assets at birth, as $\texttt{aNrmInitMean}$ is $-6.0$; assets will be less than $1\%$ of permanent income at birth. -# -# These example parameter values were already passed as part of the parameter dictionary that we used to create `IndShockExample`, so it is ready to simulate. We need to set the `track_vars` attribute to indicate the variables for which we want to record a *history*. - -# %% -IndShockExample.track_vars = ["aNrm", "mNrm", "cNrm", "pLvl"] -IndShockExample.initialize_sim() -IndShockExample.simulate() - -# %% [markdown] -# We can now look at the simulated data in aggregate or at the individual consumer level.
Like in the perfect foresight model, we can plot average (normalized) market resources over time, as well as average consumption: - -# %% -plt.plot(np.mean(IndShockExample.history["mNrm"], axis=1)) -plt.xlabel("Time") -plt.ylabel("Mean market resources") -plt.show() - -plt.plot(np.mean(IndShockExample.history["cNrm"], axis=1)) -plt.xlabel("Time") -plt.ylabel("Mean consumption") -plt.show() - -# %% [markdown] -# We could also plot individual consumption paths for some of the consumers-- say, the first five: - -# %% -plt.plot(IndShockExample.history["cNrm"][:, 0:5]) -plt.xlabel("Time") -plt.ylabel("Individual consumption paths") -plt.show() diff --git a/examples/ConsIndShockModel/IndShockConsumerType_Jacobian_Example.py b/examples/ConsIndShockModel/IndShockConsumerType_Jacobian_Example.py deleted file mode 100644 index 6126e1959..000000000 --- a/examples/ConsIndShockModel/IndShockConsumerType_Jacobian_Example.py +++ /dev/null @@ -1,180 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% [markdown] -# # Computing Heterogeneous Agent Jacobians in HARK -# -# By William Du -# -# This notebook illustrates how to compute Heterogeneous Agent Jacobian matrices in HARK. -# -# These matrices are a fundamental building block for solving Heterogeneous Agent New Keynesian models with the sequence-space Jacobian methodology. For more information, see [Auclert, Bardóczy, Rognlie, and Straub (2021)](https://onlinelibrary.wiley.com/doi/abs/10.3982/ECTA17434). -# -# For the IndShockConsumerType, Jacobians of Consumption and Saving can be computed with respect to the following parameters: LivPrb, PermShkStd, TranShkStd, DiscFac, UnempPrb, Rfree, IncUnemp.
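-# %% [markdown]
-# As context for how these matrices are used: under the sequence-space convention
-# of Auclert et al. (2021), entry J[t, s] of a Jacobian is the date-t response of
-# the aggregate to a date-s perturbation of the input, so the Jacobian acts as a
-# linear map from perturbation paths to response paths. The cell below is a hedged
-# sketch of that use with a placeholder matrix; in this notebook the role of J_toy
-# is played by arrays such as CJAC_Rfree computed further down.

-# %%
-import numpy as np
-
-T = 300
-J_toy = np.eye(T)  # placeholder T x T Jacobian, for illustration only
-dr = np.zeros(T)
-dr[0] = 0.001  # a one-time perturbation at date 0
-dC = J_toy @ dr  # first-order response path: dC_t = sum_s J[t, s] * dr_s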
- -# %% -from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType - - -import time -import numpy as np -import matplotlib.pyplot as plt -from copy import copy, deepcopy - - -# %% [markdown] -# ## Create Agent - -# %% -# Dictionary for Agent - -Dict = { - # Solving Parameters - "aXtraMax": 1000, - "aXtraCount": 200, - # Transition Matrix Simulation Parameters - "mMax": 10000, - "mMin": 1e-6, - "mCount": 300, - "mFac": 3, -} - -# %% - -Agent = IndShockConsumerType(**Dict) - - -# %% [markdown] -# ## Compute Steady State - -# %% - -start = time.time() -Agent.compute_steady_state() -print("Seconds to compute steady state", time.time() - start) - - -# %% [markdown] -# ## Compute Jacobians -# -# Shocks possible: LivPrb, PermShkStd, TranShkStd, DiscFac, UnempPrb, Rfree, IncUnemp - -# %% [markdown] -# ### Shock to the Standard Deviation of Permanent Income Shocks - -# %% - -start = time.time() - -CJAC_Perm, AJAC_Perm = Agent.calc_jacobian("PermShkStd", 300) - -print("Seconds to calculate Jacobian", time.time() - start) - - -# %% [markdown] -# #### Consumption Jacobians - -# %% - -plt.plot(CJAC_Perm.T[0]) -plt.plot(CJAC_Perm.T[10]) -plt.plot(CJAC_Perm.T[30]) -plt.show() - - -# %% [markdown] -# #### Asset Jacobians - -# %% - -plt.plot(AJAC_Perm.T[0]) -plt.plot(AJAC_Perm.T[10]) -plt.plot(AJAC_Perm.T[30]) -plt.plot(AJAC_Perm.T[60]) -plt.show() - - -# %% [markdown] -# ## Shock to Real Interest Rate - -# %% -CJAC_Rfree, AJAC_Rfree = Agent.calc_jacobian("Rfree", 300) - - -# %% [markdown] -# #### Consumption Jacobians - -# %% - -plt.plot(CJAC_Rfree.T[0]) -plt.plot(CJAC_Rfree.T[10]) -plt.plot(CJAC_Rfree.T[30]) -plt.plot(CJAC_Rfree.T[60]) -plt.show() - - -# %% [markdown] -# #### Asset Jacobians - -# %% - -plt.plot(AJAC_Rfree.T[0]) -plt.plot(AJAC_Rfree.T[10]) -plt.plot(AJAC_Rfree.T[30]) -plt.plot(AJAC_Rfree.T[60]) -plt.show() - -# %% [markdown] -# ## Shock to Unemployment Probability - -# %% -CJAC_UnempPrb, AJAC_UnempPrb = Agent.calc_jacobian("UnempPrb", 300) - - -# %% -plt.plot(CJAC_UnempPrb.T[0]) -plt.plot(CJAC_UnempPrb.T[10]) -plt.plot(CJAC_UnempPrb.T[30]) -plt.plot(CJAC_UnempPrb.T[60]) -plt.show() - -# %% -plt.plot(AJAC_UnempPrb.T[0]) -plt.plot(AJAC_UnempPrb.T[10]) -plt.plot(AJAC_UnempPrb.T[30]) -plt.plot(AJAC_UnempPrb.T[60]) -plt.show() - -# %% [markdown] -# ## Shock to Discount Factor - -# %% -CJAC_DiscFac, AJAC_DiscFac = Agent.calc_jacobian("DiscFac", 300) - - -# %% -plt.plot(CJAC_DiscFac.T[0]) -plt.plot(CJAC_DiscFac.T[10]) -plt.plot(CJAC_DiscFac.T[30]) -plt.plot(CJAC_DiscFac.T[60]) -plt.show() - -# %% -plt.plot(AJAC_DiscFac.T[0]) -plt.plot(AJAC_DiscFac.T[10]) -plt.plot(AJAC_DiscFac.T[30]) -plt.plot(AJAC_DiscFac.T[60]) -plt.show() diff --git a/examples/ConsIndShockModel/IndShockConsumerType_Transition_Matrix_Example.py b/examples/ConsIndShockModel/IndShockConsumerType_Transition_Matrix_Example.py deleted file mode 100644 index a516f822e..000000000 --- a/examples/ConsIndShockModel/IndShockConsumerType_Transition_Matrix_Example.py +++ /dev/null @@ -1,771 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% [markdown] -# # Using Transition Matrix Methods under IndShockConsumerType -# -# By William Du (wdu9@jhu.edu) - -# %% [markdown] -# This Jupyter Notebook demonstrates how to non-stochastically simulate an economy with transition matrices using
methods of the IndShockConsumerType. -# -# The three key methods for non-stochastic simulation are: -# -# ##### define_distribution_grid -# - computes the grid of normalized market resources and the grid of permanent income, storing each as an attribute of self. -# -# ##### calc_transition_matrix -# - computes the transition matrix (or matrices), a grid of consumption policies, and a grid of asset policies, stored as attributes of self. If the problem has a finite horizon, this function stores lists of transition matrices, consumption policy grids, and asset policy grids for each period as attributes of self. -# -# ##### calc_ergodic_dist -# - computes the ergodic distribution, stored as attributes. The distribution is stored as a vector (self.vec_erg_dstn) and as a grid (self.erg_dstn). -# -# - -# %% [markdown] -# ## Set up Computational Environment - -# %% - -from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType - - -import time -from copy import copy, deepcopy -import numpy as np -import matplotlib.pyplot as plt - - -# %% [markdown] -# ## Set up the Dictionary - -# %% - -Dict = { - # Parameters shared with the perfect foresight model - "CRRA": 2, # Coefficient of relative risk aversion - "Rfree": 1.04**0.25, # Interest factor on assets - "DiscFac": 0.975, # Intertemporal discount factor - "LivPrb": [0.99375], # Survival probability - "PermGroFac": [1.00], # Permanent income growth factor - # Parameters that specify the income distribution over the lifecycle - "PermShkStd": [0.06], # Standard deviation of log permanent shocks to income - "PermShkCount": 5, # Number of points in discrete approximation to permanent income shocks - "TranShkStd": [0.2], # Standard deviation of log transitory shocks to income - "TranShkCount": 5, # Number of points in discrete approximation to transitory income shocks - "UnempPrb": 0.00, # Probability of unemployment while working - "IncUnemp": 0.0, # Unemployment benefits replacement rate - "UnempPrbRet": 0.0000, # Probability of "unemployment" while retired - "IncUnempRet": 0.0, # "Unemployment" benefits when retired - "T_retire": 0, # Period of retirement (0 --> no retirement) - "tax_rate": 0.0, # Flat income tax rate (legacy parameter, will be removed in future) - # A few other parameters - "BoroCnstArt": 0.0, # Artificial borrowing constraint; imposed minimum level of end-of-period assets - "T_cycle": 1, # Number of periods in the cycle for this agent type - # Parameters only used in simulation - "AgentCount": 200000, # Number of agents of this type - "T_sim": 1100, # Number of periods to simulate - "aNrmInitMean": np.log(0.0), # Mean of log initial assets - # The value of np.log(0.0) causes the code to ensure newborns have exactly 1.0 in market resources.
-    "aNrmInitStd": 0.0,  # Standard deviation of log initial assets
-    "pLvlInitMean": 0.0,  # Mean of log initial permanent income
-    "pLvlInitStd": 0.0,  # Standard deviation of log initial permanent income
-    "PermGroFacAgg": 1.0,  # Aggregate permanent income growth factor
-    "T_age": None,  # Age after which simulated agents are automatically killed
-    # Parameters for constructing the "assets above minimum" grid
-    "aXtraMin": 0.0001,  # Minimum end-of-period "assets above minimum" value
-    "aXtraMax": 10000,  # Maximum end-of-period "assets above minimum" value
-    "aXtraCount": 130,  # Number of points in the base grid of "assets above minimum"
-    "aXtraNestFac": 3,  # Exponential nesting factor when constructing "assets above minimum" grid
-    "aXtraExtra": [None],  # Additional values to add to aXtraGrid
-    # Parameters for Transition Matrix Simulation
-    "mCount": 90,
-    "mFac": 3,
-    "mMin": 1e-4,
-    "mMax": 10000,
-}
-
-
-# %% [markdown]
-# ## Create an Instance of IndShockConsumerType and Solve
-
-# %%
-
-example1 = IndShockConsumerType(**Dict)
-example1.cycles = 0
-example1.solve()
-
-# %% [markdown]
-# # Simulation: Transition Matrix vs Monte Carlo
-
-# %% [markdown]
-# This section compares Monte Carlo methods against the transition matrix approach in computing aggregate consumption and aggregate assets.
-
-# %% [markdown]
-# ## Method 1: Monte Carlo
-
-# %%
-
-# Simulation Parameters
-
-# Simulate
-example1.track_vars = ["aLvl"]  # Track level of assets
-example1.initialize_sim()
-example1.simulate()  # Simulate with Monte Carlo
-
-# Compute Aggregate Consumption and Aggregate Assets
-Monte_Carlo_Consumption = np.mean(
-    (example1.state_now["mNrm"] - example1.state_now["aNrm"])
-    * example1.state_now["pLvl"]
-)  # Aggregate Consumption
-Monte_Carlo_Assets = np.mean(
-    example1.state_now["aNrm"] * example1.state_now["pLvl"]
-)  # Aggregate Assets
-
-
-# %% [markdown]
-# ## Method 2: Transition Matrices
-
-# %%
-
-example1.define_distribution_grid(num_pointsP=110, timestonest=3)
-p = example1.dist_pGrid  # Grid of permanent income levels
-
-start = time.time()
-
-example1.calc_transition_matrix()
-c = example1.cPol_Grid  # Normalized Consumption Policy Grid
-asset = example1.aPol_Grid  # Normalized Asset Policy Grid
-
-example1.calc_ergodic_dist()
-vecDstn = (
-    example1.vec_erg_dstn
-)  # Distribution of market resources and permanent income as a (m*p)x1 vector, where
-# m is the number of market resource gridpoints and p is the number of permanent income gridpoints
-erg_dstn = example1.erg_dstn
-
-print(
-    "Seconds to calculate both the transition matrix and the steady state distribution",
-    time.time() - start,
-)
-
-
-# %%
-
-# Compute Aggregate Consumption and Aggregate Assets
-gridc = np.zeros((len(c), len(p)))
-grida = np.zeros((len(asset), len(p)))
-
-for j in range(len(p)):
-    gridc[:, j] = p[j] * c  # unnormalized Consumption policy grid
-    grida[:, j] = p[j] * asset  # unnormalized Asset policy grid
-
-AggC = np.dot(gridc.flatten(), vecDstn)  # Aggregate Consumption
-AggA = np.dot(grida.flatten(), vecDstn)  # Aggregate Assets
-
-
-# %% [markdown]
-# ### Comparing Steady State Outputs of Both Methods
-
-# %%
-
-print("TranMatrix Assets = " + str(AggA))
-print("Simulated Assets = " + str(Monte_Carlo_Assets))
-
-print("TranMatrix Consumption = " + str(AggC))
-print("Simulated Consumption = " + str(Monte_Carlo_Consumption))
-
-
-# %% [markdown]
-# ### Comparing Simulated Paths of Aggregate Assets
-#
-# The following code plots the path of aggregate assets simulated by both the Monte Carlo and transition matrix methods.
-
-# %%
-
-
-aLvls = []  # Time series of aggregate assets
-
-for i in range(example1.T_sim):
-    aLvls.append(
-        np.mean(example1.history["aLvl"][i])
-    )  # compute mean of aggregate assets across all agents for each period of the simulation
-aLvls = np.array(aLvls)
-
-aLvl_tran_mat = []
-dstn = vecDstn
-for i in range(example1.T_sim - 400):
-    A_val = np.dot(grida.flatten(), dstn)
-
-    aLvl_tran_mat.append(A_val)
-    dstn = np.dot(example1.tran_matrix, dstn)
-
-
-# %%
-plt.rcParams["figure.figsize"] = (20, 10)
-plt.plot(
-    aLvls[400:], label="Monte Carlo"
-)  # Plot time series path of aggregate assets using Monte Carlo simulation methods
-plt.plot(
-    aLvl_tran_mat, label="Transition matrix"
-)  # Plot time series path of aggregate assets computed using the transition matrix
-plt.legend(prop={"size": 15})
-plt.ylim([0.37, 0.46])
-plt.show()
-
-
-# %% [markdown]
-# ### Precision vs Accuracy
-#
-# Notice that the mean level of aggregate assets differs between the two simulation methods. The transition matrix method plots a perfectly horizontal line because the initial distribution of agents across market resources and permanent income is the unit eigenvector of the steady state transition matrix. Thus, when the transition matrix is applied to the initial distribution, the distribution does not change, implying that the level of aggregate assets does not change. In contrast, the time series path simulated with Monte Carlo methods fluctuates, because Monte Carlo methods are truly stochastic: shocks are randomly drawn from an income distribution. Transition matrix methods are non-stochastic: the shock values are preset and the grid over market resources is fixed. This contrast highlights the limitation of each method: Monte Carlo simulation delivers a more accurate, yet less precise, level of aggregate assets, while the transition matrix method leads in precision but requires a larger number of gridpoints to be accurate.
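Since the steady state distribution used to initialize the transition matrix simulation is, by construction, the eigenvector of `tran_matrix` associated with a unit eigenvalue, the flat line above can be confirmed numerically. A minimal sketch, reusing the `example1`, `vecDstn`, `grida`, and `AggA` objects defined in the cells above:

```python
import numpy as np

# The ergodic distribution is a fixed point of the transition matrix, so one
# application of the matrix should return (up to numerical error) the same vector.
dstn_next = np.dot(example1.tran_matrix, vecDstn)
print("Max |T d - d|:", np.max(np.abs(dstn_next - vecDstn)))

# Aggregate assets implied by the updated distribution should match AggA.
print("Aggregate assets after one transition:", np.dot(grida.flatten(), dstn_next))
```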
- -# %% [markdown] -# ## Distribution of Normalized Market Resources - -# %% - -num_pts = len(example1.dist_mGrid) -mdstn = np.zeros(num_pts) - -for i in range(num_pts): - mdstn[i] = np.sum( - example1.erg_dstn[i] - ) # distribution of normalized market resources - -h = np.histogram(example1.state_now["mNrm"], bins=example1.dist_mGrid)[0] / np.sum( - np.histogram(example1.state_now["mNrm"], bins=example1.dist_mGrid)[0] -) # Form Monte Carlo wealth data and put into histogram/bins - - -plt.plot( - example1.dist_mGrid[: num_pts - 20], - mdstn[: num_pts - 20], - label="Transition Matrix", - linewidth=3.0, -) # distribution using transition matrix method -plt.plot( - example1.dist_mGrid[: num_pts - 20 - 1], - h[: num_pts - 20 - 1], - label="Monte Carlo", - linewidth=3.0, -) # distribution using Monte Carlo -plt.legend(prop={"size": 15}) -plt.xlim([-0.5, 10]) -plt.show() - - -# %% [markdown] -# ## Distributions of Permanent Income - -# %% - -dstn = example1.erg_dstn - -pdstn = np.zeros(len(dstn[0])) -for i in range(len(pdstn)): - pdstn[i] = np.sum(dstn[:, i]) - -h = np.histogram(example1.state_now["pLvl"], bins=example1.dist_pGrid)[0] / np.sum( - np.histogram(example1.state_now["pLvl"], bins=example1.dist_pGrid)[0] -) # Form Monte Carlo wealth data and put into histogram/bins -plt.plot( - example1.dist_pGrid[:-1], h, label="Monte Carlo", linewidth=3.0 -) # distribution using Monte Carlo -plt.plot(example1.dist_pGrid, pdstn, label="transition matrix", linewidth=3.0) -plt.ylabel("Probability") -plt.xlabel("Permanent Income") -plt.title("Distribution of Permanent Income") -plt.legend(prop={"size": 15}) -plt.xlim([-0.1, 4]) -plt.show() - - -# %% [markdown] -# ## Distribution of Wealth - -# %% -# Compute all possible mLvl values given permanent income grid and normalized market resources grid - -mLvl_vals = [] - -for m in example1.dist_mGrid: - for p in example1.dist_pGrid: - mLvl_vals.append(m * p) - -mLvl_vals = np.array(mLvl_vals) - - -aLvl_vals = [] - -for a in example1.aPol_Grid: - for p in example1.dist_pGrid: - aLvl_vals.append(a * p) - -aLvl_vals = np.array(aLvl_vals) - - -# %% -def jump_to_grid_fast(m_vals, probs, Dist_mGrid): - """ - Distributes values onto a predefined grid, maintaining the means. - - - Parameters - ---------- - m_vals: np.array - Market resource values - - probs: np.array - Shock probabilities associated with combinations of m_vals. - Can be thought of as the probability mass function of (m_vals). 
-
-    Dist_mGrid : np.array
-        Grid over normalized market resources
-
-    Returns
-    -------
-    probGrid.flatten(): np.array
-        Probabilities of each gridpoint on the combined grid of market resources
-
-    """
-
-    probGrid = np.zeros(len(Dist_mGrid))
-    mIndex = np.digitize(m_vals, Dist_mGrid) - 1
-    mIndex[m_vals <= Dist_mGrid[0]] = -1
-    mIndex[m_vals >= Dist_mGrid[-1]] = len(Dist_mGrid) - 1
-
-    for i in range(len(m_vals)):
-        if mIndex[i] == -1:
-            mlowerIndex = 0
-            mupperIndex = 0
-            mlowerWeight = 1.0
-            mupperWeight = 0.0
-        elif mIndex[i] == len(Dist_mGrid) - 1:
-            mlowerIndex = -1
-            mupperIndex = -1
-            mlowerWeight = 1.0
-            mupperWeight = 0.0
-        else:
-            mlowerIndex = mIndex[i]
-            mupperIndex = mIndex[i] + 1
-            mlowerWeight = (Dist_mGrid[mupperIndex] - m_vals[i]) / (
-                Dist_mGrid[mupperIndex] - Dist_mGrid[mlowerIndex]
-            )
-            mupperWeight = 1.0 - mlowerWeight
-
-        probGrid[mlowerIndex] += probs[i] * mlowerWeight
-        probGrid[mupperIndex] += probs[i] * mupperWeight
-
-    return probGrid.flatten()
-
-
-# %%
-
-mLvl = (
-    example1.state_now["mNrm"] * example1.state_now["pLvl"]
-)  # market resources from Monte Carlo simulations
-pmf = jump_to_grid_fast(
-    mLvl_vals, vecDstn, example1.dist_mGrid
-)  # probabilities/distribution from transition matrix methods
-h = np.histogram(mLvl, bins=example1.dist_mGrid)[0] / np.sum(
-    np.histogram(mLvl, bins=example1.dist_mGrid)[0]
-)  # Form Monte Carlo wealth data and put into histogram/bins
-
-plt.plot(
-    example1.dist_mGrid[: num_pts - 20 - 1],
-    h[: num_pts - 20 - 1],
-    label="Monte Carlo",
-    linewidth=3.0,
-)  # distribution using Monte Carlo
-plt.plot(
-    example1.dist_mGrid[: num_pts - 20],
-    pmf[: num_pts - 20],
-    label="Transition Matrix",
-    linewidth=3.0,
-)
-plt.xlabel("Market Resources (Levels)")
-plt.title("Distribution of Market Resources")
-plt.legend(prop={"size": 15})
-plt.xlim([-0.5, 20])
-plt.show()
-
-
-# %% [markdown]
-# ## Distribution of Liquid Assets
-
-# %%
-
-asset_Lvl = example1.state_now["aLvl"]  # liquid assets from Monte Carlo simulations
-pmf = jump_to_grid_fast(
-    aLvl_vals, vecDstn, example1.aPol_Grid
-)  # probabilities/distribution from transition matrix methods
-h = np.histogram(asset_Lvl, bins=example1.aPol_Grid)[0] / np.sum(
-    np.histogram(asset_Lvl, bins=example1.aPol_Grid)[0]
-)  # Form Monte Carlo wealth data and put into histogram/bins
-
-plt.plot(
-    example1.aPol_Grid[: num_pts - 10 - 1],
-    h[: num_pts - 10 - 1],
-    label="Monte Carlo",
-    linewidth=3.0,
-)  # distribution using Monte Carlo
-plt.plot(
-    example1.aPol_Grid[: num_pts - 10],
-    pmf[: num_pts - 10],
-    label="Transition Matrix",
-    linewidth=3.0,
-)
-plt.xlabel("Liquid Assets (Levels)")
-plt.title("Distribution of Liquid Assets")
-plt.legend()
-plt.xlim([-0.5, 10])
-plt.show()
-
-# %% [markdown]
-# # Calculating the Path of Consumption given a Perfect Foresight MIT Shock
-#
-# This section details an experiment exhibiting how the transition matrix method can be utilized to compute the paths of aggregate consumption and aggregate assets given a perturbation to a variable for one period. In particular, agents learn in period t=0 that there will be a shock to the interest rate in period t=10. Given this, the simulated paths of aggregate consumption and aggregate assets will be computed and plotted.
-
-# %% [markdown]
-# ### Compute Steady State Distribution
-#
-# We want the simulation to begin at the economy's steady state, so we first compute the steady state distribution over market resources and permanent income. This is the distribution to which the computed transition matrices will be applied.
-
-# %%
-
-ss = IndShockConsumerType(**Dict)
-ss.cycles = 0
-ss.solve()
-
-
-# %% [markdown]
-# ## Simulating with the Harmenberg (2021) Method
-#
-# The Harmenberg (2021) method may also be implemented when simulating with transition matrices. In the following cells, we compute the steady state distribution using Harmenberg's method.
-#
-# For more information on Harmenberg's method to dramatically improve simulation times, see https://econ-ark.org/materials/harmenberg-aggregation?launch
-
-# %%
-
-# Change the income process to use the Neutral Measure
-ss.neutral_measure = True
-ss.update_income_process()
-
-ss.mCount = 1000
-ss.mMax = 3000
-
-
-# %%
-
-# Set up the grid and calculate the steady state transition matrices
-
-start = time.time()
-
-ss.define_distribution_grid()
-ss.calc_transition_matrix()
-
-c = ss.cPol_Grid  # Normalized Consumption Policy grid
-a = ss.aPol_Grid  # Normalized Asset Policy grid
-
-ss.calc_ergodic_dist()  # Calculate steady state distribution
-vecDstn_fast = (
-    ss.vec_erg_dstn
-)  # Distribution as a (mx1) vector, where m is the number of gridpoints on the market resources grid
-
-print(
-    "Seconds to calculate both the transition matrix and the steady state distribution with Harmenberg",
-    time.time() - start,
-)
-
-
-AggA_fast = np.dot(ss.aPol_Grid, vecDstn_fast)
-
-
-# %% [markdown]
-# Computing the transition matrix and ergodic distribution with the Harmenberg measure is significantly faster. (Note that the number of gridpoints on the market resources grid is 1000 instead of 90.)
-
-# %%
-
-
-plt.plot(
-    aLvls[100:], label="Monte Carlo", linewidth=2.0
-)  # Plot time series path of aggregate assets using Monte Carlo simulation methods
-plt.plot(
-    np.ones(example1.T_sim - 100) * AggA, label="transition matrix", linewidth=3.0
-)  # Plot time series path of aggregate assets computed using the transition matrix
-plt.plot(
-    np.ones(example1.T_sim - 100) * AggA_fast,
-    label="transition matrix (Harmenberg)",
-    linewidth=3.0,
-)  # Plot time series path of aggregate assets computed using the transition matrix with Harmenberg's method
-plt.ylabel("Aggregate Assets")
-plt.legend(prop={"size": 15})
-plt.rcParams["figure.figsize"] = (20, 10)
-plt.show()
-
-
-# %% [markdown]
-# ### Note: Increasing the number of gridpoints increases the accuracy of the transition matrix method
-
-# %%
-Agg_AVals = []
-
-mpoints = [100, 150, 200, 500, 3000]
-for i in mpoints:
-    ss.mCount = i
-
-    ss.define_distribution_grid()
-    ss.calc_transition_matrix()
-
-    ss.calc_ergodic_dist()  # Calculate steady state distribution
-    vecDstn_fast = (
-        ss.vec_erg_dstn
-    )  # Distribution as a (mx1) vector, where m is the number of gridpoints on the market resources grid
-    Asset_val = np.dot(ss.aPol_Grid, vecDstn_fast)
-
-    Agg_AVals.append(Asset_val)
-
-
-# %%
-for i in range(len(Agg_AVals)):
-    plt.plot(
-        np.ones(example1.T_sim - 100) * Agg_AVals[i],
-        label="gridpoints = " + str(mpoints[i]),
-        linewidth=2.0,
-    )
-
-plt.plot(
-    aLvls[100:], label="Monte Carlo", linewidth=2.0
-)  # Plot time series path of aggregate assets using Monte Carlo simulation methods
-plt.ylabel("Aggregate Assets")
-plt.legend(prop={"size": 15})
-plt.rcParams["figure.figsize"] = (20, 10)
-plt.show()
-
-
-# %% [markdown]
-# Note that the Harmenberg method not only reduces computation time but also improves the accuracy of the simulated path of assets.
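The speed gain comes from Harmenberg's change of measure: because $\mathbb{E}[\psi] = 1$, reweighting each permanent shock's probability by the shock value itself turns permanent-income-weighted aggregates into simple means, so the permanent income dimension of the distribution can be dropped. A minimal sketch of that reweighting; the shock values and probabilities below are illustrative placeholders, not the discretization actually produced by `update_income_process`:

```python
import numpy as np

# Permanent-income-neutral measure: tilt each permanent shock's probability
# by the shock value. Since E[psi] = 1, the tilted weights still sum to one.
psi_vals = np.array([0.90, 0.95, 1.00, 1.05, 1.10])  # hypothetical shock values
psi_prob = np.array([0.10, 0.20, 0.40, 0.20, 0.10])  # hypothetical probabilities

tilted_prob = psi_vals * psi_prob / np.dot(psi_vals, psi_prob)
print("Neutral-measure weights:", tilted_prob)  # sums to 1.0
```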
-
-# %% [markdown]
-# ### Monte Carlo Simulation with the Harmenberg Trick
-
-# %%
-
-ss.AgentCount = 25000
-ss.T_sim = 700
-ss.initialize_sim()
-ss.simulate()
-
-
-# %% [markdown]
-# ## Solve an Agent who Anticipates a Change in the Real Interest Rate
-#
-# Now that we have the steady state distributions from which the simulations will begin, we solve an agent who anticipates a change in the real rate in period t=10. We first solve the agent's problem to obtain the consumption policies needed to calculate the transition matrices of this economy.
-
-# %%
-
-# We will solve a finite horizon problem that begins at the steady state computed above.
-# Therefore parameters must be specified as lists, each item's index indicating the period of the horizon.
-
-params = deepcopy(Dict)
-params["T_cycle"] = 20
-params["LivPrb"] = params["T_cycle"] * [ss.LivPrb[0]]
-params["PermGroFac"] = params["T_cycle"] * [1]
-params["PermShkStd"] = params["T_cycle"] * [ss.PermShkStd[0]]
-params["TranShkStd"] = params["T_cycle"] * [ss.TranShkStd[0]]
-params["Rfree"] = params["T_cycle"] * [ss.Rfree]
-params["DiscFac"] = params["T_cycle"] * [ss.DiscFac]
-
-FinHorizonAgent = IndShockConsumerType(**params)
-FinHorizonAgent.cycles = 1
-
-FinHorizonAgent.del_from_time_inv(
-    "Rfree"
-)  # delete Rfree from the time invariant list since it varies over time
-FinHorizonAgent.add_to_time_vary("Rfree")
-
-FinHorizonAgent.del_from_time_inv(
-    "DiscFac"
-)  # delete DiscFac from the time invariant list since it varies over time
-FinHorizonAgent.add_to_time_vary("DiscFac")
-
-FinHorizonAgent.IncShkDstn = params["T_cycle"] * [ss.IncShkDstn[0]]
-FinHorizonAgent.cFunc_terminal_ = deepcopy(
-    ss.solution[0].cFunc
-)  # Set Terminal Solution as Steady State Consumption Function
-FinHorizonAgent.track_vars = ["cNrm", "pLvl", "aNrm"]
-FinHorizonAgent.T_sim = params["T_cycle"]
-FinHorizonAgent.AgentCount = ss.AgentCount
-
-
-# %% [markdown]
-# ### Implement the Perturbation in the Real Interest Rate
-
-# %%
-
-dx = -0.05  # Change in the Interest Rate
-i = 10  # Period in which the change in the interest rate occurs
-
-FinHorizonAgent.Rfree = (
-    (i) * [ss.Rfree] + [ss.Rfree + dx] + (params["T_cycle"] - i - 1) * [ss.Rfree]
-)  # Sequence of interest rates the agent faces
-
-# FinHorizonAgent.DiscFac = (i)*[ss.DiscFac] + [ss.DiscFac + dx] + (params['T_cycle'] - i -1 )*[ss.DiscFac]  # Sequence of discount factors the agent faces
-
-
-# %% [markdown]
-# ### Solve Agent
-
-# %%
-
-FinHorizonAgent.solve()
-
-
-# %% [markdown]
-# ### Simulate with Monte Carlo using the Harmenberg Trick
-
-# %%
-
-# Simulate with Monte Carlo
-
-FinHorizonAgent.PerfMITShk = True
-
-start = time.time()
-
-# Use Harmenberg Improvement for Monte Carlo
-FinHorizonAgent.neutral_measure = True
-FinHorizonAgent.update_income_process()
-
-FinHorizonAgent.initialize_sim()
-
-# Begin simulation at the steady state distribution of market resources and permanent income
-FinHorizonAgent.state_now["aNrm"] = ss.state_now["aNrm"]
-FinHorizonAgent.state_now["pLvl"] = ss.state_now["pLvl"]
-FinHorizonAgent.state_now["mNrm"] = ss.state_now["mNrm"]
-
-FinHorizonAgent.simulate()
-
-print("seconds elapsed: " + str(time.time() - start))
-
-# Compute path of aggregate consumption
-clvl = []
-alvl = []
-for i in range(FinHorizonAgent.T_sim):
-    # C = np.mean(FinHorizonAgent.history['pLvl'][i,:]*(FinHorizonAgent.history['cNrm'][i,:] ))  # Aggregate Consumption for period i
-
-    C = np.mean(
-        (FinHorizonAgent.history["cNrm"][i, :])
-    )  # Aggregate Consumption for period i
-
-    clvl.append(C)
-
-    # A = np.mean(FinHorizonAgent.history['pLvl'][i,:]*FinHorizonAgent.history['aNrm'][i,:])  # Aggregate Assets for period i
-    A = np.mean(
-        FinHorizonAgent.history["aNrm"][i, :]
-    )  # Aggregate Assets for period i
-
-    alvl.append(A)
-
-
-# %% [markdown]
-# ### Calculate Transition Matrices with the Neutral Measure (Harmenberg 2021)
-# After the agent solves his problem, the consumption policies are stored in the solution attribute of self. calc_transition_matrix() will automatically use these attributes to compute the transition matrices.
-#
-# In the cell below we calculate the transition matrices while utilizing the neutral measure for speed efficiency.
-
-# %%
-
-# The income process was already switched to the neutral measure above; match the market resources grid used for the steady state agent
-FinHorizonAgent.mCount = ss.mCount
-FinHorizonAgent.mMax = ss.mMax
-
-# Calculate Transition Matrices
-FinHorizonAgent.define_distribution_grid()
-
-start = time.time()
-FinHorizonAgent.calc_transition_matrix()
-print("Seconds to calc_transition_matrix", time.time() - start)
-
-
-# %% [markdown]
-# ### Compute Path of Aggregate Consumption
-
-# %%
-AggC_fast = []  # List of aggregate consumption for each period t
-AggA_fast = []  # List of aggregate assets for each period t
-
-dstn = vecDstn_fast  # Initial distribution set as steady state distribution
-
-c_ = FinHorizonAgent.cPol_Grid  # List of consumption policy grids, one per period
-a_ = FinHorizonAgent.aPol_Grid  # List of asset policy grids, one per period
-
-c_.append(c)
-a_.append(asset)
-for i in range(20):
-    T_mat = FinHorizonAgent.tran_matrix[i]
-    dstn = np.dot(T_mat, dstn)
-
-    C = np.dot(c_[i], dstn)  # Compute Aggregate Consumption this period
-    AggC_fast.append(C[0])
-
-    A = np.dot(a_[i], dstn)  # Compute Aggregate Assets this period
-    AggA_fast.append(A[0])
-
-AggC_fast = np.array(AggC_fast)
-AggC_fast = AggC_fast.T
-
-AggA_fast = np.array(AggA_fast)
-AggA_fast = AggA_fast.T
-
-
-# %% [markdown]
-# ### Path of Aggregate Consumption given an anticipated interest rate shock at $t=10$
-
-# %%
-
-# plt.plot(AggC, label = 'without Harmenberg')  # Without Neutral Measure
-plt.plot(
-    AggC_fast, label="Transition Matrices", linewidth=3.0
-)  # With Harmenberg Improvement
-plt.plot(
-    clvl, label="Monte Carlo", linewidth=3.0
-)  # Monte Carlo with Harmenberg Improvement
-plt.legend(prop={"size": 15})
-plt.ylim([0.95, 1.08])
-plt.show()
-
-
-# %% [markdown]
-# ### Path of Aggregate Assets given an anticipated interest rate shock at $t=10$
-
-# %%
-plt.plot(
-    AggA_fast, label="Transition Matrices", linewidth=3.0
-)  # With Harmenberg Improvement
-plt.plot(alvl, label="Monte Carlo", linewidth=3.0)
-plt.legend(prop={"size": 15})
-plt.show()
-
-
-# %%
diff --git a/examples/ConsIndShockModel/KinkedRconsumerType.py b/examples/ConsIndShockModel/KinkedRconsumerType.py
deleted file mode 100644
index 287e9bb2e..000000000
--- a/examples/ConsIndShockModel/KinkedRconsumerType.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: collapsed,code_folding
-#     cell_metadata_json: true
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %% [markdown]
-# # KinkedRconsumerType: Consumption-saving model with idiosyncratic income shocks and different interest rates on borrowing and saving
-
-
-# %% {"code_folding": [0]}
-# Initial imports and notebook setup, click arrow to show
-
-import matplotlib.pyplot as plt
-import numpy as np - -from HARK.ConsumptionSaving.ConsIndShockModel import KinkedRconsumerType -from HARK.utilities import plot_funcs_der, plot_funcs - -mystr = lambda number: "{:.4f}".format(number) - -# %% [markdown] -# The module `HARK.ConsumptionSaving.ConsIndShockModel` concerns consumption-saving models with idiosyncratic shocks to (non-capital) income. All of the models assume CRRA utility with geometric discounting, no bequest motive, and income shocks are fully transitory or fully permanent. -# -# `ConsIndShockModel` currently includes three models: -# 1. A very basic "perfect foresight" model with no uncertainty. -# 2. A model with risk over transitory and permanent income shocks. -# 3. The model described in (2), with an interest rate for debt that differs from the interest rate for savings. -# -# This notebook provides documentation for the third of these models. -# $\newcommand{\CRRA}{\rho}$ -# $\newcommand{\DiePrb}{\mathsf{D}}$ -# $\newcommand{\PermGroFac}{\Gamma}$ -# $\newcommand{\Rfree}{\mathsf{R}}$ -# $\newcommand{\DiscFac}{\beta}$ - -# %% [markdown] -# ## Statement of "kinked R" model -# -# Consider a small extension to the model faced by `IndShockConsumerType`s: that the interest rate on borrowing $a_t < 0$ is greater than the interest rate on saving $a_t > 0$. Consumers who face this kind of problem are represented by the $\texttt{KinkedRconsumerType}$ class. -# -# For a full theoretical treatment, this model analyzed in [A Theory of the Consumption Function, With -# and Without Liquidity Constraints](https://www.econ2.jhu.edu/people/ccarroll/ATheoryv3JEP.pdf) -# and its [expanded edition](https://www.econ2.jhu.edu/people/ccarroll/ATheoryv3NBER.pdf). -# -# Continuing to work with *normalized* variables (e.g. $m_t$ represents the level of market resources divided by permanent income), the "kinked R" model can be stated as: -# -# \begin{eqnarray*} -# v_t(m_t) &=& \max_{c_t} {~} U(c_t) + \DiscFac (1-\DiePrb_{t+1}) \mathbb{E}_{t} \left[ (\PermGroFac_{t+1}\psi_{t+1})^{1-\CRRA} v_{t+1}(m_{t+1}) \right], \\ -# a_t &=& m_t - c_t, \\ -# a_t &\geq& \underline{a}, \\ -# m_{t+1} &=& \Rfree_t/(\PermGroFac_{t+1} \psi_{t+1}) a_t + \theta_{t+1}, \\ -# \Rfree_t &=& \cases{\Rfree_{boro} \texttt{ if } a_t < 0 \\ -# \Rfree_{save} \texttt{ if } a_t \geq 0},\\ -# \Rfree_{boro} &>& \Rfree_{save}, \\ -# (\psi_{t+1},\theta_{t+1}) &\sim& F_{t+1}, \\ -# \mathbb{E}[\psi]=\mathbb{E}[\theta] &=& 1. -# \end{eqnarray*} - -# %% [markdown] -# ## Solving the "kinked R" model -# -# The solution method for the "kinked R" model is nearly identical to that of the `IndShockConsumerType` on which it is based, using the endogenous grid method; see the notebook for that model for more information. The only significant difference is that the interest factor varies by $a_t$ across the exogenously chosen grid of end-of-period assets, with a discontinuity in $\Rfree$ at $a_t=0$. -# -# To correctly handle this, the `solveConsKinkedR` function inserts *two* instances of $a_t=0$ into the grid of $a_t$ values: the first corresponding to $\Rfree_{boro}$ ($a_t = -0$) and the other corresponding to $\Rfree_{save}$ ($a_t = +0$). The two consumption levels (and corresponding endogenous $m_t$ gridpoints) represent points at which the agent's first order condition is satisfied at *exactly* $a_t=0$ at the two different interest factors. 
In between these two points, the first order condition *does not hold with equality*: the consumer will end the period with exactly $a_t=0$, consuming $c_t=m_t$, but his marginal utility of consumption exceeds the marginal value of saving and is less than the marginal cost of borrowing. This generates a consumption function with *two* kinks: two concave portions (for borrowing and saving) with a linear segment of slope 1 in between.
-
-# %% [markdown]
-# ## Example parameter values to construct an instance of KinkedRconsumerType
-#
-# The parameters required to create an instance of `KinkedRconsumerType` are nearly identical to those for `IndShockConsumerType`. The only difference is that the parameter $\texttt{Rfree}$ is replaced with $\texttt{Rboro}$ and $\texttt{Rsave}$.
-#
-# While the parameter $\texttt{CubicBool}$ is required to create a valid `KinkedRconsumerType` instance, it must be set to `False`; cubic spline interpolation has not yet been implemented for this model. In the future, this restriction will be lifted.
-#
-# | Parameter | Description | Code | Example value | Time-varying? |
-# | :---: | --- | --- | --- | --- |
-# | $\DiscFac$ |Intertemporal discount factor | $\texttt{DiscFac}$ | $0.96$ | |
-# | $\CRRA $ |Coefficient of relative risk aversion | $\texttt{CRRA}$ | $2.0$ | |
-# | $\Rfree_{boro}$ | Risk free interest factor for borrowing | $\texttt{Rboro}$ | $1.20$ | |
-# | $\Rfree_{save}$ | Risk free interest factor for saving | $\texttt{Rsave}$ | $1.01$ | |
-# | $1 - \DiePrb_{t+1}$ |Survival probability | $\texttt{LivPrb}$ | $[0.98]$ | $\surd$ |
-# |$\PermGroFac_{t+1}$|Permanent income growth factor|$\texttt{PermGroFac}$| $[1.01]$ | $\surd$ |
-# | $\sigma_\psi $ | Standard deviation of log permanent income shocks | $\texttt{PermShkStd}$ | $[0.1]$ |$\surd$ |
-# | $N_\psi $ | Number of discrete permanent income shocks | $\texttt{PermShkCount}$ | $7$ | |
-# | $\sigma_\theta $ | Standard deviation of log transitory income shocks | $\texttt{TranShkStd}$ | $[0.2]$ | $\surd$ |
-# | $N_\theta $ | Number of discrete transitory income shocks | $\texttt{TranShkCount}$ | $7$ | |
-# | $\mho$ | Probability of being unemployed and getting $\theta=\underline{\theta}$ | $\texttt{UnempPrb}$ | $0.05$ | |
-# | $\underline{\theta} $ | Transitory shock when unemployed | $\texttt{IncUnemp}$ | $0.3$ | |
-# | $\mho^{Ret}$ | Probability of being "unemployed" when retired | $\texttt{UnempPrbRet}$ | $0.0005$ | |
-# | $\underline{\theta}^{Ret} $ | Transitory shock when "unemployed" and retired | $\texttt{IncUnempRet}$ | $0.0$ | |
-# | $(none)$ | Period of the lifecycle model when retirement begins | $\texttt{T_retire}$ | $0$ | |
-# | $(none)$ | Minimum value in assets-above-minimum grid | $\texttt{aXtraMin}$ | $0.001$ | |
-# | $(none)$ | Maximum value in assets-above-minimum grid | $\texttt{aXtraMax}$ | $20.0$ | |
-# | $(none)$ | Number of points in base assets-above-minimum grid | $\texttt{aXtraCount}$ | $48$ | |
-# | $(none)$ | Exponential nesting factor for base assets-above-minimum grid | $\texttt{aXtraNestFac}$ | $3$ | |
-# | $(none)$ | Additional values to add to assets-above-minimum grid | $\texttt{aXtraExtra}$ | $None$ | |
-# | $\underline{a} $ | Artificial borrowing constraint (normalized) | $\texttt{BoroCnstArt}$ | $None$ | |
-# | $(none) $ |Indicator for whether $\texttt{vFunc}$ should be computed | $\texttt{vFuncBool}$ | $True$ | |
-# | $(none)$ |Indicator for whether $\texttt{cFunc}$ should use cubic splines | $\texttt{CubicBool}$ | $False$ | |
-# |$T$| Number of periods in this type's
"cycle" |$\texttt{T_cycle}$| $1$ | | -# |(none)| Number of times the "cycle" occurs |$\texttt{cycles}$| $0$ | | -# -# These example parameters are almost identical to those used for `IndShockExample` in the prior notebook, except that the interest rate on borrowing is 20% (like a credit card), and the interest rate on saving is 1%. Moreover, the artificial borrowing constraint has been set to `None`. The cell below defines a parameter dictionary with these example values. - -# %% {"code_folding": [0]} -KinkedRdict = { # Click the arrow to expand this parameter dictionary - # Parameters shared with the perfect foresight model - "CRRA": 2.0, # Coefficient of relative risk aversion - "DiscFac": 0.96, # Intertemporal discount factor - "LivPrb": [0.98], # Survival probability - "PermGroFac": [1.01], # Permanent income growth factor - "BoroCnstArt": None, # Artificial borrowing constraint; imposed minimum level of end-of period assets - # New parameters unique to the "kinked R" model - "Rboro": 1.20, # Interest factor on borrowing (a < 0) - "Rsave": 1.01, # Interest factor on saving (a > 0) - # Parameters that specify the income distribution over the lifecycle (shared with IndShockConsumerType) - "PermShkStd": [0.1], # Standard deviation of log permanent shocks to income - "PermShkCount": 7, # Number of points in discrete approximation to permanent income shocks - "TranShkStd": [0.2], # Standard deviation of log transitory shocks to income - "TranShkCount": 7, # Number of points in discrete approximation to transitory income shocks - "UnempPrb": 0.05, # Probability of unemployment while working - "IncUnemp": 0.3, # Unemployment benefits replacement rate - "UnempPrbRet": 0.0005, # Probability of "unemployment" while retired - "IncUnempRet": 0.0, # "Unemployment" benefits when retired - "T_retire": 0, # Period of retirement (0 --> no retirement) - "tax_rate": 0.0, # Flat income tax rate (legacy parameter, will be removed in future) - # Parameters for constructing the "assets above minimum" grid (shared with IndShockConsumerType) - "aXtraMin": 0.001, # Minimum end-of-period "assets above minimum" value - "aXtraMax": 20, # Maximum end-of-period "assets above minimum" value - "aXtraCount": 48, # Number of points in the base grid of "assets above minimum" - "aXtraNestFac": 3, # Exponential nesting factor when constructing "assets above minimum" grid - "aXtraExtra": [None], # Additional values to add to aXtraGrid - # A few other paramaters (shared with IndShockConsumerType) - "vFuncBool": True, # Whether to calculate the value function during solution - "CubicBool": False, # Preference shocks currently only compatible with linear cFunc - "T_cycle": 1, # Number of periods in the cycle for this agent type - # Parameters only used in simulation (shared with PerfForesightConsumerType) - "AgentCount": 10000, # Number of agents of this type - "T_sim": 500, # Number of periods to simulate - "aNrmInitMean": -6.0, # Mean of log initial assets - "aNrmInitStd": 1.0, # Standard deviation of log initial assets - "pLvlInitMean": 0.0, # Mean of log initial permanent income - "pLvlInitStd": 0.0, # Standard deviation of log initial permanent income - "PermGroFacAgg": 1.0, # Aggregate permanent income growth factor - "T_age": None, # Age after which simulated agents are automatically killed -} - -# %% [markdown] -# ## Solving and examining the solution of the "kinked R" model -# -# The cell below creates an infinite horizon instance of `KinkedRconsumerType` and solves its model by calling its `solve` method. 
- -# %% -KinkyExample = KinkedRconsumerType(**KinkedRdict) -KinkyExample.cycles = 0 # Make the example infinite horizon -KinkyExample.solve() - -# %% [markdown] -# An element of a `KinkedRconsumerType`'s solution will have all the same attributes as that of a `IndShockConsumerType`; see that notebook for details. -# -# We can plot the consumption function of our "kinked R" example, as well as the MPC: - -# %% -print("Kinked R consumption function:") -plot_funcs(KinkyExample.solution[0].cFunc, KinkyExample.solution[0].mNrmMin, 5) - -print("Kinked R marginal propensity to consume:") -plot_funcs_der(KinkyExample.solution[0].cFunc, KinkyExample.solution[0].mNrmMin, 5) - -# %% [markdown] -# ## Simulating the "kinked R" model -# -# In order to generate simulated data, an instance of `KinkedRconsumerType` needs to know how many agents there are that share these particular parameters (and are thus *ex ante* homogeneous), the distribution of states for newly "born" agents, and how many periods to simulated. These simulation parameters are described in the table below, along with example values. -# -# | Description | Code | Example value | -# | :---: | --- | --- | -# | Number of consumers of this type | $\texttt{AgentCount}$ | $10000$ | -# | Number of periods to simulate | $\texttt{T_sim}$ | $500$ | -# | Mean of initial log (normalized) assets | $\texttt{aNrmInitMean}$ | $-6.0$ | -# | Stdev of initial log (normalized) assets | $\texttt{aNrmInitStd}$ | $1.0$ | -# | Mean of initial log permanent income | $\texttt{pLvlInitMean}$ | $0.0$ | -# | Stdev of initial log permanent income | $\texttt{pLvlInitStd}$ | $0.0$ | -# | Aggregrate productivity growth factor | $\texttt{PermGroFacAgg}$ | $1.0$ | -# | Age after which consumers are automatically killed | $\texttt{T_age}$ | $None$ | -# -# Here, we will simulate 10,000 consumers for 500 periods. All newly born agents will start with permanent income of exactly $P_t = 1.0 = \exp(\texttt{pLvlInitMean})$, as $\texttt{pLvlInitStd}$ has been set to zero; they will have essentially zero assets at birth, as $\texttt{aNrmInitMean}$ is $-6.0$; assets will be less than $1\%$ of permanent income at birth. -# -# These example parameter values were already passed as part of the parameter dictionary that we used to create `KinkyExample`, so it is ready to simulate. We need to set the `track_vars` attribute to indicate the variables for which we want to record a *history*. 
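Once the simulation cells below have been run, the size of the point mass of consumers sitting exactly at the kink can be measured directly rather than eyeballed from the cumulative distribution plot. A small sketch, assuming `KinkyExample` has been simulated as described; the tolerance guards against floating-point noise and is not part of the model:

```python
import numpy as np

# Fraction of simulated consumers ending the period exactly at the kink, a_t = 0
at_kink = np.mean(np.isclose(KinkyExample.state_now["aNrm"], 0.0, atol=1e-12))
print("Share of consumers with a_t = 0: {:.3f}".format(at_kink))
```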
- -# %% -KinkyExample.track_vars = ["mNrm", "cNrm", "pLvl"] -KinkyExample.initialize_sim() -KinkyExample.simulate() - -# %% [markdown] -# We can plot the average (normalized) market resources in each simulated period: - -# %% -plt.plot(np.mean(KinkyExample.history["mNrm"], axis=1)) -plt.xlabel("Time") -plt.ylabel("Mean market resources") -plt.show() - -# %% [markdown] -# Now let's plot the distribution of (normalized) assets $a_t$ for the current population, after simulating for $500$ periods; this should be fairly close to the long run distribution: - -# %% -plt.plot( - np.sort(KinkyExample.state_now["aNrm"]), - np.linspace(0.0, 1.0, KinkyExample.AgentCount), -) -plt.xlabel("End-of-period assets") -plt.ylabel("Cumulative distribution") -plt.ylim(-0.01, 1.01) -plt.show() - -# %% [markdown] -# We can see there's a significant point mass of consumers with *exactly* $a_t=0$; these are consumers who do not find it worthwhile to give up a bit of consumption to begin saving (because $\Rfree_{save}$ is too low), and also are not willing to finance additional consumption by borrowing (because $\Rfree_{boro}$ is too high). -# -# The smaller point masses in this distribution are due to $\texttt{HARK}$ drawing simulated income shocks from the discretized distribution, rather than the "true" lognormal distributions of shocks. For consumers who ended $t-1$ with $a_{t-1}=0$ in assets, there are only 8 values the transitory shock $\theta_{t}$ can take on, and thus only 8 values of $m_t$ thus $a_t$ they can achieve; the value of $\psi_t$ is immaterial to $m_t$ when $a_{t-1}=0$. You can verify this by changing $\texttt{TranShkCount}$ to some higher value, like 25, in the dictionary above, then running the subsequent cells; the smaller point masses will not be visible to the naked eye. diff --git a/examples/ConsIndShockModel/PerfForesightConsumerType.py b/examples/ConsIndShockModel/PerfForesightConsumerType.py deleted file mode 100644 index 861f10599..000000000 --- a/examples/ConsIndShockModel/PerfForesightConsumerType.py +++ /dev/null @@ -1,298 +0,0 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: collapsed,code_folding,name,title,incorrectly_encoded_metadata,pycharm -# cell_metadata_json: true -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% [markdown] -# # PerfForesightConsumerType: Perfect foresight consumption-saving - - -# %% {"code_folding": [0]} -# Initial imports and notebook setup, click arrow to show - -from copy import copy - -import matplotlib.pyplot as plt -import numpy as np - -from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType -from HARK.utilities import plot_funcs - -mystr = lambda number: "{:.4f}".format(number) - -# %% [markdown] -# The module `HARK.ConsumptionSaving.ConsIndShockModel` concerns consumption-saving models with idiosyncratic shocks to (non-capital) income. All of the models assume CRRA utility with geometric discounting, no bequest motive, and income shocks are fully transitory or fully permanent. -# -# `ConsIndShockModel` currently includes three models: -# 1. A very basic "perfect foresight" model with no uncertainty. -# 2. A model with risk over transitory and permanent income shocks. -# 3. The model described in (2), with an interest rate for debt that differs from the interest rate for savings. 
-#
-# This notebook provides documentation for the first of these three models.
-# $\newcommand{\CRRA}{\rho}$
-# $\newcommand{\DiePrb}{\mathsf{D}}$
-# $\newcommand{\PermGroFac}{\Gamma}$
-# $\newcommand{\Rfree}{\mathsf{R}}$
-# $\newcommand{\DiscFac}{\beta}$
-
-# %% [markdown]
-# ## Statement of perfect foresight consumption-saving model
-#
-# The `PerfForesightConsumerType` class represents the problem of a consumer with Constant Relative Risk Aversion utility
-# \begin{equation}
-# U(C) = \frac{C^{1-\CRRA}}{1-\CRRA},
-# \end{equation}
-# who has perfect foresight about everything except whether he will die between the end of period $t$ and the beginning of period $t+1$, which occurs with probability $\DiePrb_{t+1}$. Permanent labor income $P_t$ grows from period $t$ to period $t+1$ by factor $\PermGroFac_{t+1}$.
-#
-# At the beginning of period $t$, the consumer has an amount of market resources $M_t$ (which includes both market wealth and current income) and must choose how much of those resources to consume $C_t$ and how much to retain in a riskless asset $A_t$, which will earn return factor $\Rfree$. The consumer cannot necessarily borrow arbitrarily; instead, he might be constrained to have a wealth-to-income ratio at least as great as some "artificial borrowing constraint" $\underline{a} \leq 0$.
-#
-# The agent's flow of future utility $U(C_{t+n})$ from consumption is geometrically discounted by factor $\DiscFac$ per period. If the consumer dies, he receives zero utility flow for the rest of time.
-#
-# The agent's problem can be written in Bellman form as:
-#
-# \begin{eqnarray*}
-# V_t(M_t,P_t) &=& \max_{C_t}~U(C_t) ~+ \DiscFac (1 - \DiePrb_{t+1}) V_{t+1}(M_{t+1},P_{t+1}), \\
-# & s.t. & \\
-# A_t &=& M_t - C_t, \\
-# A_t/P_t &\geq& \underline{a}, \\
-# M_{t+1} &=& \Rfree A_t + Y_{t+1}, \\
-# Y_{t+1} &=& P_{t+1}, \\
-# P_{t+1} &=& \PermGroFac_{t+1} P_t.
-# \end{eqnarray*}
-#
-# The consumer's problem is characterized by a coefficient of relative risk aversion $\CRRA$, an intertemporal discount factor $\DiscFac$, an interest factor $\Rfree$, and age-varying sequences of the permanent income growth factor $\PermGroFac_t$ and survival probability $(1 - \DiePrb_t)$.
-#
-# While it does not reduce the computational complexity of the problem (as permanent income is deterministic, given its initial condition $P_0$), HARK represents this problem with *normalized* variables (represented in lower case), dividing all real variables by permanent income $P_t$ and utility levels by $P_t^{1-\CRRA}$. The Bellman form of the model thus reduces to:
-#
-# \begin{eqnarray*}
-# v_t(m_t) &=& \max_{c_t}~U(c_t) ~+ \DiscFac (1 - \DiePrb_{t+1}) \PermGroFac_{t+1}^{1-\CRRA} v_{t+1}(m_{t+1}), \\
-# & s.t. & \\
-# a_t &=& m_t - c_t, \\
-# a_t &\geq& \underline{a}, \\
-# m_{t+1} &=& (\Rfree/\PermGroFac_{t+1}) a_t + 1.
-# \end{eqnarray*}
-
-# %% [markdown]
-# ## Solution method for PerfForesightConsumerType
-#
-# Because of the assumptions of CRRA utility, no risk other than mortality, and no artificial borrowing constraint, the problem has a closed form solution. In fact, the consumption function is perfectly linear, and the value function composed with the inverse utility function is also linear. The mathematical solution of this model is described in detail in the lecture notes [PerfForesightCRRA](https://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/consumption/PerfForesightCRRA).
-# -# The one period problem for this model is solved by the function `solveConsPerfForesight`, which creates an instance of the class `ConsPerfForesightSolver`. To construct an instance of the class `PerfForesightConsumerType`, several parameters must be passed to its constructor as shown in the table below. - -# %% [markdown] -# ## Example parameter values to construct an instance of PerfForesightConsumerType -# -# | Parameter | Description | Code | Example value | Time-varying? | -# | :---: | --- | --- | --- | --- | -# | $\DiscFac$ |Intertemporal discount factor | $\texttt{DiscFac}$ | $0.96$ | | -# | $\CRRA $ |Coefficient of relative risk aversion | $\texttt{CRRA}$ | $2.0$ | | -# | $\Rfree$ | Risk free interest factor | $\texttt{Rfree}$ | $1.03$ | | -# | $1 - \DiePrb_{t+1}$ |Survival probability | $\texttt{LivPrb}$ | $[0.98]$ | $\surd$ | -# |$\PermGroFac_{t+1}$|Permanent income growth factor|$\texttt{PermGroFac}$| $[1.01]$ | $\surd$ | -# |$\underline{a}$|Artificial borrowing constraint|$\texttt{BoroCnstArt}$| $None$ | | -# |$(none)$|Maximum number of gridpoints in consumption function |$\texttt{aXtraCount}$| $200$ | | -# |$T$| Number of periods in this type's "cycle" |$\texttt{T_cycle}$| $1$ | | -# |(none)| Number of times the "cycle" occurs |$\texttt{cycles}$| $0$ | | -# -# Note that the survival probability and income growth factor have time subscripts; likewise, the example values for these parameters are *lists* rather than simply single floats. This is because those parameters are *time-varying*: their values can depend on which period of the problem the agent is in. All time-varying parameters *must* be specified as lists, even if the same value occurs in each period for this type. -# -# The artificial borrowing constraint can be any non-positive `float`, or it can be `None` to indicate no artificial borrowing constraint. The maximum number of gridpoints in the consumption function is only relevant if the borrowing constraint is not `None`; without an upper bound on the number of gridpoints, kinks in the consumption function will propagate indefinitely in an infinite horizon model if there is a borrowing constraint, eventually resulting in an overflow error. If there is no artificial borrowing constraint, then the number of gridpoints used to represent the consumption function is always exactly two. -# -# The last two parameters in the table specify the "nature of time" for this type: the number of (non-terminal) periods in this type's "cycle", and the number of times that the "cycle" occurs. *Every* subclass of `AgentType` uses these two code parameters to define the nature of time. Here, `T_cycle` has the value $1$, indicating that there is exactly one period in the cycle, while `cycles` is $0$, indicating that the cycle is repeated in *infinite* number of times-- it is an infinite horizon model, with the same "kind" of period repeated over and over. -# -# In contrast, we could instead specify a life-cycle model by setting `T_cycle` to $1$, and specifying age-varying sequences of income growth and survival probability. In all cases, the number of elements in each time-varying parameter should exactly equal $\texttt{T_cycle}$. -# -# The parameter $\texttt{AgentCount}$ specifies how many consumers there are of this *type*-- how many individuals have these exact parameter values and are *ex ante* homogeneous. 
This information is not relevant for solving the model, but is needed in order to simulate a population of agents, introducing *ex post* heterogeneity through idiosyncratic shocks. Of course, simulating a perfect foresight model is quite boring, as there are *no* idiosyncratic shocks other than death!
-#
-# The cell below defines a dictionary that can be passed to the constructor method for `PerfForesightConsumerType`, with the values from the table here.
-
-# %% {"code_folding": []}
-PerfForesightDict = {
-    # Parameters actually used in the solution method
-    "CRRA": 2.0,  # Coefficient of relative risk aversion
-    "Rfree": 1.03,  # Interest factor on assets
-    "DiscFac": 0.96,  # Default intertemporal discount factor
-    "LivPrb": [0.98],  # Survival probability
-    "PermGroFac": [1.01],  # Permanent income growth factor
-    "BoroCnstArt": None,  # Artificial borrowing constraint
-    "aXtraCount": 200,  # Maximum number of gridpoints in consumption function
-    # Parameters that characterize the nature of time
-    "T_cycle": 1,  # Number of periods in the cycle for this agent type
-    "cycles": 0,  # Number of times the cycle occurs (0 --> infinitely repeated)
-}
-
-# %% [markdown]
-# ## Solving and examining the solution of the perfect foresight model
-#
-# With the dictionary we have just defined, we can create an instance of `PerfForesightConsumerType` by passing the dictionary to the class (as if the class were a function). This instance can then be solved by invoking its `solve` method.
-
-# %%
-PFexample = PerfForesightConsumerType(**PerfForesightDict)
-PFexample.cycles = 0
-PFexample.solve()
-
-# %% [markdown]
-# The $\texttt{solve}$ method fills in the instance's attribute `solution` as a time-varying list of solutions to each period of the consumer's problem. In this case, `solution` will be a list with exactly one instance of the class `ConsumerSolution`, representing the solution to the infinite horizon model we specified.
-
-# %%
-print(PFexample.solution)
-
-# %% [markdown]
-# Each element of `solution` has a few attributes. To see all of them, we can use the $\texttt{vars}$ built-in function. The consumption function resides in the (time-varying) attribute $\texttt{cFunc}$ of each element of `ConsumerType.solution`.
-
-# %%
-print(vars(PFexample.solution[0]))
-
-# %% [markdown]
-# The two most important attributes of a single period solution of this model are the (normalized) consumption function $\texttt{cFunc}$ and the (normalized) value function $\texttt{vFunc}$. Let's plot those functions near the lower bound of the permissible state space (the attribute $\texttt{mNrmMin}$ tells us the lower bound of $m_t$ where the consumption function is defined).
-
-# %%
-print("Linear perfect foresight consumption function:")
-mMin = PFexample.solution[0].mNrmMin
-plot_funcs(PFexample.solution[0].cFunc, mMin, mMin + 10.0)
-
-# %%
-print("Perfect foresight value function:")
-plot_funcs(PFexample.solution[0].vFunc, mMin + 0.1, mMin + 10.1)
-
-# %% [markdown]
-# An element of `solution` also includes the (normalized) marginal value function $\texttt{vPfunc}$, and the lower and upper bounds of the marginal propensity to consume (MPC) $\texttt{MPCmin}$ and $\texttt{MPCmax}$. Note that with a linear consumption function, the MPC is constant, so its lower and upper bound are identical.
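Since the solution is linear, it can be checked against the closed form directly. A quick numerical sketch, assuming `PFexample` from above and the solution attributes named in this notebook ($\texttt{cFunc}$, $\texttt{MPCmin}$, and human wealth $\texttt{hNrm}$):

```python
import numpy as np

# Unconstrained perfect foresight solution: c(m) = MPCmin * (m + hNrm),
# i.e. consume a constant fraction of total (market plus human) wealth.
sol = PFexample.solution[0]
m_pts = np.array([1.0, 5.0, 10.0])
print("cFunc(m):    ", sol.cFunc(m_pts))
print("closed form: ", sol.MPCmin * (m_pts + sol.hNrm))
```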
-# -# ### Liquidity constrained perfect foresight example -# -# Without an artificial borrowing constraint, a perfect foresight consumer is free to borrow against the PDV of his entire future stream of labor income-- his "human wealth" $\texttt{hNrm}$-- and he will consume a constant proportion of his total wealth (market resources plus human wealth). If we introduce an artificial borrowing constraint, both of these features vanish. In the cell below, we define a parameter dictionary that prevents the consumer from borrowing *at all*, create and solve a new instance of `PerfForesightConsumerType` with it, and then plot its consumption function. - -# %% {"pycharm": {"name": "#%%\n"}} -LiqConstrDict = copy(PerfForesightDict) -LiqConstrDict["BoroCnstArt"] = 0.0 # Set the artificial borrowing constraint to zero - -LiqConstrExample = PerfForesightConsumerType(**LiqConstrDict) -LiqConstrExample.cycles = 0 # Make this type be infinite horizon -LiqConstrExample.solve() - -print("Liquidity constrained perfect foresight consumption function:") -plot_funcs(LiqConstrExample.solution[0].cFunc, 0.0, 10.0) - -# %% {"incorrectly_encoded_metadata": "pycharm= [markdown] {\"name\": \"#%% md\\n\"}"} -# At this time, the value function for a perfect foresight consumer with an artificial borrowing constraint is not computed nor included as part of its $\texttt{solution}$. - -# %% [markdown] -# ## Simulating the perfect foresight consumer model -# -# Suppose we wanted to simulate many consumers who share the parameter values that we passed to `PerfForesightConsumerType`-- an *ex ante* homogeneous *type* of consumers. To do this, our instance would have to know *how many* agents there are of this type, as well as their initial levels of assets $a_t$ and permanent income $P_t$. -# -# ### Setting simulation parameters -# -# Let's fill in this information by passing another dictionary to `PFexample` with simulation parameters. The table below lists the parameters that an instance of `PerfForesightConsumerType` needs in order to successfully simulate its model using the `simulate` method. -# -# | Description | Code | Example value | -# | :---: | --- | --- | -# | Number of consumers of this type | $\texttt{AgentCount}$ | $10000$ | -# | Number of periods to simulate | $\texttt{T_sim}$ | $120$ | -# | Mean of initial log (normalized) assets | $\texttt{aNrmInitMean}$ | $-6.0$ | -# | Stdev of initial log (normalized) assets | $\texttt{aNrmInitStd}$ | $1.0$ | -# | Mean of initial log permanent income | $\texttt{pLvlInitMean}$ | $0.0$ | -# | Stdev of initial log permanent income | $\texttt{pLvlInitStd}$ | $0.0$ | -# | Aggregrate productivity growth factor | $\texttt{PermGroFacAgg}$ | $1.0$ | -# | Age after which consumers are automatically killed | $\texttt{T_age}$ | $None$ | -# -# We have specified the model so that initial assets and permanent income are both distributed lognormally, with mean and standard deviation of the underlying normal distributions provided by the user. -# -# The parameter $\texttt{PermGroFacAgg}$ exists for compatibility with more advanced models that employ aggregate productivity shocks; it can simply be set to 1. -# -# In infinite horizon models, it might be useful to prevent agents from living extraordinarily long lives through a fortuitous sequence of mortality shocks. 
We have thus provided the option of setting $\texttt{T_age}$ to specify the maximum number of periods that a consumer can live before they are automatically killed (and replaced with a new consumer with initial state drawn from the specified distributions). This can be turned off by setting it to `None`. -# -# The cell below puts these parameters into a dictionary, then gives them to `PFexample`. Note that all of these parameters *could* have been passed as part of the original dictionary; we omitted them above for simplicity. - -# %% {"pycharm": {"name": "#%%\n"}} -SimulationParams = { - "AgentCount": 10000, # Number of agents of this type - "T_sim": 120, # Number of periods to simulate - "aNrmInitMean": -6.0, # Mean of log initial assets - "aNrmInitStd": 1.0, # Standard deviation of log initial assets - "pLvlInitMean": 0.0, # Mean of log initial permanent income - "pLvlInitStd": 0.0, # Standard deviation of log initial permanent income - "PermGroFacAgg": 1.0, # Aggregate permanent income growth factor - "T_age": None, # Age after which simulated agents are automatically killed -} - -PFexample.assign_parameters(**SimulationParams) - -# %% [markdown] {"incorrectly_encoded_metadata": "pycharm= [markdown] {\"name\": \"#%% md\\n\"}"} -# To generate simulated data, we need to specify which variables we want to track the "history" of for this instance. To do so, we set the `track_vars` attribute of our `PerfForesightConsumerType` instance to be a list of strings with the simulation variables we want to track. -# -# In this model, valid arguments to `track_vars` include $\texttt{mNrm}$, $\texttt{cNrm}$, $\texttt{aNrm}$, and $\texttt{pLvl}$. Because this model has no idiosyncratic shocks, our simulated data will be quite boring. -# -# ### Generating simulated data -# -# Before simulating, the `initialize_sim` method must be invoked. This resets our instance back to its initial state, drawing a set of initial $\texttt{aNrm}$ and $\texttt{pLvl}$ values from the specified distributions and storing them in the attributes $\texttt{aNrmNow_init}$ and $\texttt{pLvlNow_init}$. It also resets this instance's internal random number generator, so that the same initial states will be set every time `initialize_sim` is called. In models with non-trivial shocks, this also ensures that the same sequence of shocks will be generated on every simulation run. -# -# Finally, the `simulate` method can be called. - -# %% {"pycharm": {"name": "#%%\n"}} -PFexample.track_vars = ["mNrm"] -PFexample.initialize_sim() -PFexample.simulate() - -# %% {"incorrectly_encoded_metadata": "pycharm= [markdown] {\"name\": \"#%% md\\n\"}"} -# Each simulation variable $\texttt{X}$ named in $\texttt{track_vars}$ will have the *history* of that variable for each agent stored in the attribute $\texttt{X_hist}$ as an array of shape $(\texttt{T_sim},\texttt{AgentCount})$. To see that the simulation worked as intended, we can plot the mean of $m_t$ in each simulated period: - -# %% {"pycharm": {"name": "#%%\n"}} -plt.plot(np.mean(PFexample.history["mNrm"], axis=1)) -plt.xlabel("Time") -plt.ylabel("Mean normalized market resources") -plt.show() - -# %% [markdown] {"incorrectly_encoded_metadata": "pycharm= [markdown] {\"name\": \"#%% md\\n\"}"} -# A perfect foresight consumer can borrow against the PDV of his future income-- his human wealth-- and thus as time goes on, our simulated agents approach the (very negative) steady state level of $m_t$ while being steadily replaced with consumers with roughly $m_t=1$. 
-#
-# The slight wiggles in the plotted curve are due to consumers randomly dying and being replaced; their replacements will have an initial state drawn from the distributions specified by the user. To see the current distribution of ages, we can look at the attribute $\texttt{t_age}$.
-
-# %% {"pycharm": {"name": "#%%\n"}}
-N = PFexample.AgentCount
-F = np.linspace(0.0, 1.0, N)
-plt.plot(np.sort(PFexample.t_age), F)
-plt.xlabel("Current age of consumers")
-plt.ylabel("Cumulative distribution")
-plt.show()
-
-# %% [markdown] {"incorrectly_encoded_metadata": "pycharm= [markdown] {\"name\": \"#%% md\\n\"}"}
-# The distribution is (discretely) exponential, with a point mass at 120 of consumers who have survived since the beginning of the simulation.
-#
-# One might wonder why HARK requires users to call `initialize_sim` before calling `simulate`: Why doesn't `simulate` just call `initialize_sim` as its first step? We have broken up these two steps so that users can simulate some number of periods, change something in the environment, and then resume the simulation.
-#
-# When called with no argument, `simulate` will simulate the model for $\texttt{T_sim}$ periods. The user can optionally pass an integer specifying the number of periods to simulate (which should not exceed $\texttt{T_sim}$).
-#
-# In the cell below, we simulate our perfect foresight consumers for 80 periods, then seize a bunch of their assets (dragging their wealth even more negative), then simulate for the remaining 40 periods.
-#
-# The `state_prev` attribute of an AgentType stores the values of the model's state variables in the _previous_ period of the simulation.
-
-# %% {"pycharm": {"name": "#%%\n"}}
-PFexample.initialize_sim()
-PFexample.simulate(80)
-PFexample.state_prev[
-    "aNrm"
-] += -5.0  # Adjust all simulated consumers' assets downward by 5
-PFexample.simulate(40)
-
-plt.plot(np.mean(PFexample.history["mNrm"], axis=1))
-plt.xlabel("Time")
-plt.ylabel("Mean normalized market resources")
-plt.show()
-
-# %%
diff --git a/examples/ConsPortfolioModel/example_ConsPortfolioModel.py b/examples/ConsPortfolioModel/example_ConsPortfolioModel.py
deleted file mode 100644
index 0494d7a5c..000000000
--- a/examples/ConsPortfolioModel/example_ConsPortfolioModel.py
+++ /dev/null
@@ -1,431 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: title,-all
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %% [markdown]
-# # Portfolio Models in HARK
-#
-# In this notebook, we consider the solution and simulation of a number of microeconomic problems in the context of optimal portfolio choice.
-#
-# The agents in this model are first defined using the dictionary from the `PerfForesightConsumerType` class, and additional attributes are added using the `IndShockConsumerType` class.
-#
-# From there, the `ConsPortfolioDict` dictionary is introduced to create the `PortfolioConsumerType` and each of the subsequent agent types using it.
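Later in this notebook, the optimal risky share is compared against a numerically computed limit as market resources approach infinity. A back-of-envelope sketch of the analytical Merton-Samuelson counterpart of that limit, using the `CRRA`, `Rfree`, `RiskyAvg`, and `RiskyStd` values from the dictionary defined below; this is an approximation for illustration, and differs slightly from the `ShareLimit` HARK computes from the discretized return distribution:

```python
# Approximate Merton-Samuelson risky share for CRRA utility:
# share ~= (equity premium) / (CRRA * variance of risky return).
CRRA, Rfree = 5.0, 1.03
RiskyAvg, RiskyStd = 1.08, 0.20

merton_share = (RiskyAvg - Rfree) / (CRRA * RiskyStd**2)
print("Approximate limiting risky share:", merton_share)  # 0.25
```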
-
-# %%
-from copy import copy, deepcopy
-from time import time
-
-import matplotlib.pyplot as plt
-import numpy as np
-
-from HARK.ConsumptionSaving.ConsIndShockModel import (
-    init_lifecycle,
-    time_params,
-    dist_params,
-    income_params,
-    liv_prb,
-)
-from HARK.ConsumptionSaving.ConsPortfolioModel import (
-    PortfolioConsumerType,
-    init_portfolio,
-)
-from HARK.utilities import plot_funcs
-
-
-# %% [markdown]
-# ## 1. The baseline model of optimal portfolio choice

-# %%
-# Define a dictionary of parameters for the Portfolio consumer type
-ConsPortfolioDict = {
-    # Parameters shared with the Perfect foresight consumer type
-    "CRRA": 5.0,  # Coefficient of relative risk aversion
-    "Rfree": 1.03,  # Interest factor on assets
-    "DiscFac": 0.90,  # Intertemporal discount factor
-    "LivPrb": [0.98],  # Survival probability
-    "PermGroFac": [1.01],  # Permanent income growth factor
-    "BoroCnstArt": 0.0,  # Artificial borrowing constraint
-    "MaxKinks": 400,  # Maximum number of grid points to allow in cFunc (should be large)
-    "AgentCount": 10000,  # Number of agents of this type (only matters for simulation)
-    "aNrmInitMean": 0.0,  # Mean of log initial assets (only matters for simulation)
-    "aNrmInitStd": 1.0,  # Standard deviation of log initial assets (only for simulation)
-    "pLvlInitMean": 0.0,  # Mean of log initial permanent income (only matters for simulation)
-    "pLvlInitStd": 0.0,  # Standard deviation of log initial permanent income (only matters for simulation)
-    "PermGroFacAgg": 1.0,  # Aggregate permanent income growth factor: portion of PermGroFac attributable to aggregate productivity growth (only matters for simulation)
-    "T_age": None,  # Age after which simulated agents are automatically killed
-    "T_cycle": 1,  # Number of periods in the cycle for this agent type
-    "PerfMITShk": False,  # Do Perfect Foresight MIT Shock: Forces Newborns to follow solution path of the agent he/she replaced when True
-    # Assets-above-minimum grid parameters
-    "aXtraMin": 0.001,  # Minimum end-of-period "assets above minimum" value
-    "aXtraMax": 100,  # Maximum end-of-period "assets above minimum" value
-    "aXtraNestFac": 1,  # Exponential nesting factor when constructing "assets above minimum" grid
-    "aXtraCount": 200,  # Number of points in the grid of "assets above minimum"
-    "aXtraExtra": [
-        None
-    ],  # Some other value of "assets above minimum" to add to the grid, not used
-    # Income process variables
-    "PermShkStd": [0.1],  # Standard deviation of log permanent income shocks
-    "PermShkCount": 7,  # Number of points in discrete approximation to permanent income shocks
-    "TranShkStd": [0.1],  # Standard deviation of log transitory income shocks
-    "TranShkCount": 7,  # Number of points in discrete approximation to transitory income shocks
-    "UnempPrb": 0.05,  # Probability of unemployment while working
-    "UnempPrbRet": 0.005,  # Probability of "unemployment" while retired
-    "IncUnemp": 0.3,  # Unemployment benefits replacement rate
-    "IncUnempRet": 0.0,  # "Unemployment" benefits when retired
-    "tax_rate": 0.0,  # Flat income tax rate
-    "T_retire": 0,  # Period of retirement (0 --> no retirement)
-    "vFuncBool": False,  # Whether to calculate the value function during solution
-    "CubicBool": False,  # Use cubic spline interpolation when True, linear interpolation when False
-    "neutral_measure": False,  # Use permanent income neutral measure (see Harmenberg 2021) during simulations when True.
-    "NewbornTransShk": False,  # Whether Newborns have transitory shock. The default is False.
- # Attributes specific to the Portfolio consumer - "RiskyAvg": 1.08, # Average return of the risky asset - "RiskyStd": 0.20, # Standard deviation of (log) risky returns - "RiskyCount": 5, # Number of integration nodes to use in approximation of risky returns - "ShareCount": 25, # Number of discrete points in the risky share approximation - "AdjustPrb": 1.0, # Probability that the agent can adjust their risky portfolio share each period - "DiscreteShareBool": False, # Flag for whether to optimize risky share on a discrete grid only -} - -# %% -# Make and solve an example portfolio choice consumer type -print("Now solving an example portfolio choice problem; this might take a moment...") -MyType = PortfolioConsumerType(**ConsPortfolioDict) -MyType.cycles = 0 -t0 = time() -MyType.solve() -t1 = time() -MyType.cFunc = [MyType.solution[t].cFuncAdj for t in range(MyType.T_cycle)] -MyType.ShareFunc = [MyType.solution[t].ShareFuncAdj for t in range(MyType.T_cycle)] -print( - "Solving an infinite horizon portfolio choice problem took " - + str(t1 - t0) - + " seconds." -) - -# %% -# Plot the consumption and risky-share functions -print("Consumption function over market resources:") -plot_funcs(MyType.cFunc[0], 0.0, 20.0) -print("Risky asset share as a function of market resources:") -print("Optimal (blue) versus Theoretical Limit (orange)") -plt.xlabel("Normalized Market Resources") -plt.ylabel("Portfolio Share") -plt.ylim(0.0, 1.0) -# Since we are using a discretization of the lognormal distribution, -# the limit is numerically computed and slightly different from -# the analytical limit obtained by Merton and Samuelson for infinite wealth -plot_funcs( - [ - MyType.ShareFunc[0], - lambda m: MyType.ShareLimit * np.ones_like(m), - ], - 0.0, - 200.0, -) - - -# %% -# Now simulate this consumer type -MyType.track_vars = ["cNrm", "Share", "aNrm", "t_age"] -MyType.T_sim = 100 -MyType.initialize_sim() -MyType.simulate() - - -# %% -print("\n\n\n") -print("For derivation of the numerical limiting portfolio share") -print("as market resources approach infinity, see") -print( - "https://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/AssetPricing/Portfolio-CRRA/" -) - - -# %% [markdown] -# ## 2. Discrete portfolio choice - -# %% -# Make another example type, but this one optimizes risky portfolio share only -# on the discrete grid of values implicitly chosen by RiskyCount, using explicit -# value maximization. -DiscConsPortfolioDict = ConsPortfolioDict.copy() -DiscConsPortfolioDict["DiscreteShareBool"] = True -# Have to actually construct value function for this to work -DiscConsPortfolioDict["vFuncBool"] = True - -# %% -# Create the discrete type using the dictionary, then change relevant attributes -DiscreteType = PortfolioConsumerType(**DiscConsPortfolioDict) -DiscreteType.cycles = 0 - -print("Now solving a discrete choice portfolio problem; this might take a minute...") - -t0 = time() -DiscreteType.solve() -t1 = time() -DiscreteType.cFunc = [ - DiscreteType.solution[t].cFuncAdj for t in range(DiscreteType.T_cycle) -] -DiscreteType.ShareFunc = [ - DiscreteType.solution[t].ShareFuncAdj for t in range(DiscreteType.T_cycle) -] -print( - "Solving an infinite horizon discrete portfolio choice problem took " - + str(t1 - t0) - + " seconds." 
-)
-
-
-# %%
-# Plot the consumption and risky-share functions
-print("Consumption function over market resources:")
-plot_funcs(DiscreteType.cFunc[0], 0.0, 50.0)
-print("Risky asset share as a function of market resources:")
-print("Optimal (blue) versus Theoretical Limit (orange)")
-plt.xlabel("Normalized Market Resources")
-plt.ylabel("Portfolio Share")
-plt.ylim(0.0, 1.0)
-# Since we are using a discretization of the lognormal distribution,
-# the limit is numerically computed and slightly different from
-# the analytical limit obtained by Merton and Samuelson for infinite wealth
-plot_funcs(
-    [DiscreteType.ShareFunc[0], lambda m: DiscreteType.ShareLimit * np.ones_like(m)],
-    0.0,
-    200.0,
-)
-
-
-# %%
-print("\n\n\n")
-
-
-# %% [markdown]
-# ## 3. A model of "sticky" portfolio choice

-# %%
-# Make another example type, but this one can only update its risky portfolio
-# share in any particular period with 15% probability.
-StickyConsPortfolioDict = ConsPortfolioDict.copy()
-StickyConsPortfolioDict["AdjustPrb"] = 0.15
-
-
-# %%
-# Make and solve a "sticky" portfolio choice consumer type
-print(
-    'Now solving a portfolio choice problem with "sticky" portfolio shares; this might take a moment...'
-)
-StickyType = PortfolioConsumerType(**StickyConsPortfolioDict)
-StickyType.cycles = 0
-t0 = time()
-StickyType.solve()
-t1 = time()
-StickyType.cFuncAdj = [
-    StickyType.solution[t].cFuncAdj for t in range(StickyType.T_cycle)
-]
-StickyType.cFuncFxd = [
-    StickyType.solution[t].cFuncFxd for t in range(StickyType.T_cycle)
-]
-StickyType.ShareFunc = [
-    StickyType.solution[t].ShareFuncAdj for t in range(StickyType.T_cycle)
-]
-print(
-    "Solving an infinite horizon sticky portfolio choice problem took "
-    + str(t1 - t0)
-    + " seconds."
-)
-
-
-# %%
-# Plot the consumption and risky-share functions
-print(
-    "Consumption function over market resources when the agent can adjust his portfolio:"
-)
-plot_funcs(StickyType.cFuncAdj[0], 0.0, 50.0)
-
-
-# %%
-print(
-    "Consumption function over market resources when the agent CAN'T adjust, by current share:"
-)
-M = np.linspace(0.0, 50.0, 200)
-for s in np.linspace(0.0, 1.0, 21):
-    C = StickyType.cFuncFxd[0](M, s * np.ones_like(M))
-    plt.plot(M, C)
-plt.xlim(0.0, 50.0)
-plt.ylim(0.0, None)
-plt.show()
-
-
-# %%
-print("Risky asset share function over market resources (when possible to adjust):")
-print("Optimal (blue) versus Theoretical Limit (orange)")
-plt.xlabel("Normalized Market Resources")
-plt.ylabel("Portfolio Share")
-plt.ylim(0.0, 1.0)
-plot_funcs(
-    [StickyType.ShareFunc[0], lambda m: StickyType.ShareLimit * np.ones_like(m)],
-    0.0,
-    200.0,
-)
-
-
-# %% [markdown]
-# Notice the wiggle in the blue line. It reflects two things: the maximum grid point for which the solution is calculated is $a=100$, and the model (incorrectly) assumes that the portfolio share asymptotes to the frictionless analytical limit. An alternative (not yet implemented) would be to calculate the implicit limit defined by the rate of geometric decay among the last grid points and assume that this is the limit.
-#
-# The remaining gap between the computed share and the frictionless limit is likely due to the agent's inability to adjust the portfolio in every period.
-#
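-
-# %% [markdown]
-# As a rough sketch of that alternative (illustrative only: the three evaluation points are arbitrary, and the formula assumes the gap to the limit decays geometrically between them):

-# %%
-m_top = np.array([60.0, 80.0, 100.0])  # equally spaced points near the top of the grid
-s_top = StickyType.ShareFunc[0](m_top)
-d1, d2 = s_top[1] - s_top[0], s_top[2] - s_top[1]
-q = d2 / d1  # common ratio if (share - limit) decays geometrically
-implied_limit = s_top[2] + d2 * q / (1.0 - q)  # Aitken-style extrapolation
-print("Implied limiting share:", implied_limit)
-print("Assumed frictionless limit:", StickyType.ShareLimit)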
-# %% [markdown]
-# ## 4. Life-cycle model of portfolio choice

-# %%
-LC_ConsPortfolioDict = copy(ConsPortfolioDict)
-LC_ConsPortfolioDict.update(time_params)
-LC_ConsPortfolioDict.update(dist_params)
-# Note that the income specification overrides the pLvlInitMean from the SCF.
-LC_ConsPortfolioDict.update(income_params)
-LC_ConsPortfolioDict.update({"LivPrb": liv_prb})
-
-LC_ConsPortfolioDict["RiskyAvg"] = [1.08] * init_lifecycle["T_cycle"]
-LC_ConsPortfolioDict["RiskyStd"] = list(
-    np.linspace(0.20, 0.30, init_lifecycle["T_cycle"])
-)
-LC_ConsPortfolioDict["RiskyAvgTrue"] = 1.08
-LC_ConsPortfolioDict["RiskyStdTrue"] = 0.20
-AgeVaryingRiskPercType = PortfolioConsumerType(**LC_ConsPortfolioDict)
-AgeVaryingRiskPercType.cycles = 1
-
-# %%
-# Solve the agent type with age-varying risk perceptions
-print("Now solving a portfolio choice problem with age-varying risk perceptions...")
-t0 = time()
-AgeVaryingRiskPercType.solve()
-AgeVaryingRiskPercType.cFunc = [
-    AgeVaryingRiskPercType.solution[t].cFuncAdj
-    for t in range(AgeVaryingRiskPercType.T_cycle)
-]
-AgeVaryingRiskPercType.ShareFunc = [
-    AgeVaryingRiskPercType.solution[t].ShareFuncAdj
-    for t in range(AgeVaryingRiskPercType.T_cycle)
-]
-t1 = time()
-print(
-    "Solving a "
-    + str(AgeVaryingRiskPercType.T_cycle)
-    + " period portfolio choice problem with age-varying risk perceptions took "
-    + str(t1 - t0)
-    + " seconds."
-)
-
-
-# %%
-# Plot the consumption and risky-share functions
-print("Consumption function over market resources in each lifecycle period:")
-plot_funcs(AgeVaryingRiskPercType.cFunc, 0.0, 20.0)
-print("Risky asset share function over market resources in each lifecycle period:")
-plot_funcs(AgeVaryingRiskPercType.ShareFunc, 0.0, 200.0)
-
-
-# %% [markdown]
-# ## 5. Portfolio choice with Merton-Samuelson limiting shares
-#
-# The code below tests the limiting behavior of the model: it defines a Merton-Samuelson type consumer and compares the numerically computed limiting share with the analytical Merton-Samuelson share.

-# %%
-# Create a grid of market resources for the plots
-mMin = 0  # Minimum ratio of assets to income to plot
-mMax = 5 * 1e2  # Maximum ratio of assets to income to plot
-mPts = 1000  # Number of points to plot
-plot_point_max = 1000
-aXtraMax = plot_point_max * 10  # Maximum asset level
-
-eevalgrid = np.linspace(0, mMax, mPts)  # range of values of assets for the plot
-
-# Number of points that will be used to approximate the risky distribution
-risky_count_grid = [5, 50, 200]
-# Ages (time periods) at which to plot; we will use the default life-cycle calibration.
-ages = [2, 4, 6, 8]

-# %%
-# Create the dictionary for the Merton-Samuelson consumer type
-MertonPortfolioDict = copy(ConsPortfolioDict)
-MertonPortfolioDict.update(time_params)
-MertonPortfolioDict.update(dist_params)
-# Note that the income specification overrides the pLvlInitMean from the SCF.
-MertonPortfolioDict.update(income_params)
-MertonPortfolioDict.update({"LivPrb": liv_prb})
-
-MertonPortfolioDict["RiskyAvgTrue"] = 1.08
-MertonPortfolioDict["RiskyStdTrue"] = 0.20
-
-
-# Create a function to compute the Merton-Samuelson limiting portfolio share.
-def RiskyShareMertSamLogNormal(RiskPrem, CRRA, RiskyVar):
-    return RiskPrem / (CRRA * RiskyVar)
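-
-# %% [markdown]
-# This function implements the Merton-Samuelson limiting portfolio share under lognormal returns,
-#
-# \begin{equation*}
-# \varsigma^{*} = \frac{\mathbb{E}[R_{risky}] - R_{free}}{\rho \sigma^{2}},
-# \end{equation*}
-#
-# where $\rho$ is the coefficient of relative risk aversion and $\sigma^{2}$ is the variance of the risky return, matching the arguments (RiskPrem, CRRA, RiskyVar) passed to it in the loop below.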
-
-# %% Calibration and solution
-for rcount in risky_count_grid:
-    # Create a new dictionary, replacing the number of points used to
-    # approximate the risky return distribution
-    MertonPortfolioDict["RiskyCount"] = rcount
-
-    # Create and solve agent
-    agent = PortfolioConsumerType(**MertonPortfolioDict)
-    agent.solve()
-
-    # Compute the analytical Merton-Samuelson limiting portfolio share
-    RiskyVar = agent.RiskyStd**2
-    RiskPrem = agent.RiskyAvg - agent.Rfree
-    MS_limit = RiskyShareMertSamLogNormal(RiskPrem, agent.CRRA, RiskyVar)
-
-    # Now compute the limiting share numerically, using the approximated
-    # distribution
-    agent.update_ShareLimit()
-    NU_limit = agent.ShareLimit
-
-    plt.figure()
-    for a in ages:
-        plt.plot(
-            eevalgrid, agent.solution[a].ShareFuncAdj(eevalgrid), label="t = %i" % (a)
-        )
-
-    plt.axhline(
-        NU_limit, c="k", ls="-.", label="Exact limit as $m\\rightarrow \\infty$."
-    )
-    plt.axhline(
-        MS_limit, c="k", ls="--", label="M&S Limit without returns discretization."
-    )
-
-    plt.ylim(0, 1.05)
-    plt.xlim(eevalgrid[0] + 1, eevalgrid[-1])
-    plt.xscale("log")
-    plt.legend()
-    plt.title(
-        "Risky Portfolio Share by Age\n Risky distribution with {points} equiprobable points".format(
-            points=rcount
-        )
-    )
-    plt.xlabel("Wealth (m)")
-
-    plt.ioff()
-    plt.draw()
-
-
-# %%
diff --git a/examples/ConsPortfolioModel/example_ConsRiskyAssetModel.py b/examples/ConsPortfolioModel/example_ConsRiskyAssetModel.py
deleted file mode 100644
index 42d950491..000000000
--- a/examples/ConsPortfolioModel/example_ConsRiskyAssetModel.py
+++ /dev/null
@@ -1,312 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: -all
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %% [markdown]
-# # Example Implementations of `HARK.ConsumptionSaving.ConsRiskyAssetModel`

-# %%
-from time import time
-
-from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
-from HARK.ConsumptionSaving.ConsPortfolioModel import (
-    init_portfolio,
-    PortfolioConsumerType,
-)
-from HARK.ConsumptionSaving.ConsRiskyAssetModel import (
-    RiskyAssetConsumerType,
-    FixedPortfolioShareRiskyAssetConsumerType,
-)
-from HARK.utilities import plot_funcs_der, plot_funcs
-
-
-# %%
-mystr = lambda number: "{:.4f}".format(number)
-
-
-# %% [markdown]
-# ## Idiosyncratic Income Shocks Consumer Type

-# %%
-# Make and solve an example consumer with idiosyncratic income shocks
-# Use init_portfolio parameters to compare to results of PortfolioConsumerType
-IndShockExample = IndShockConsumerType(**init_portfolio)
-IndShockExample.cycles = 0  # Make this type have an infinite horizon
-
-
-# %%
-start_time = time()
-IndShockExample.solve()
-end_time = time()
-print(
-    "Solving a consumer with idiosyncratic shocks took "
-    + mystr(end_time - start_time)
-    + " seconds."
-) -IndShockExample.unpack("cFunc") - - -# %% -# Plot the consumption function and MPC for the infinite horizon consumer -print("Concave consumption function:") -plot_funcs(IndShockExample.cFunc[0], 0.0, 5.0) -print("Marginal consumption function:") -plot_funcs_der(IndShockExample.cFunc[0], 0.0, 5.0) - -# %% [markdown] -# ## Risky Return Consumer Type - -# %% -# Make and solve an example consumer with risky returns to savings -# Use init_portfolio parameters to compare to results of PortfolioConsumerType -RiskyReturnExample = RiskyAssetConsumerType(**init_portfolio) -RiskyReturnExample.cycles = 0 # Make this type have an infinite horizon - - -# %% -start_time = time() -RiskyReturnExample.solve() -end_time = time() -print( - "Solving a consumer with risky returns took " - + mystr(end_time - start_time) - + " seconds." -) -RiskyReturnExample.unpack("cFunc") - - -# %% -# Plot the consumption function and MPC for the risky asset consumer -print("Concave consumption function:") -plot_funcs(RiskyReturnExample.cFunc[0], 0.0, 5.0) -print("Marginal consumption function:") -plot_funcs_der(RiskyReturnExample.cFunc[0], 0.0, 5.0) - -# %% [markdown] -# ## Compare Idiosyncratic Income Shocks with Risky Return - -# %% -# Compare the consumption functions for the various agents in this notebook. -print("Consumption functions for idiosyncratic shocks vs risky returns:") -plot_funcs( - [ - IndShockExample.cFunc[0], # blue - RiskyReturnExample.cFunc[0], # orange - ], - 0.0, - 20.0, -) - - -# %% [markdown] -# ## Risky Return Consumer Type with Portfolio Choice - -# %% -# Make and solve an example risky consumer with a portfolio choice -init_portfolio["PortfolioBool"] = True -PortfolioChoiceExample = RiskyAssetConsumerType(**init_portfolio) -PortfolioChoiceExample.cycles = 0 # Make this type have an infinite horizon - - -# %% -start_time = time() -PortfolioChoiceExample.solve() -end_time = time() -print( - "Solving a consumer with risky returns and portfolio choice took " - + mystr(end_time - start_time) - + " seconds." -) -PortfolioChoiceExample.unpack("cFunc") -PortfolioChoiceExample.unpack("ShareFunc") - - -# %% -# Plot the consumption function and MPC for the portfolio choice consumer -print("Concave consumption function:") -plot_funcs(PortfolioChoiceExample.cFunc[0], 0.0, 5.0) -print("Marginal consumption function:") -plot_funcs_der(PortfolioChoiceExample.cFunc[0], 0.0, 5.0) - -# %% [markdown] -# ## Compare Income Shocks, Risky Return, and RR w/ Portfolio Choice - -# %% -# Compare the consumption functions for the various agents in this notebook. -print( - "Consumption functions for idiosyncratic shocks vs risky returns vs portfolio choice:" -) -plot_funcs( - [ - IndShockExample.cFunc[0], # blue - RiskyReturnExample.cFunc[0], # orange - PortfolioChoiceExample.cFunc[0], # green - ], - 0.0, - 20.0, -) - - -# %% [markdown] -# ## Portfolio Consumer Type - -# %% -# Make and solve an example portfolio choice consumer -PortfolioTypeExample = PortfolioConsumerType() -PortfolioTypeExample.cycles = 0 # Make this type have an infinite horizon - - -# %% -start_time = time() -PortfolioTypeExample.solve() -end_time = time() -print( - "Solving a consumer with portfolio choice took " - + mystr(end_time - start_time) - + " seconds." 
-) -PortfolioTypeExample.unpack("cFuncAdj") -PortfolioTypeExample.unpack("ShareFuncAdj") - - -# %% -# Plot the consumption function and MPC for the portfolio choice consumer -print("Concave consumption function:") -plot_funcs(PortfolioTypeExample.cFuncAdj[0], 0.0, 5.0) -print("Marginal consumption function:") -plot_funcs_der(PortfolioTypeExample.cFuncAdj[0], 0.0, 5.0) - - -# %% [markdown] -# ## Compare RR w/ Portfolio Choice with Portfolio Choice Type - -# %% -# Compare the consumption functions for the various portfolio choice types. -print( - "Consumption functions for portfolio choice type vs risky asset with portfolio choice:" -) -plot_funcs( - [ - PortfolioTypeExample.cFuncAdj[0], # blue - PortfolioChoiceExample.cFunc[0], # orange - ], - 0.0, - 20.0, -) - - -# %% -# Compare the share functions for the various portfolio choice types. -print("Share functions for portfolio choice type vs risky asset with portfolio choice:") -plot_funcs( - [ - PortfolioTypeExample.ShareFuncAdj[0], # blue - PortfolioChoiceExample.ShareFunc[0], # orange - ], - 0, - 200, -) - - -# %% [markdown] -# ## Risky Return Given Fixed Portfolio Share - -# %% -FixedShareExample = FixedPortfolioShareRiskyAssetConsumerType(**init_portfolio) -FixedShareExample.cycles = 0 - - -# %% -start_time = time() -FixedShareExample.solve() -end_time = time() -print( - "Solving a consumer with fixed portfolio share took " - + mystr(end_time - start_time) - + " seconds." -) -FixedShareExample.unpack("cFunc") - - -# %% -# Plot the consumption function and MPC for the infinite horizon consumer -print("Concave consumption function:") -plot_funcs(FixedShareExample.cFunc[0], 0.0, 5.0) -print("Marginal consumption function:") -plot_funcs_der(FixedShareExample.cFunc[0], 0.0, 5.0) - - -# %% [markdown] -# ## Compare Idiosyncratic Shock Type with Fixed Share at 0.0 Type - -# %% -# Compare the consumption functions for the various idiosyncratic shocks -print("Consumption functions for idiosyncratic shocks vs fixed share at 0.0:") -plot_funcs( - [ - IndShockExample.cFunc[0], # blue - FixedShareExample.cFunc[0], # orange - ], - 0.0, - 20.0, -) - - -# %% [markdown] -# ## Fixed Share at 1.0 Type - -# %% -init_portfolio["RiskyShareFixed"] = [1.0] -RiskyFixedExample = FixedPortfolioShareRiskyAssetConsumerType(**init_portfolio) -RiskyFixedExample.cycles = 0 - - -# %% -start_time = time() -RiskyFixedExample.solve() -end_time = time() -print( - "Solving a consumer with share fixed at 1.0 took " - + mystr(end_time - start_time) - + " seconds." 
-) -RiskyFixedExample.unpack("cFunc") - - -# %% -# Plot the consumption function and MPC for the portfolio choice consumer -print("Concave consumption function:") -plot_funcs(RiskyFixedExample.cFunc[0], 0.0, 5.0) -print("Marginal consumption function:") -plot_funcs_der(RiskyFixedExample.cFunc[0], 0.0, 5.0) - - -# %% [markdown] -# ## Compare Fixed Share at 1.0 Type with Risky Return Type - -# %% -# Compare the consumption functions for the various risky shocks -print("Consumption functions for risky asset vs fixed share at 1.0:") -plot_funcs( - [ - RiskyReturnExample.cFunc[0], # blue - RiskyFixedExample.cFunc[0], # orange - ], - 0.0, - 200.0, -) - - -# %% diff --git a/examples/ConsPortfolioModel/example_ConsSequentialPortfolioModel.py b/examples/ConsPortfolioModel/example_ConsSequentialPortfolioModel.py deleted file mode 100644 index 963b948e6..000000000 --- a/examples/ConsPortfolioModel/example_ConsSequentialPortfolioModel.py +++ /dev/null @@ -1,171 +0,0 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: title,-all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -""" -Example implementations of SequentialPortfolioConsumerType -""" -from copy import copy -from time import time - -import matplotlib.pyplot as plt -import numpy as np - -from HARK.ConsumptionSaving.ConsIndShockModel import init_lifecycle -from HARK.ConsumptionSaving.ConsPortfolioModel import ( - SequentialPortfolioConsumerType, - init_portfolio, -) -from HARK.utilities import plot_funcs - -# %% -# Make and solve an example portfolio choice consumer type -print("Now solving an example portfolio choice problem; this might take a moment...") -MyType = SequentialPortfolioConsumerType() -MyType.cycles = 0 -t0 = time() -MyType.solve() -t1 = time() -MyType.cFunc = [MyType.solution[t].cFuncAdj for t in range(MyType.T_cycle)] -MyType.ShareFunc = [MyType.solution[t].ShareFuncAdj for t in range(MyType.T_cycle)] -MyType.SequentialShareFunc = [ - MyType.solution[t].SequentialShareFuncAdj for t in range(MyType.T_cycle) -] -print( - "Solving an infinite horizon portfolio choice problem took " - + str(t1 - t0) - + " seconds." -) - -# %% -# Plot the consumption and risky-share functions -print("Consumption function over market resources:") -plot_funcs(MyType.cFunc[0], 0.0, 20.0) - -# %% -# Since we are using a discretization of the lognormal distribution, -# the limit is numerically computed and slightly different from -# the analytical limit obtained by Merton and Samuelson for infinite wealth -print("Risky asset share as a function of liquid assets:") -print("Optimal (blue/orange) versus Theoretical Limit (green)") -plt.xlabel("Normalized Liquid Assets") -plt.ylabel("Portfolio Share") -plt.ylim(0.0, 1.0) -plt.xlim(0.0, 200.0) -mgrid = np.linspace(0.0, 300.0, 300) -cgrid = MyType.cFunc[0](mgrid) -shares = MyType.ShareFunc[0](mgrid) -agrid = mgrid - cgrid -plt.plot(agrid, shares) -plot_funcs( - [ - MyType.SequentialShareFunc[0], - lambda a: MyType.ShareLimit * np.ones_like(a), - ], - 0.0, - 200.0, -) - -# Note that the orange line lies right on top of the blue line and they are basically -# indistinguishable. This is expected, as deciding saving and risky share simultaneously -# should give the same result as when doing it sequentially. 
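-
-# %% [markdown]
-# As a quick numerical check of that claim (an illustrative sketch; the evaluation grid is an arbitrary choice), we can compare the simultaneous share rule at $m$ with the sequential rule at the implied liquid assets $a = m - c(m)$:

-# %%
-m_check = np.linspace(1.0, 150.0, 500)
-a_check = m_check - MyType.cFunc[0](m_check)
-gap = MyType.ShareFunc[0](m_check) - MyType.SequentialShareFunc[0](a_check)
-print("Largest absolute gap between the two share rules:", np.max(np.abs(gap)))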
-
-# %%
-print("\n\n\n")
-print("For derivation of the numerical limiting portfolio share")
-print("as market resources approach infinity, see")
-print(
-    "https://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/AssetPricing/Portfolio-CRRA/"
-)
-
-# %%
-print("\n\n\n")
-
-# %%
-# Make another example type, but this one can only update its risky portfolio
-# share in any particular period with 15% probability.
-init_sticky_share = init_portfolio.copy()
-init_sticky_share["AdjustPrb"] = 0.15
-
-# %%
-# Make and solve a "sticky" portfolio choice consumer type
-print(
-    'Now solving a portfolio choice problem with "sticky" portfolio shares; this might take a moment...'
-)
-StickyType = SequentialPortfolioConsumerType(**init_sticky_share)
-StickyType.cycles = 0
-t0 = time()
-StickyType.solve()
-t1 = time()
-StickyType.cFuncAdj = [
-    StickyType.solution[t].cFuncAdj for t in range(StickyType.T_cycle)
-]
-StickyType.cFuncFxd = [
-    StickyType.solution[t].cFuncFxd for t in range(StickyType.T_cycle)
-]
-StickyType.ShareFunc = [
-    StickyType.solution[t].ShareFuncAdj for t in range(StickyType.T_cycle)
-]
-StickyType.SequentialShareFunc = [
-    StickyType.solution[t].SequentialShareFuncAdj for t in range(StickyType.T_cycle)
-]
-print(
-    "Solving an infinite horizon sticky portfolio choice problem took "
-    + str(t1 - t0)
-    + " seconds."
-)
-
-# %%
-# Plot the consumption and risky-share functions
-print(
-    "Consumption function over market resources when the agent can adjust his portfolio:"
-)
-plot_funcs(StickyType.cFuncAdj[0], 0.0, 50.0)
-
-# %%
-print(
-    "Consumption function over market resources when the agent CAN'T adjust, by current share:"
-)
-M = np.linspace(0.0, 50.0, 100)
-for s in np.linspace(0.0, 1.0, 5):
-    C = StickyType.cFuncFxd[0](M, s * np.ones_like(M))
-    plt.plot(M, C)
-plt.xlim(0.0, 50.0)
-plt.ylim(0.0, None)
-plt.show()
-
-# %%
-print("Risky asset share function over market resources (when possible to adjust):")
-print("Optimal (blue/orange) versus Theoretical Limit (green)")
-plt.xlabel("Normalized Market Resources")
-plt.ylabel("Portfolio Share")
-plt.ylim(0.0, 1.0)
-mgrid = np.linspace(0.0, 200.0, 1000)
-cgrid = StickyType.cFuncAdj[0](mgrid)
-shares = StickyType.ShareFunc[0](mgrid)
-agrid = mgrid - cgrid
-plt.plot(agrid, shares)
-plot_funcs(
-    [
-        StickyType.SequentialShareFunc[0],
-        lambda a: StickyType.ShareLimit * np.ones_like(a),
-    ],
-    0.0,
-    100.0,
-)
-
-# %%
diff --git a/examples/ConsumptionSaving/example_ConsAggShockModel.py b/examples/ConsumptionSaving/example_ConsAggShockModel.py
deleted file mode 100644
index cfcb8bbaa..000000000
--- a/examples/ConsumptionSaving/example_ConsAggShockModel.py
+++ /dev/null
@@ -1,298 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: -all
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %% [markdown]
-# # Example ConsAggShockModel

-# %%
-from time import process_time
-import numpy as np
-import matplotlib.pyplot as plt
-from HARK.utilities import plot_funcs
-from HARK.distribution import DiscreteDistribution
-from HARK.ConsumptionSaving.ConsAggShockModel import (
-    AggShockConsumerType,
-    CobbDouglasEconomy,
-    AggShockMarkovConsumerType,
-    CobbDouglasMarkovEconomy,
-)
-from copy import deepcopy
-
-
-def mystr(number):
-    return "{:.4f}".format(number)
-
-
-# %%
-# Solve an AggShockConsumerType's microeconomic problem
-solve_agg_shocks_micro = False -# Solve for the equilibrium aggregate saving rule in a CobbDouglasEconomy -solve_agg_shocks_market = True -# Solve an AggShockMarkovConsumerType's microeconomic problem -solve_markov_micro = False -# Solve for the equilibrium aggregate saving rule in a CobbDouglasMarkovEconomy -solve_markov_market = False -# Solve a simple Krusell-Smith-style two state, two shock model -solve_krusell_smith = True -# Solve a CobbDouglasEconomy with many states, potentially utilizing the "state jumper" -solve_poly_state = False - -# %% [markdown] -# ### Example implementation of AggShockConsumerType - -# %% -if solve_agg_shocks_micro or solve_agg_shocks_market: - # Make an aggregate shocks consumer type - AggShockExample = AggShockConsumerType() - AggShockExample.cycles = 0 - - # Make a Cobb-Douglas economy for the agents - EconomyExample = CobbDouglasEconomy(agents=[AggShockExample]) - EconomyExample.make_AggShkHist() # Simulate a history of aggregate shocks - - # Have the consumers inherit relevant objects from the economy - AggShockExample.get_economy_data(EconomyExample) - -# %% -if solve_agg_shocks_micro: - # Solve the microeconomic model for the aggregate shocks example type (and display results) - t_start = process_time() - AggShockExample.solve() - t_end = process_time() - print( - "Solving an aggregate shocks consumer took " - + mystr(t_end - t_start) - + " seconds." - ) - print( - "Consumption function at each aggregate market resources-to-labor ratio gridpoint:" - ) - m_grid = np.linspace(0, 10, 200) - AggShockExample.unpack("cFunc") - for M in AggShockExample.Mgrid.tolist(): - mMin = AggShockExample.solution[0].mNrmMin(M) - c_at_this_M = AggShockExample.cFunc[0](m_grid + mMin, M * np.ones_like(m_grid)) - plt.plot(m_grid + mMin, c_at_this_M) - plt.ylim(0.0, None) - plt.show() - -# %% -if solve_agg_shocks_market: - # Solve the "macroeconomic" model by searching for a "fixed point dynamic rule" - t_start = process_time() - print( - "Now solving for the equilibrium of a Cobb-Douglas economy. This might take a few minutes..." - ) - EconomyExample.solve() - t_end = process_time() - print( - 'Solving the "macroeconomic" aggregate shocks model took ' - + str(t_end - t_start) - + " seconds." 
-    )
-
-    print("Aggregate savings as a function of aggregate market resources:")
-    plot_funcs(EconomyExample.AFunc, 0, 2 * EconomyExample.kSS)
-    print(
-        "Consumption function at each aggregate market resources gridpoint (in general equilibrium):"
-    )
-    AggShockExample.unpack("cFunc")
-    m_grid = np.linspace(0, 10, 200)
-    for M in AggShockExample.Mgrid.tolist():
-        mMin = AggShockExample.solution[0].mNrmMin(M)
-        c_at_this_M = AggShockExample.cFunc[0](m_grid + mMin, M * np.ones_like(m_grid))
-        plt.plot(m_grid + mMin, c_at_this_M)
-    plt.ylim(0.0, None)
-    plt.show()
-
-# %% [markdown]
-# ### Example Implementations of AggShockMarkovConsumerType

-# %%
-if solve_markov_micro or solve_markov_market or solve_krusell_smith:
-    # Make a Markov aggregate shocks consumer type
-    AggShockMrkvExample = AggShockMarkovConsumerType()
-    AggShockMrkvExample.IncShkDstn = [2 * [AggShockMrkvExample.IncShkDstn[0]]]
-    AggShockMrkvExample.cycles = 0
-
-    # Make a Cobb-Douglas economy for the agents
-    MrkvEconomyExample = CobbDouglasMarkovEconomy(agents=[AggShockMrkvExample])
-    MrkvEconomyExample.DampingFac = 0.2  # Turn down damping
-    MrkvEconomyExample.make_AggShkHist()  # Simulate a history of aggregate shocks
-    AggShockMrkvExample.get_economy_data(
-        MrkvEconomyExample
-    )  # Have the consumers inherit relevant objects from the economy
-
-# %%
-if solve_markov_micro:
-    # Solve the microeconomic model for the Markov aggregate shocks example type (and display results)
-    t_start = process_time()
-    AggShockMrkvExample.solve()
-    t_end = process_time()
-    print(
-        "Solving an aggregate shocks Markov consumer took "
-        + mystr(t_end - t_start)
-        + " seconds."
-    )
-
-    print(
-        "Consumption function at each aggregate market "
-        "resources-to-labor ratio gridpoint (for each macro state):"
-    )
-    m_grid = np.linspace(0, 10, 200)
-    AggShockMrkvExample.unpack("cFunc")
-    for i in range(2):
-        for M in AggShockMrkvExample.Mgrid.tolist():
-            mMin = AggShockMrkvExample.solution[0].mNrmMin[i](M)
-            c_at_this_M = AggShockMrkvExample.cFunc[0][i](
-                m_grid + mMin, M * np.ones_like(m_grid)
-            )
-            plt.plot(m_grid + mMin, c_at_this_M)
-    plt.ylim(0.0, None)
-    plt.show()
-
-# %%
-if solve_markov_market:
-    # Solve the "macroeconomic" model by searching for a "fixed point dynamic rule"
-    t_start = process_time()
-    print("Now solving a two-state Markov economy. This should take a few minutes...")
-    MrkvEconomyExample.solve()
-    t_end = process_time()
-    print(
-        'Solving the "macroeconomic" aggregate shocks model took '
-        + str(t_end - t_start)
-        + " seconds."
-    )
-
-    print(
-        "Consumption function at each aggregate market "
-        "resources-to-labor ratio gridpoint (for each macro state):"
-    )
-    m_grid = np.linspace(0, 10, 200)
-    AggShockMrkvExample.unpack("cFunc")
-    for i in range(2):
-        for M in AggShockMrkvExample.Mgrid.tolist():
-            mMin = AggShockMrkvExample.solution[0].mNrmMin[i](M)
-            c_at_this_M = AggShockMrkvExample.cFunc[0][i](
-                m_grid + mMin, M * np.ones_like(m_grid)
-            )
-            plt.plot(m_grid + mMin, c_at_this_M)
-    plt.ylim(0.0, None)
-    plt.show()
-
-# %%
-if solve_krusell_smith:
-    # Make a Krusell-Smith agent type
-    # NOTE: These agents aren't exactly like KS, as they don't have serially correlated unemployment
-    KSexampleType = deepcopy(AggShockMrkvExample)
-    KSexampleType.IncShkDstn[0] = [
-        DiscreteDistribution(
-            np.array([0.96, 0.04]), np.array([[1.0, 1.0], [1.0 / 0.96, 0.0]])
-        ),
-        DiscreteDistribution(
-            np.array([0.90, 0.10]), np.array([[1.0, 1.0], [1.0 / 0.90, 0.0]])
-        ),
-    ]
-
-    # Make a KS economy
-    KSeconomy = deepcopy(MrkvEconomyExample)
-    KSeconomy.agents = [KSexampleType]
-    KSeconomy.AggShkDstn = [
-        DiscreteDistribution(
-            np.array([1.0]),
-            np.array([[1.0], [1.05]]),
-        ),
-        DiscreteDistribution(
-            np.array([1.0]),
-            np.array([[1.0], [0.95]]),
-        ),
-    ]
-    KSeconomy.PermGroFacAgg = [1.0, 1.0]
-    KSexampleType.get_economy_data(KSeconomy)
-    KSeconomy.make_AggShkHist()
-
-    # Solve the K-S model
-    t_start = process_time()
-    print(
-        "Now solving a Krusell-Smith-style economy. This should take about a minute..."
-    )
-    KSeconomy.solve()
-    t_end = process_time()
-    print("Solving the Krusell-Smith model took " + str(t_end - t_start) + " seconds.")
-
-# %%
-if solve_poly_state:
-    StateCount = 15  # Number of Markov states
-    GrowthAvg = 1.01  # Average permanent income growth factor
-    GrowthWidth = 0.02  # PermGroFacAgg deviates from GrowthAvg by at most this amount
-    Persistence = 0.90  # Probability of staying in the same Markov state
-    PermGroFacAgg = np.linspace(
-        GrowthAvg - GrowthWidth, GrowthAvg + GrowthWidth, num=StateCount
-    )
-
-    # Make the Markov array with chosen states and persistence
-    PolyMrkvArray = np.zeros((StateCount, StateCount))
-    for i in range(StateCount):
-        for j in range(StateCount):
-            if i == j:
-                PolyMrkvArray[i, j] = Persistence
-            elif (i == (j - 1)) or (i == (j + 1)):
-                PolyMrkvArray[i, j] = 0.5 * (1.0 - Persistence)
-    PolyMrkvArray[0, 0] += 0.5 * (1.0 - Persistence)
-    PolyMrkvArray[StateCount - 1, StateCount - 1] += 0.5 * (1.0 - Persistence)
-
-    # Make a consumer type to inhabit the economy
-    PolyStateExample = AggShockMarkovConsumerType()
-    PolyStateExample.MrkvArray = PolyMrkvArray
-    PolyStateExample.PermGroFacAgg = PermGroFacAgg
-    PolyStateExample.IncShkDstn[0] = StateCount * [PolyStateExample.IncShkDstn[0]]
-    PolyStateExample.cycles = 0
-
-    # Make a Cobb-Douglas economy for the agents
-    # Use verbose=False to remove printing of intercept
-    PolyStateEconomy = CobbDouglasMarkovEconomy(
-        agents=[PolyStateExample], verbose=False
-    )
-    PolyStateEconomy.MrkvArray = PolyMrkvArray
-    PolyStateEconomy.PermGroFacAgg = PermGroFacAgg
-    PolyStateEconomy.PermShkAggStd = StateCount * [0.006]
-    PolyStateEconomy.TranShkAggStd = StateCount * [0.003]
-    PolyStateEconomy.slope_prev = StateCount * [1.0]
-    PolyStateEconomy.intercept_prev = StateCount * [0.0]
-    PolyStateEconomy.update()
-    PolyStateEconomy.makeAggShkDstn()
-    PolyStateEconomy.make_AggShkHist()  # Simulate a history of aggregate shocks
-    PolyStateExample.get_economy_data(
-        PolyStateEconomy
-    )  # Have the consumers inherit relevant objects from the economy
-
-    # Solve the
many state model - t_start = process_time() - print( - "Now solving an economy with " - + str(StateCount) - + " Markov states. This might take a while..." - ) - PolyStateEconomy.solve() - t_end = process_time() - print( - "Solving a model with " - + str(StateCount) - + " states took " - + str(t_end - t_start) - + " seconds." - ) diff --git a/examples/ConsumptionSaving/example_ConsGenIncProcessModel.py b/examples/ConsumptionSaving/example_ConsGenIncProcessModel.py deleted file mode 100644 index e11953a2c..000000000 --- a/examples/ConsumptionSaving/example_ConsGenIncProcessModel.py +++ /dev/null @@ -1,204 +0,0 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: -all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -from HARK.utilities import plot_funcs -from time import process_time -import matplotlib.pyplot as plt -import numpy as np -from HARK.ConsumptionSaving.ConsGenIncProcessModel import ( - IndShockExplicitPermIncConsumerType, - IndShockConsumerType, - PersistentShockConsumerType, - init_explicit_perm_inc, -) - - -def mystr(number): - return "{:.4f}".format(number) - - -# %% -do_simulation = True - -# %% -# Display information about the pLvlGrid used in these examples -print( - "The infinite horizon examples presented here use a grid of persistent income levels (pLvlGrid)" -) -print( - "based on percentiles of the long run distribution of pLvl for the given parameters. These percentiles" -) -print( - "are specified in the attribute pLvlPctiles. Here, the lowest percentile is " - + str(init_explicit_perm_inc["pLvlPctiles"][0] * 100) - + " and the highest" -) -print("percentile is " + str(init_explicit_perm_inc["pLvlPctiles"][-1] * 100) + ".\n") - -# %% -# Make and solve an example "explicit permanent income" consumer with idiosyncratic shocks -ExplicitExample = IndShockExplicitPermIncConsumerType() -t_start = process_time() -ExplicitExample.solve() -t_end = process_time() -print( - "Solving an explicit permanent income consumer took " - + mystr(t_end - t_start) - + " seconds." -) - -# %% -# Plot the consumption function at various permanent income levels -print("Consumption function by pLvl for explicit permanent income consumer:") -pLvlGrid = ExplicitExample.pLvlGrid[0] -mLvlGrid = np.linspace(0, 20, 300) -for p in pLvlGrid: - M_temp = mLvlGrid + ExplicitExample.solution[0].mLvlMin(p) - C = ExplicitExample.solution[0].cFunc(M_temp, p * np.ones_like(M_temp)) - plt.plot(M_temp, C) -plt.xlim(0.0, 20.0) -plt.ylim(0.0, None) -plt.xlabel("Market resource level mLvl") -plt.ylabel("Consumption level cLvl") -plt.show() - -# %% -# Now solve the *exact same* problem, but with the permanent income normalization -NormalizedExample = IndShockConsumerType(**init_explicit_perm_inc) -NormalizedExample.cycles = 0 -t_start = process_time() -NormalizedExample.solve() -t_end = process_time() -print( - "Solving the equivalent problem with permanent income normalized out took " - + mystr(t_end - t_start) - + " seconds." -) - -# %% -# Show that the normalized consumption function for the "explicit permanent income" consumer -# is almost identical for every permanent income level (and the same as the normalized problem's -# cFunc), but is less accurate due to extrapolation outside the bounds of pLvlGrid. 
-print("Normalized consumption function by pLvl for explicit permanent income consumer:") -pLvlGrid = ExplicitExample.pLvlGrid[0] -mNrmGrid = np.linspace(0, 20, 300) -for p in pLvlGrid: - M_temp = mNrmGrid * p + ExplicitExample.solution[0].mLvlMin(p) - C = ExplicitExample.solution[0].cFunc(M_temp, p * np.ones_like(M_temp)) - plt.plot(M_temp / p, C / p) -plt.xlim(0.0, 20.0) -plt.ylim(0.0, None) -plt.xlabel("Normalized market resources mNrm") -plt.ylabel("Normalized consumption cNrm") -plt.show() -print( - "Consumption function for normalized problem (without explicit permanent income):" -) -mNrmMin = NormalizedExample.solution[0].mNrmMin -plot_funcs(NormalizedExample.solution[0].cFunc, mNrmMin, mNrmMin + 20) - -# %% [markdown] -# The "explicit permanent income" solution deviates from the solution to the normalized problem because -# of errors from extrapolating beyond the bounds of the pLvlGrid. -# The error is largest for pLvl values -# near the upper and lower bounds, and propagates toward the center of the distribution. - -# %% -# Plot the value function at various permanent income levels -if ExplicitExample.vFuncBool: - pGrid = np.linspace(0.1, 3.0, 24) - M = np.linspace(0.001, 5, 300) - for p in pGrid: - M_temp = M + ExplicitExample.solution[0].mLvlMin(p) - C = ExplicitExample.solution[0].vFunc(M_temp, p * np.ones_like(M_temp)) - plt.plot(M_temp, C) - plt.ylim([-200, 0]) - plt.xlabel("Market resource level mLvl") - plt.ylabel("Value v") - plt.show() - -# %% -# Simulate some data -if do_simulation: - ExplicitExample.T_sim = 500 - ExplicitExample.track_vars = ["mLvl", "cLvl", "pLvl"] - ExplicitExample.make_shock_history() # This is optional - ExplicitExample.initialize_sim() - ExplicitExample.simulate() - plt.plot(np.mean(ExplicitExample.history["mLvl"], axis=1)) - plt.xlabel("Simulated time period") - plt.ylabel("Average market resources mLvl") - plt.show() - -# %% -# Make and solve an example "persistent idisyncratic shocks" consumer -PersistentExample = PersistentShockConsumerType() -t_start = process_time() -PersistentExample.solve() -t_end = process_time() -print( - "Solving a persistent income shocks consumer took " - + mystr(t_end - t_start) - + " seconds." 
-) - -# %% -# Plot the consumption function at various levels of persistent income pLvl -print( - "Consumption function by persistent income level pLvl for a consumer with AR1 coefficient of " - + str(PersistentExample.PrstIncCorr) - + ":" -) -pLvlGrid = PersistentExample.pLvlGrid[0] -mLvlGrid = np.linspace(0, 20, 300) -for p in pLvlGrid: - M_temp = mLvlGrid + PersistentExample.solution[0].mLvlMin(p) - C = PersistentExample.solution[0].cFunc(M_temp, p * np.ones_like(M_temp)) - plt.plot(M_temp, C) -plt.xlim(0.0, 20.0) -plt.ylim(0.0, None) -plt.xlabel("Market resource level mLvl") -plt.ylabel("Consumption level cLvl") -plt.show() - -# %% -# Plot the value function at various persistent income levels -if PersistentExample.vFuncBool: - pGrid = PersistentExample.pLvlGrid[0] - M = np.linspace(0.001, 5, 300) - for p in pGrid: - M_temp = M + PersistentExample.solution[0].mLvlMin(p) - C = PersistentExample.solution[0].vFunc(M_temp, p * np.ones_like(M_temp)) - plt.plot(M_temp, C) - plt.ylim([-200, 0]) - plt.xlabel("Market resource level mLvl") - plt.ylabel("Value v") - plt.show() - -# %% -# Simulate some data -if do_simulation: - PersistentExample.T_sim = 500 - PersistentExample.track_vars = ["mLvl", "cLvl", "pLvl"] - PersistentExample.initialize_sim() - PersistentExample.simulate() - plt.plot(np.mean(PersistentExample.history["mLvl"], axis=1)) - plt.xlabel("Simulated time period") - plt.ylabel("Average market resources mLvl") - plt.show() - -# %% diff --git a/examples/ConsumptionSaving/example_ConsIndShock.py b/examples/ConsumptionSaving/example_ConsIndShock.py deleted file mode 100644 index c160a4900..000000000 --- a/examples/ConsumptionSaving/example_ConsIndShock.py +++ /dev/null @@ -1,198 +0,0 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: -all -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -from HARK.ConsumptionSaving.ConsIndShockModel import ( - PerfForesightConsumerType, - IndShockConsumerType, - KinkedRconsumerType, - init_lifecycle, - init_cyclical, -) -from HARK.utilities import plot_funcs_der, plot_funcs -from time import time - -# %% -mystr = lambda number: "{:.4f}".format(number) - - -# %% -do_simulation = True - -# %% -# Make and solve an example perfect foresight consumer -PFexample = PerfForesightConsumerType() -# Make this type have an infinite horizon -PFexample.cycles = 0 - -# %% -start_time = time() -PFexample.solve() -end_time = time() -print( - "Solving a perfect foresight consumer took " - + mystr(end_time - start_time) - + " seconds." -) -PFexample.unpack("cFunc") - -# %% -# Plot the perfect foresight consumption function -print("Perfect foresight consumption function:") -mMin = PFexample.solution[0].mNrmMin -plot_funcs(PFexample.cFunc[0], mMin, mMin + 10) - -# %% -if do_simulation: - PFexample.T_sim = 120 # Set number of simulation periods - PFexample.track_vars = ["mNrm"] - PFexample.initialize_sim() - PFexample.simulate() - -# %% -# Make and solve an example consumer with idiosyncratic income shocks -IndShockExample = IndShockConsumerType() -IndShockExample.cycles = 0 # Make this type have an infinite horizon - -# %% -start_time = time() -IndShockExample.solve() -end_time = time() -print( - "Solving a consumer with idiosyncratic shocks took " - + mystr(end_time - start_time) - + " seconds." 
-) -IndShockExample.unpack("cFunc") - -# %% -# Plot the consumption function and MPC for the infinite horizon consumer -print("Concave consumption function:") -plot_funcs(IndShockExample.cFunc[0], IndShockExample.solution[0].mNrmMin, 5) -print("Marginal consumption function:") -plot_funcs_der(IndShockExample.cFunc[0], IndShockExample.solution[0].mNrmMin, 5) - -# %% -# Compare the consumption functions for the perfect foresight and idiosyncratic -# shock types. Risky income cFunc asymptotically approaches perfect foresight cFunc. -print("Consumption functions for perfect foresight vs idiosyncratic shocks:") -plot_funcs( - [PFexample.cFunc[0], IndShockExample.cFunc[0]], - IndShockExample.solution[0].mNrmMin, - 100, -) - -# %% -# Compare the value functions for the two types -if IndShockExample.vFuncBool: - print("Value functions for perfect foresight vs idiosyncratic shocks:") - plot_funcs( - [PFexample.solution[0].vFunc, IndShockExample.solution[0].vFunc], - IndShockExample.solution[0].mNrmMin + 0.5, - 10, - ) - -# %% -# Simulate some data; results stored in mNrm_hist, cNrm_hist, and pLvl_hist -if do_simulation: - IndShockExample.T_sim = 120 - IndShockExample.track_vars = ["mNrm", "cNrm", "pLvl"] - IndShockExample.make_shock_history() # This is optional, simulation will draw shocks on the fly if it isn't run. - IndShockExample.initialize_sim() - IndShockExample.simulate() - -# %% -# Make and solve an idiosyncratic shocks consumer with a finite lifecycle -LifecycleExample = IndShockConsumerType(**init_lifecycle) -LifecycleExample.cycles = ( - 1 # Make this consumer live a sequence of periods exactly once -) - -# %% -start_time = time() -LifecycleExample.solve() -end_time = time() -print("Solving a lifecycle consumer took " + mystr(end_time - start_time) + " seconds.") -LifecycleExample.unpack("cFunc") - -# %% -# Plot the consumption functions during working life -print("Consumption functions while working:") -mMin = min( - [LifecycleExample.solution[t].mNrmMin for t in range(LifecycleExample.T_cycle)] -) -plot_funcs(LifecycleExample.cFunc[: LifecycleExample.T_retire], mMin, 5) - -# %% -# Plot the consumption functions during retirement -print("Consumption functions while retired:") -plot_funcs(LifecycleExample.cFunc[LifecycleExample.T_retire :], 0, 5) - -# %% -# Simulate some data; results stored in mNrm_hist, cNrm_hist, pLvl_hist, and t_age_hist -if do_simulation: - LifecycleExample.T_sim = 120 - LifecycleExample.track_vars = ["mNrm", "cNrm", "pLvl", "t_age"] - LifecycleExample.initialize_sim() - LifecycleExample.simulate() - -# %% -# Make and solve a "cyclical" consumer type who lives the same four quarters repeatedly. -# The consumer has income that greatly fluctuates throughout the year. 
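-# With cycles = 0, the four-period sequence below repeats indefinitely, so the
-# solution consists of one consumption function per quarter.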
-CyclicalExample = IndShockConsumerType(**init_cyclical)
-CyclicalExample.cycles = 0
-
-# %%
-start_time = time()
-CyclicalExample.solve()
-end_time = time()
-print("Solving a cyclical consumer took " + mystr(end_time - start_time) + " seconds.")
-CyclicalExample.unpack("cFunc")
-
-# %%
-# Plot the consumption functions for the cyclical consumer type
-print("Quarterly consumption functions:")
-mMin = min([X.mNrmMin for X in CyclicalExample.solution])
-plot_funcs(CyclicalExample.cFunc, mMin, 5)
-
-# %%
-# Simulate some data; results stored in the history attribute for each tracked variable
-if do_simulation:
-    CyclicalExample.T_sim = 480
-    CyclicalExample.track_vars = ["mNrm", "cNrm", "pLvl", "t_cycle"]
-    CyclicalExample.initialize_sim()
-    CyclicalExample.simulate()
-
-# %%
-# Make and solve an agent with a kinky interest rate
-KinkyExample = KinkedRconsumerType()
-KinkyExample.cycles = 0  # Make this type have an infinite horizon
-
-# %%
-start_time = time()
-KinkyExample.solve()
-end_time = time()
-print("Solving a kinky consumer took " + mystr(end_time - start_time) + " seconds.")
-KinkyExample.unpack("cFunc")
-print("Kinky consumption function:")
-plot_funcs(KinkyExample.cFunc[0], KinkyExample.solution[0].mNrmMin, 5)
-
-# %%
-if do_simulation:
-    KinkyExample.T_sim = 120
-    KinkyExample.track_vars = ["mNrm", "cNrm", "pLvl"]
-    KinkyExample.initialize_sim()
-    KinkyExample.simulate()
diff --git a/examples/ConsumptionSaving/example_ConsLaborModel.py b/examples/ConsumptionSaving/example_ConsLaborModel.py
deleted file mode 100644
index 925113b07..000000000
--- a/examples/ConsumptionSaving/example_ConsLaborModel.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: -all
-#     formats: ipynb,py:percent
-#     notebook_metadata_filter: all
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-#   language_info:
-#     codemirror_mode:
-#       name: ipython
-#       version: 3
-#     file_extension: .py
-#     mimetype: text/x-python
-#     name: python
-#     nbconvert_exporter: python
-#     pygments_lexer: ipython3
-#     version: 3.9.16
-# ---

-# %%
-from HARK.ConsumptionSaving.ConsLaborModel import (
-    LaborIntMargConsumerType,
-    init_labor_lifecycle,
-)
-import numpy as np
-import matplotlib.pyplot as plt
-from time import process_time
-
-# %%
-mystr = lambda number: "{:.4f}".format(number)  # Format numbers as strings
-
-# %%
-do_simulation = True
-
-# %%
-# Make and solve a labor intensive margin consumer, i.e. a consumer with utility from leisure
-LaborIntMargExample = LaborIntMargConsumerType(verbose=0)
-LaborIntMargExample.cycles = 0
-
-# %%
-t_start = process_time()
-LaborIntMargExample.solve()
-t_end = process_time()
-print(
-    "Solving a labor intensive margin consumer took "
-    + str(t_end - t_start)
-    + " seconds."
-) - -# %% -t = 0 -bMin_orig = 0.0 -bMax = 100.0 - -# %% -# Plot the consumption function at various transitory productivity shocks -TranShkSet = LaborIntMargExample.TranShkGrid[t] -bMin = bMin_orig -B = np.linspace(bMin, bMax, 300) -bMin = bMin_orig -for Shk in TranShkSet: - B_temp = B + LaborIntMargExample.solution[t].bNrmMin(Shk) - C = LaborIntMargExample.solution[t].cFunc(B_temp, Shk * np.ones_like(B_temp)) - plt.plot(B_temp, C) - bMin = np.minimum(bMin, B_temp[0]) -plt.xlabel("Beginning of period bank balances") -plt.ylabel("Normalized consumption level") -plt.xlim(bMin, bMax - bMin_orig + bMin) -plt.ylim(0.0, None) -plt.show() - -# %% -# Plot the marginal consumption function at various transitory productivity shocks -TranShkSet = LaborIntMargExample.TranShkGrid[t] -bMin = bMin_orig -B = np.linspace(bMin, bMax, 300) -for Shk in TranShkSet: - B_temp = B + LaborIntMargExample.solution[t].bNrmMin(Shk) - C = LaborIntMargExample.solution[t].cFunc.derivativeX( - B_temp, Shk * np.ones_like(B_temp) - ) - plt.plot(B_temp, C) - bMin = np.minimum(bMin, B_temp[0]) -plt.xlabel("Beginning of period bank balances") -plt.ylabel("Marginal propensity to consume") -plt.xlim(bMin, bMax - bMin_orig + bMin) -plt.ylim(0.0, 1.0) -plt.show() - -# %% -# Plot the labor function at various transitory productivity shocks -TranShkSet = LaborIntMargExample.TranShkGrid[t] -bMin = bMin_orig -B = np.linspace(0.0, bMax, 300) -for Shk in TranShkSet: - B_temp = B + LaborIntMargExample.solution[t].bNrmMin(Shk) - Lbr = LaborIntMargExample.solution[t].LbrFunc(B_temp, Shk * np.ones_like(B_temp)) - bMin = np.minimum(bMin, B_temp[0]) - plt.plot(B_temp, Lbr) -plt.xlabel("Beginning of period bank balances") -plt.ylabel("Labor supply") -plt.xlim(bMin, bMax - bMin_orig + bMin) -plt.ylim(0.0, 1.0) -plt.show() - -# %% -# Plot the marginal value function at various transitory productivity shocks -pseudo_inverse = True -TranShkSet = LaborIntMargExample.TranShkGrid[t] -bMin = bMin_orig -B = np.linspace(0.0, bMax, 300) -for Shk in TranShkSet: - B_temp = B + LaborIntMargExample.solution[t].bNrmMin(Shk) - if pseudo_inverse: - vP = LaborIntMargExample.solution[t].vPfunc.cFunc( - B_temp, Shk * np.ones_like(B_temp) - ) - else: - vP = LaborIntMargExample.solution[t].vPfunc(B_temp, Shk * np.ones_like(B_temp)) - bMin = np.minimum(bMin, B_temp[0]) - plt.plot(B_temp, vP) -plt.xlabel("Beginning of period bank balances") -if pseudo_inverse: - plt.ylabel("Pseudo inverse marginal value") -else: - plt.ylabel("Marginal value") -plt.xlim(bMin, bMax - bMin_orig + bMin) -plt.ylim(0.0, None) -plt.show() - -# %% -if do_simulation: - t_start = process_time() - LaborIntMargExample.T_sim = 120 # Set number of simulation periods - LaborIntMargExample.track_vars = ["bNrm", "cNrm"] - LaborIntMargExample.initialize_sim() - LaborIntMargExample.simulate() - t_end = process_time() - print( - "Simulating " - + str(LaborIntMargExample.AgentCount) - + " intensive-margin labor supply consumers for " - + str(LaborIntMargExample.T_sim) - + " periods took " - + mystr(t_end - t_start) - + " seconds." 
-    )
-
-    N = LaborIntMargExample.AgentCount
-    CDF = np.linspace(0.0, 1, N)
-
-    plt.plot(np.sort(LaborIntMargExample.controls["cNrm"]), CDF)
-    plt.xlabel(
-        "Consumption cNrm in " + str(LaborIntMargExample.T_sim) + "th simulated period"
-    )
-    plt.ylabel("Cumulative distribution")
-    plt.xlim(0.0, None)
-    plt.ylim(0.0, 1.0)
-    plt.show()
-
-    plt.plot(np.sort(LaborIntMargExample.controls["Lbr"]), CDF)
-    plt.xlabel(
-        "Labor supply Lbr in " + str(LaborIntMargExample.T_sim) + "th simulated period"
-    )
-    plt.ylabel("Cumulative distribution")
-    plt.xlim(0.0, 1.0)
-    plt.ylim(0.0, 1.0)
-    plt.show()
-
-    plt.plot(np.sort(LaborIntMargExample.state_now["aNrm"]), CDF)
-    plt.xlabel(
-        "End-of-period assets aNrm in "
-        + str(LaborIntMargExample.T_sim)
-        + "th simulated period"
-    )
-    plt.ylabel("Cumulative distribution")
-    plt.xlim(0.0, 20.0)
-    plt.ylim(0.0, 1.0)
-    plt.show()
-
-# %%
-# Make and solve a labor intensive margin consumer with a finite lifecycle
-LifecycleExample = LaborIntMargConsumerType(**init_labor_lifecycle)
-LifecycleExample.cycles = (
-    1  # Make this consumer live a sequence of periods exactly once
-)
-
-# %%
-start_time = process_time()
-LifecycleExample.solve()
-end_time = process_time()
-print(
-    "Solving a lifecycle labor intensive margin consumer took "
-    + str(end_time - start_time)
-    + " seconds."
-)
-LifecycleExample.unpack("cFunc")
-
-# %%
-bMax = 20.0
-
-# %%
-# Plot the consumption function in each period of the lifecycle, using median shock
-B = np.linspace(0.0, bMax, 300)
-b_min = np.inf
-b_max = -np.inf
-for t in range(LifecycleExample.T_cycle):
-    TranShkSet = LifecycleExample.TranShkGrid[t]
-    Shk = TranShkSet[int(len(TranShkSet) // 2)]  # Use the median shock, more or less
-    B_temp = B + LifecycleExample.solution[t].bNrmMin(Shk)
-    C = LifecycleExample.solution[t].cFunc(B_temp, Shk * np.ones_like(B_temp))
-    plt.plot(B_temp, C)
-    b_min = np.minimum(b_min, B_temp[0])
-    b_max = np.maximum(b_max, B_temp[-1])
-plt.title("Consumption function across periods of the lifecycle")
-plt.xlabel("Beginning of period bank balances")
-plt.ylabel("Normalized consumption level")
-plt.xlim(b_min, b_max)
-plt.ylim(0.0, None)
-plt.show()
-
-# %%
-# Plot the marginal consumption function in each period of the lifecycle, using median shock
-B = np.linspace(0.0, bMax, 300)
-b_min = np.inf
-b_max = -np.inf
-for t in range(LifecycleExample.T_cycle):
-    TranShkSet = LifecycleExample.TranShkGrid[t]
-    Shk = TranShkSet[int(len(TranShkSet) // 2)]  # Use the median shock, more or less
-    B_temp = B + LifecycleExample.solution[t].bNrmMin(Shk)
-    MPC = LifecycleExample.solution[t].cFunc.derivativeX(
-        B_temp, Shk * np.ones_like(B_temp)
-    )
-    plt.plot(B_temp, MPC)
-    b_min = np.minimum(b_min, B_temp[0])
-    b_max = np.maximum(b_max, B_temp[-1])
-plt.title("Marginal consumption function across periods of the lifecycle")
-plt.xlabel("Beginning of period bank balances")
-plt.ylabel("Marginal propensity to consume")
-plt.xlim(b_min, b_max)
-plt.ylim(0.0, 1.0)
-plt.show()
-
-# %%
-# Plot the labor supply function in each period of the lifecycle, using median shock
-B = np.linspace(0.0, bMax, 300)
-b_min = np.inf
-b_max = -np.inf
-for t in range(LifecycleExample.T_cycle):
-    TranShkSet = LifecycleExample.TranShkGrid[t]
-    Shk = TranShkSet[int(len(TranShkSet) // 2)]  # Use the median shock, more or less
-    B_temp = B + LifecycleExample.solution[t].bNrmMin(Shk)
-    L = LifecycleExample.solution[t].LbrFunc(B_temp, Shk * np.ones_like(B_temp))
-    plt.plot(B_temp, L)
-    b_min = np.minimum(b_min, B_temp[0])
-    b_max = np.maximum(b_max,
B_temp[-1])
-plt.title("Labor supply function across periods of the lifecycle")
-plt.xlabel("Beginning of period bank balances")
-plt.ylabel("Labor supply")
-plt.xlim(b_min, b_max)
-plt.ylim(0.0, 1.01)
-plt.show()
-
-# %%
-# Plot the marginal value function in each period of the lifecycle, using median shock
-pseudo_inverse = True
-B = np.linspace(0.0, bMax, 300)
-b_min = np.inf
-b_max = -np.inf
-for t in range(LifecycleExample.T_cycle):
-    TranShkSet = LifecycleExample.TranShkGrid[t]
-    Shk = TranShkSet[int(len(TranShkSet) // 2)]  # Use the median shock, more or less
-    B_temp = B + LifecycleExample.solution[t].bNrmMin(Shk)
-    if pseudo_inverse:
-        vP = LifecycleExample.solution[t].vPfunc.cFunc(
-            B_temp, Shk * np.ones_like(B_temp)
-        )
-    else:
-        vP = LifecycleExample.solution[t].vPfunc(B_temp, Shk * np.ones_like(B_temp))
-    plt.plot(B_temp, vP)
-    b_min = np.minimum(b_min, B_temp[0])
-    b_max = np.maximum(b_max, B_temp[-1])
-plt.xlabel("Beginning of period bank balances")
-if pseudo_inverse:
-    plt.ylabel("Pseudo inverse marginal value")
-else:
-    plt.ylabel("Marginal value")
-plt.title("Marginal value across periods of the lifecycle")
-plt.xlim(b_min, b_max)
-plt.ylim(0.0, None)
-plt.show()
-
-# %%
diff --git a/examples/ConsumptionSaving/example_ConsMarkovModel.py b/examples/ConsumptionSaving/example_ConsMarkovModel.py
deleted file mode 100644
index 5d0614fa3..000000000
--- a/examples/ConsumptionSaving/example_ConsMarkovModel.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# -*- coding: utf-8 -*-
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: -all
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %%
-from HARK.utilities import plot_funcs
-from time import process_time
-from copy import deepcopy, copy
-import numpy as np
-from HARK.ConsumptionSaving.ConsIndShockModel import init_idiosyncratic_shocks
-from HARK.ConsumptionSaving.ConsMarkovModel import MarkovConsumerType
-from HARK.distribution import DiscreteDistributionLabeled
-
-mystr = lambda number: "{:.4f}".format(number)
-do_simulation = True
-
-# %% [markdown]
-# This module defines consumption-saving models in which an agent has CRRA utility over consumption, geometrically discounts future utility flows, and expects to experience transitory and permanent shocks to his/her income. Moreover, in any given period s/he is in exactly one of several discrete states. This state evolves from period to period according to a Markov process.
-
-# %% [markdown]
-# In this model, an agent is very similar to the one in the "idiosyncratic shocks" model of $\texttt{ConsIndShockModel}$, except that here, an agent's income distribution ($F_{\psi t},F_{\theta t}$), permanent income growth rate $\Gamma_{t+1}$ and interest factor $R$ are all functions of the Markov state and might vary across states. 
-#
-# The agent's problem can be written in Bellman form as:
-#
-# \begin{eqnarray*}
-# v_t(m_t,s_t) &=& \max_{c_t} u(c_t) + \beta (1-\mathsf{D}_{t+1}) \mathbb{E} [v_{t+1}(m_{t+1}, s_{t+1}) ], \\
-# a_t &=& m_t - c_t, \\
-# a_t &\geq& \underline{a}, \\
-# m_{t+1} &=& \frac{R(s_{t+1})}{\Gamma(s_{t+1})\psi_{t+1}} a_t + \theta_{t+1}, \\
-# \theta_{t} \sim F_{\theta t}(s_t), &\qquad& \psi_{t} \sim F_{\psi t}(s_t), \qquad \mathbb{E} [\psi_{t}] = 1, \\
-# Prob[s_{t+1}=j| s_t=i] &=& \triangle_{ij}, \\
-# u(c) &=& \frac{c^{1-\rho}}{1-\rho}
-# \end{eqnarray*}
-#
-# The Markov matrix $\triangle$ gives the transition probabilities from current state $i$ to future state $j$.

-# %% [markdown]
-# The one period problem for this model is solved by the function $\texttt{solveConsMarkov}$, which creates an instance of the class $\texttt{ConsMarkovSolver}$. The class $\texttt{MarkovConsumerType}$ extends $\texttt{IndShockConsumerType}$ to represent agents in this model.
-#
-# To construct an instance of this class, the same attributes as for $\texttt{IndShockConsumerType}$ are required, plus one additional attribute described below:

-# %% [markdown]
-# ### Additional parameter value to solve an instance of MarkovConsumerType
-#
-# | Param | Description | Code | Value | Constructed |
-# | :---: | --- | --- | --- | :---: |
-# | $\triangle$ | Discrete state transition probability matrix | $\texttt{MrkvArray}$ | |$\surd$ |
-#
-# The attribute $\texttt{MrkvArray}$ is a $\texttt{numpy.array}$ of size ($N_s$, $N_s$), where $N_s$ is the number of discrete states.
-#
-# Note that $\texttt{MrkvArray}$ is an element of $\texttt{time_inv}$, so that the same transition probabilities are used in every period. However, it can be moved to $\texttt{time_vary}$ and specified as a list of $\texttt{array}$s instead.
-#
-# The attributes $\texttt{Rfree}$, $\texttt{PermGroFac}$ and $\texttt{IncShkDstn}$ should be specified as arrays or lists with $N_s$ elements for each period.

-# %% [markdown]
-# ### Solve MarkovConsumerType
-#
-# When the $\texttt{solve}$ method of a $\texttt{MarkovConsumerType}$ is invoked, the $\texttt{solution}$ attribute is populated with a list of $\texttt{ConsumerSolution}$ objects, each of which has the same attributes as in the "idiosyncratic shocks" model. However, each attribute is now a list (or array) whose elements are *state-conditional* values of that object.
-#
-# For example, in a model with 4 discrete states, the $\texttt{cFunc}$ attribute of each element of $\texttt{solution}$ is a length-4 list whose elements are state-conditional consumption functions. That is, $\texttt{cFunc[2]}$ is the consumption function when $s_t = 2$.
-#
-# $\texttt{ConsMarkovModel}$ is compatible with cubic spline interpolation for the consumption functions, so $\texttt{CubicBool = True}$ will not generate an exception. The problem is solved using the [method of endogenous gridpoints](https://www.econ2.jhu.edu/people/ccarroll/EndogenousGridpoints.pdf), which is moderately more complicated than in the basic $\texttt{ConsIndShockModel}$. 
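-
-# %% [markdown]
-# As a minimal sketch of the shape conventions above (the two-state numbers here are
-# purely illustrative and not part of any calibration), a transition matrix and the
-# matching state-conditional parameter arrays might look like this:

-# %%
-sketch_MrkvArray = np.array([[0.9, 0.1], [0.4, 0.6]])  # rows = today's state, columns = tomorrow's
-assert np.allclose(sketch_MrkvArray.sum(axis=1), 1.0)  # each row is a probability distribution
-sketch_Rfree = np.array([1.03, 1.02])  # one interest factor per Markov state
-sketch_PermGroFac = [np.array([1.02, 0.98])]  # per-state growth factors, in a one-period list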
-
-# %%
-# Define the Markov transition matrix for serially correlated unemployment
-unemp_length = 5  # Average length of unemployment spell
-urate_good = 0.05  # Unemployment rate when economy is in good state
-urate_bad = 0.12  # Unemployment rate when economy is in bad state
-bust_prob = 0.01  # Probability of economy switching from good to bad
-recession_length = 20  # Average length of bad state
-p_reemploy = 1.0 / unemp_length
-p_unemploy_good = p_reemploy * urate_good / (1 - urate_good)
-p_unemploy_bad = p_reemploy * urate_bad / (1 - urate_bad)
-boom_prob = 1.0 / recession_length
-MrkvArray = np.array(
-    [
-        [
-            (1 - p_unemploy_good) * (1 - bust_prob),
-            p_unemploy_good * (1 - bust_prob),
-            (1 - p_unemploy_good) * bust_prob,
-            p_unemploy_good * bust_prob,
-        ],
-        [
-            p_reemploy * (1 - bust_prob),
-            (1 - p_reemploy) * (1 - bust_prob),
-            p_reemploy * bust_prob,
-            (1 - p_reemploy) * bust_prob,
-        ],
-        [
-            (1 - p_unemploy_bad) * boom_prob,
-            p_unemploy_bad * boom_prob,
-            (1 - p_unemploy_bad) * (1 - boom_prob),
-            p_unemploy_bad * (1 - boom_prob),
-        ],
-        [
-            p_reemploy * boom_prob,
-            (1 - p_reemploy) * boom_prob,
-            p_reemploy * (1 - boom_prob),
-            (1 - p_reemploy) * (1 - boom_prob),
-        ],
-    ]
-)
-
-# %% [markdown]
-# Several variants of the model are illustrated below:
-# 1. Model with serially correlated unemployment
-# 2. Model with a period of "unemployment immunity"
-# 3. Model with serially correlated permanent income growth
-# 4. Model with serially correlated interest factor
-#
-# ### 1. Serial Unemployment
-#
-# Let's create a consumer similar to the one in the "idiosyncratic shocks" model, but who faces serially correlated unemployment during boom and bust cycles of the economy.
-
-# %%
-# Make a consumer with serially correlated unemployment, subject to boom and bust cycles
-init_serial_unemployment = copy(init_idiosyncratic_shocks)
-init_serial_unemployment["MrkvArray"] = [MrkvArray]
-init_serial_unemployment["UnempPrb"] = 0.0  # unemployment is governed by the Markov state, not the default income process
-init_serial_unemployment["global_markov"] = False
-SerialUnemploymentExample = MarkovConsumerType(**init_serial_unemployment)
-SerialUnemploymentExample.cycles = 0
-SerialUnemploymentExample.vFuncBool = False  # for easy toggling here
-
-# %%
-# Replace the default (lognormal) income distribution with a custom one
-employed_income_dist = DiscreteDistributionLabeled(
-    pmv=np.ones(1), atoms=np.array([[1.0], [1.0]]), var_names=["PermShk", "TranShk"]
-)  # Definitely get income
-unemployed_income_dist = DiscreteDistributionLabeled(
-    pmv=np.ones(1), atoms=np.array([[1.0], [0.0]]), var_names=["PermShk", "TranShk"]
-)  # Definitely don't
-SerialUnemploymentExample.IncShkDstn = [
-    [
-        employed_income_dist,
-        unemployed_income_dist,
-        employed_income_dist,
-        unemployed_income_dist,
-    ]
-]
-
-# %% [markdown]
-# Note that $\texttt{MarkovConsumerType}$ currently has no method to automatically construct a valid income process; $\texttt{IncShkDstn}$ is manually constructed in each case. Writing a method to supersede $\texttt{IndShockConsumerType.update_income_process}$ for the "Markov model" would be a welcome contribution! 
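-
-# %% [markdown]
-# As a quick sanity check on the hand-built objects above (a sketch, not part of the
-# original example): every row of the transition matrix should sum to one, and there
-# should be exactly one income distribution per Markov state.

-# %%
-assert np.allclose(MrkvArray.sum(axis=1), 1.0)  # rows are conditional probability distributions
-assert len(SerialUnemploymentExample.IncShkDstn[0]) == MrkvArray.shape[0]  # one distribution per state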
-
-# %%
-# Interest factor, permanent growth rates, and survival probabilities are constant arrays
-SerialUnemploymentExample.assign_parameters(
-    Rfree=np.array(4 * [SerialUnemploymentExample.Rfree])
-)
-SerialUnemploymentExample.PermGroFac = [
-    np.array(4 * SerialUnemploymentExample.PermGroFac)
-]
-SerialUnemploymentExample.LivPrb = [SerialUnemploymentExample.LivPrb * np.ones(4)]
-
-# %%
-# Solve the serial unemployment consumer's problem and display the solution
-start_time = process_time()
-SerialUnemploymentExample.solve()
-end_time = process_time()
-print(
-    "Solving a Markov consumer with serially correlated unemployment took "
-    + mystr(end_time - start_time)
-    + " seconds."
-)
-print("Consumption functions for each discrete state:")
-plot_funcs(SerialUnemploymentExample.solution[0].cFunc, 0, 50)
-if SerialUnemploymentExample.vFuncBool:
-    print("Value functions for each discrete state:")
-    plot_funcs(SerialUnemploymentExample.solution[0].vFunc, 5, 50)
-
-# %%
-# Simulate some data; the tracked variables are stored in the agent's history dictionary
-if do_simulation:
-    SerialUnemploymentExample.T_sim = 120
-    SerialUnemploymentExample.MrkvPrbsInit = [0.25, 0.25, 0.25, 0.25]
-    SerialUnemploymentExample.track_vars = ["mNrm", "cNrm"]
-    SerialUnemploymentExample.make_shock_history()  # This is optional
-    SerialUnemploymentExample.initialize_sim()
-    SerialUnemploymentExample.simulate()
-
-# %% [markdown]
-# ### 2. Unemployment immunity for a fixed period
-#
-# Let's create a consumer similar to the one in the "idiosyncratic shocks" model, but who occasionally gets "unemployment immunity" for a fixed period in an economy subject to boom and bust cycles.
-
-# %%
-# Make a consumer who occasionally gets "unemployment immunity" for a fixed period
-UnempPrb = 0.05  # Probability of becoming unemployed each period
-ImmunityPrb = 0.01  # Probability of becoming "immune" to unemployment
-ImmunityT = 6  # Number of periods of immunity
-
-# %%
-StateCount = ImmunityT + 1  # Total number of Markov states
-IncomeDstnReg = DiscreteDistributionLabeled(
-    pmv=np.array([1 - UnempPrb, UnempPrb]),
-    atoms=np.array([[1.0, 1.0], [1.0 / (1.0 - UnempPrb), 0.0]]),
-    var_names=["PermShk", "TranShk"],
-)  # Ordinary income distribution
-IncomeDstnImm = DiscreteDistributionLabeled(
-    pmv=np.array([1.0]),
-    atoms=np.array([[1.0], [1.0]]),
-    var_names=["PermShk", "TranShk"],
-)  # Income distribution when immune to unemployment
-IncomeDstn = [IncomeDstnReg] + ImmunityT * [
-    IncomeDstnImm
-]  # Income distribution for each Markov state, in a list
-
-# %%
-# Make the Markov transition array. MrkvArray[i,j] is the probability of transitioning
-# to state j in period t+1 from state i in period t. 
-MrkvArray = np.zeros((StateCount, StateCount))
-MrkvArray[0, 0] = (
-    1.0 - ImmunityPrb
-)  # Probability of not becoming immune in ordinary state: stay in ordinary state
-MrkvArray[
-    0, ImmunityT
-] = ImmunityPrb  # Probability of becoming immune in ordinary state: begin immunity periods
-for j in range(ImmunityT):
-    MrkvArray[
-        j + 1, j
-    ] = 1.0  # When immune, have 100% chance of transitioning to the state with one fewer immunity period remaining
-
-# %%
-init_unemployment_immunity = copy(init_idiosyncratic_shocks)
-init_unemployment_immunity["MrkvArray"] = [MrkvArray]
-ImmunityExample = MarkovConsumerType(**init_unemployment_immunity)
-ImmunityExample.assign_parameters(
-    Rfree=np.array(StateCount * [1.03]),  # Interest factor same in all states
-    PermGroFac=[
-        np.array(StateCount * [1.01])
-    ],  # Permanent growth factor same in all states
-    LivPrb=[np.array(StateCount * [0.98])],  # Same survival probability in all states
-    BoroCnstArt=None,  # No artificial borrowing constraint
-    cycles=0,
-)  # Infinite horizon
-ImmunityExample.IncShkDstn = [IncomeDstn]
-
-# %%
-# Solve the unemployment immunity problem and display the consumption functions
-start_time = process_time()
-ImmunityExample.solve()
-end_time = process_time()
-print(
-    'Solving an "unemployment immunity" consumer took '
-    + mystr(end_time - start_time)
-    + " seconds."
-)
-print("Consumption functions for each discrete state:")
-mNrmMin = np.min([ImmunityExample.solution[0].mNrmMin[j] for j in range(StateCount)])
-plot_funcs(ImmunityExample.solution[0].cFunc, mNrmMin, 10)
-
-# %% [markdown]
-# ### 3. Serial permanent income growth
-#
-# Let's create a consumer similar to the one in the "idiosyncratic shocks" model, but who faces serially correlated permanent income growth in an economy subject to boom and bust cycles.
-
-# %%
-# Make a consumer with serially correlated permanent income growth
-UnempPrb = 0.05  # Unemployment probability
-StateCount = 5  # Number of permanent income growth rates
-Persistence = (
-    0.5  # Probability of keeping the same permanent income growth rate next period
-)
-
-# %%
-IncomeDstnReg = DiscreteDistributionLabeled(
-    pmv=np.array([1 - UnempPrb, UnempPrb]),
-    atoms=np.array([[1.0, 1.0], [1.0, 0.0]]),
-    var_names=["PermShk", "TranShk"],
-)
-IncomeDstn = StateCount * [
-    IncomeDstnReg
-]  # Same simple income distribution in each state
-
-# %%
-# Make the state transition array for this type: with probability Persistence, stay in the
-# current state; otherwise, draw a new state uniformly at random (possibly the same one)
-MrkvArray = Persistence * np.eye(StateCount) + (1.0 / StateCount) * (
-    1.0 - Persistence
-) * np.ones((StateCount, StateCount))
-
-# %%
-init_serial_growth = copy(init_idiosyncratic_shocks)
-init_serial_growth["MrkvArray"] = [MrkvArray]
-SerialGroExample = MarkovConsumerType(**init_serial_growth)
-SerialGroExample.assign_parameters(
-    Rfree=np.array(StateCount * [1.03]),  # Same interest factor in each Markov state
-    PermGroFac=[
-        np.array([0.97, 0.99, 1.01, 1.03, 1.05])
-    ],  # Different permanent growth factor in each Markov state
-    LivPrb=[np.array(StateCount * [0.98])],  # Same survival probability in all states
-    cycles=0,
-)
-SerialGroExample.IncShkDstn = [IncomeDstn]
-
-
-# %% [markdown]
-# ### 4. Serial interest factor
-#
-# Finally, suppose that the consumer faces a serially correlated interest factor while his/her permanent income growth rate is constant. 
-
-# %%
-# Solve the serially correlated permanent growth shock problem and display the consumption functions
-start_time = process_time()
-SerialGroExample.solve()
-end_time = process_time()
-print(
-    "Solving a serially correlated growth consumer took "
-    + mystr(end_time - start_time)
-    + " seconds."
-)
-print("Consumption functions for each discrete state:")
-plot_funcs(SerialGroExample.solution[0].cFunc, 0, 10)
-
-# %%
-# Make a consumer with serially correlated interest factors
-SerialRExample = deepcopy(SerialGroExample)  # Same as the last problem...
-SerialRExample.assign_parameters(
-    PermGroFac=[
-        np.array(StateCount * [1.01])
-    ],  # ...but now the permanent growth factor is constant...
-    Rfree=np.array([1.01, 1.02, 1.03, 1.04, 1.05]),
-)  # ...and the interest factor is what varies across states
-
-# %%
-# Solve the serially correlated interest rate problem and display the consumption functions
-start_time = process_time()
-SerialRExample.solve()
-end_time = process_time()
-print(
-    "Solving a serially correlated interest consumer took "
-    + mystr(end_time - start_time)
-    + " seconds."
-)
-print("Consumption functions for each discrete state:")
-plot_funcs(SerialRExample.solution[0].cFunc, 0, 10)
-
-# %%
diff --git a/examples/ConsumptionSaving/example_ConsMedModel.py b/examples/ConsumptionSaving/example_ConsMedModel.py
deleted file mode 100644
index 1b556def0..000000000
--- a/examples/ConsumptionSaving/example_ConsMedModel.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: -all
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %%
-from HARK.rewards import CRRAutility_inv
-from time import time
-import matplotlib.pyplot as plt
-import numpy as np
-from HARK.ConsumptionSaving.ConsMedModel import MedShockConsumerType
-
-# %%
-mystr = lambda number: "{:.4f}".format(number)
-
-# %%
-do_simulation = True
-
-# %% [markdown]
-# This module defines consumption-saving models in which an agent faces medical expenditures, with optimal spending split between ordinary consumption and medical care.
-#
-# In this model, the agent consumes two goods: an ordinary composite consumption good and medical care, each of which yields CRRA utility, with possibly different curvature coefficients on the two goods. The agent expects to receive shocks to permanent and transitory income as well as multiplicative shocks to the utility of medical care (medical need shocks).

-# %% [markdown]
-# The agent's problem can be written in Bellman form as:
-#
-# \begin{eqnarray*}
-# v_t(M_t,p_t, medShk_t) &=& \max_{c_t, med_t} U(c_t, med_t; medShk_t) + \beta (1-\mathsf{D}_{t+1}) \mathbb{E} [v_{t+1}(M_{t+1}, p_{t+1}, medShk_{t+1})], \\
-# a_t &=& M_t - c_t - \text{MedPrice}_t \, med_t, \\
-# a_t &\geq& \underline{a}, \\
-# M_{t+1} &=& R a_t + \theta_{t+1}, \\
-# p_{t+1} &=& \gimel_{t+1}(p_t)\psi_{t+1}, \\
-# medShk_{t+1} &\sim& F_{medShk\,t+1}, \\
-# \psi_t \sim F_{\psi t}, &\qquad& \theta_t \sim F_{\theta t}, \qquad \mathbb{E} [\psi_{t}] = 1, \\
-# U(c, med; medShk) &=& \frac{c^{1-\rho}}{1-\rho} + medShk \cdot \frac{med^{1-\rho_{med}}}{1-\rho_{med}}.
-# \end{eqnarray*}

-# %% [markdown]
-# The one period problem for this model is solved by the function $\texttt{solveConsMedShock}$, which creates an instance of the class $\texttt{ConsMedShockSolver}$. 
The class $\texttt{MedShockConsumerType}$ extends $\texttt{PersistentShockConsumerType}$ from $\texttt{GenIncProcessModel}$ to represent agents in this model.
-
-# %%
-# Make and solve an example medical shocks consumer type
-MedicalExample = MedShockConsumerType()
-t_start = time()
-MedicalExample.solve()
-t_end = time()
-print("Solving a medical shocks consumer took " + mystr(t_end - t_start) + " seconds.")
-
-# %%
-# Plot the consumption function
-M = np.linspace(0, 30, 300)
-pLvl = 1.0
-P = pLvl * np.ones_like(M)
-for j in range(MedicalExample.MedShkDstn[0].pmv.size):
-    MedShk = MedicalExample.MedShkDstn[0].atoms.flatten()[j] * np.ones_like(M)
-    M_temp = M + MedicalExample.solution[0].mLvlMin(pLvl)
-    C = MedicalExample.solution[0].cFunc(M_temp, P, MedShk)
-    plt.plot(M_temp, C)
-print("Consumption function by medical need shock (constant permanent income)")
-plt.show()
-
-# %%
-# Plot the medical care function
-for j in range(MedicalExample.MedShkDstn[0].pmv.size):
-    MedShk = MedicalExample.MedShkDstn[0].atoms.flatten()[j] * np.ones_like(M)
-    Med = MedicalExample.solution[0].MedFunc(M_temp, P, MedShk)
-    plt.plot(M_temp, Med)
-print("Medical care function by medical need shock (constant permanent income)")
-plt.ylim([0, 20])
-plt.show()
-
-# %%
-# Plot the savings function
-for j in range(MedicalExample.MedShkDstn[0].pmv.size):
-    MedShk = MedicalExample.MedShkDstn[0].atoms.flatten()[j] * np.ones_like(M)
-    Sav = (
-        M_temp
-        - MedicalExample.solution[0].cFunc(M_temp, P, MedShk)
-        - MedicalExample.MedPrice[0]
-        * MedicalExample.solution[0].MedFunc(M_temp, P, MedShk)
-    )
-    plt.plot(M_temp, Sav)
-print("End of period savings by medical need shock (constant permanent income)")
-plt.show()
-
-# %%
-# Plot the marginal value function
-M = np.linspace(0.0, 30, 300)
-for p in range(MedicalExample.pLvlGrid[0].size):
-    pLvl = MedicalExample.pLvlGrid[0][p]
-    M_temp = pLvl * M + MedicalExample.solution[0].mLvlMin(pLvl)
-    P = pLvl * np.ones_like(M)
-    vP = MedicalExample.solution[0].vPfunc(M_temp, P) ** (-1.0 / MedicalExample.CRRA)
-    plt.plot(M_temp, vP)
-print("Marginal value function (pseudo inverse)")
-plt.show()
-
-# %%
-if MedicalExample.vFuncBool:
-    # Plot the value function
-    M = np.linspace(0.0, 1, 300)
-    for p in range(MedicalExample.pLvlGrid[0].size):
-        pLvl = MedicalExample.pLvlGrid[0][p]
-        M_temp = pLvl * M + MedicalExample.solution[0].mLvlMin(pLvl)
-        P = pLvl * np.ones_like(M)
-        v = CRRAutility_inv(
-            MedicalExample.solution[0].vFunc(M_temp, P), gam=MedicalExample.CRRA
-        )
-        plt.plot(M_temp, v)
-    print("Value function (pseudo inverse)")
-    plt.show()
-
-# %%
-if do_simulation:
-    t_start = time()
-    MedicalExample.T_sim = 100
-    MedicalExample.track_vars = ["mLvl", "cLvl", "Med"]
-    MedicalExample.make_shock_history()
-    MedicalExample.initialize_sim()
-    MedicalExample.simulate()
-    t_end = time()
-    print(
-        "Simulating "
-        + str(MedicalExample.AgentCount)
-        + " agents for "
-        + str(MedicalExample.T_sim)
-        + " periods took "
-        + mystr(t_end - t_start)
-        + " seconds."
-    )
-
-# %%
diff --git a/examples/ConsumptionSaving/example_ConsPrefShockModel.py b/examples/ConsumptionSaving/example_ConsPrefShockModel.py
deleted file mode 100644
index 633268fed..000000000
--- a/examples/ConsumptionSaving/example_ConsPrefShockModel.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: -all
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %%
-import matplotlib.pyplot as plt
-from HARK.utilities import plot_funcs
-from time import process_time
-import numpy as np
-from HARK.ConsumptionSaving.ConsPrefShockModel import (
-    PrefShockConsumerType,
-    KinkyPrefConsumerType,
-)
-
-mystr = lambda number: "{:.4f}".format(number)
-do_simulation = True
-
-# %% [markdown]
-# This module defines consumption-saving models in which agents have CRRA utility over a unitary consumption good, discount geometrically, and face idiosyncratic shocks both to income and to their utility or preferences. That is, this module contains models that extend `ConsIndShockModel` with preference shocks.
-#
-# `ConsPrefShockModel` currently solves two types of models:
-# 1. An extension of `ConsIndShock`, but with an iid lognormal multiplicative shock to utility each period.
-# 2. A combination of (1) and $\texttt{ConsKinkedR}$, demonstrating how to construct a new model
-# by inheriting from multiple classes.

-# %% [markdown]
-# ### Multiplicative Shocks to Utility
-#
-# In this model, an agent is very similar to the one in the "idiosyncratic shocks" model, except that in `ConsPrefShockModel` an agent receives an iid multiplicative shock to his/her utility at the beginning of each period, before making the consumption decision.
-#
-# The agent's problem can be written in (normalized) Bellman form as:
-#
-# \begin{eqnarray*}
-# v_t(m_t,\eta_t) &=& \max_{c_t} \eta_t u(c_t) + \beta (1-\mathsf{D}_{t+1}) \mathbb{E} [(\Gamma_{t+1}\psi_{t+1})^{1-\rho} v_{t+1}(m_{t+1}, \eta_{t+1}) ], \\
-# a_t &=& m_t - c_t, \\
-# a_t &\geq& \underline{a}, \\
-# m_{t+1} &=& R/(\Gamma_{t+1}\psi_{t+1}) a_t + \theta_{t+1}, \\
-# \psi_{t} \sim F_{\psi t}, &\qquad& \theta_{t} \sim F_{\theta t}, \qquad \eta_t \sim F_{\eta t}, \qquad \mathbb{E} [\psi_t] = 1, \\
-# u(c) &=& \frac{c^{1-\rho}}{1-\rho}
-# \end{eqnarray*}
-#

-# %% [markdown]
-# The one period problem for this model is solved by the function `solveConsPrefShock`, which creates an instance of the class `ConsPrefShockSolver`. The class `PrefShockConsumerType` extends `IndShockConsumerType` to represent agents in this model.
-#
-# To construct an instance of this class, 3 additional attributes must be passed to the constructor, as shown in the table below (parameters can be either "primitive" if they are directly specified by the user or "constructed" if they are built by a class method using simpler parameters specified by the user). 
-
-# %% [markdown]
-# ### Additional parameter values to solve an instance of PrefShockConsumerType
-#
-# | Param | Description | Code | Value | Constructed |
-# | :---: | --- | --- | --- | :---: |
-# | $N_{\eta}$ | Number of discrete points in "body" of preference shock distribution | $\texttt{PrefShkCount}$ | 12 |$\surd$ |
-# | $N_{\eta,tail}$ | Number of discrete points in "tails" of preference shock distribution | $\texttt{PrefShk_tail_N}$ | 4 |$\surd$ |
-# | $\sigma_{\eta}$ | Log standard deviation of multiplicative utility shocks | $\texttt{PrefShkStd}$ | [0.30] |$\surd$ |
-#
-#

-# %% [markdown]
-# ### Constructed inputs to solve ConsPrefShockModel
-#
-# * The tails of the preference shock distribution are of great importance for the accuracy of the solution and are underrepresented by the default equiprobable discrete approximation (unless a very large number of points is used).
-# To fix this issue, the attribute $\texttt{PrefShk_tail_N}$ specifies the number of points in each "augmented tail" section of the preference shock discrete approximation.
-# See [HARK.utilities.approxLognormal](https://github.com/econ-ark/HARK/blob/master/HARK/utilities.py) for more details.
-#
-#
-# * The standard deviation of preference shocks might vary by period. Therefore, $\texttt{PrefShkStd}$ should be input as a list.

-# %% [markdown]
-# Note that the `solve` method of `PrefShockConsumerType` populates the `solution` with a list of `ConsumerSolution` instances. These single-period-solution objects have the same attributes as in the "idiosyncratic shocks" model, but the attribute $\texttt{cFunc}$ is defined over the space of ($m_{t}$, $\eta_{t}$) rather than just $m_{t}$.
-#
-# The value function $\texttt{vFunc}$ and marginal value function $\texttt{vPfunc}$, however, are defined *only* over $m_{t}$, as they represent expected (marginal) value *just before* the preference shock $\eta_{t}$ is realized. 
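-
-# %% [markdown]
-# As a sketch of how the three attributes from the table above could be passed to the
-# constructor (the values below just restate the table's defaults and are not a
-# calibration):

-# %%
-PrefShkSketch = PrefShockConsumerType(
-    PrefShkCount=12,  # points in the "body" of the preference shock approximation
-    PrefShk_tail_N=4,  # extra points in each augmented tail
-    PrefShkStd=[0.30],  # log stdev of the utility shock, as a per-period list
-)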
-
-# %%
-# Make and solve a preference shock consumer
-PrefShockExample = PrefShockConsumerType()
-PrefShockExample.cycles = 0  # Infinite horizon
-
-# %%
-t_start = process_time()
-PrefShockExample.solve()
-t_end = process_time()
-print("Solving a preference shock consumer took " + str(t_end - t_start) + " seconds.")
-
-# %%
-# Plot the consumption function at each discrete shock
-m = np.linspace(PrefShockExample.solution[0].mNrmMin, 5, 200)
-print("Consumption functions at each discrete shock:")
-for j in range(PrefShockExample.PrefShkDstn[0].pmv.size):
-    PrefShk = PrefShockExample.PrefShkDstn[0].atoms.flatten()[j]
-    c = PrefShockExample.solution[0].cFunc(m, PrefShk * np.ones_like(m))
-    plt.plot(m, c)
-plt.xlim([0.0, None])
-plt.ylim([0.0, None])
-plt.show()
-
-# %%
-print("Consumption function (and MPC) when shock=1:")
-c = PrefShockExample.solution[0].cFunc(m, np.ones_like(m))
-k = PrefShockExample.solution[0].cFunc.derivativeX(m, np.ones_like(m))
-plt.plot(m, c)
-plt.plot(m, k)
-plt.xlim([0.0, None])
-plt.ylim([0.0, None])
-plt.show()
-
-# %%
-if PrefShockExample.vFuncBool:
-    print("Value function (unconditional on shock):")
-    plot_funcs(
-        PrefShockExample.solution[0].vFunc,
-        PrefShockExample.solution[0].mNrmMin + 0.5,
-        5,
-    )
-
-# %%
-# Test the simulator for the pref shock class
-if do_simulation:
-    PrefShockExample.T_sim = 120
-    PrefShockExample.track_vars = ["cNrm"]
-    PrefShockExample.make_shock_history()  # This is optional
-    PrefShockExample.initialize_sim()
-    PrefShockExample.simulate()
-
-# %% [markdown]
-# ### Utility Shocks and Different Interest Rates
-#
-# In this model, an agent faces idiosyncratic shocks to permanent and transitory income and multiplicative shocks to utility, *and* faces a different interest rate on borrowing vs saving. This agent's model is identical to that of `ConsPrefShockModel`, with the addition of the interest rate rule from the `KinkedRconsumerType` in `ConsIndShockModel`.

-# %% [markdown]
-# The one period problem of this model is solved by the function `solveConsKinkyPref`, which creates an instance of `ConsKinkyPrefSolver`. The class `KinkyPrefConsumerType` represents agents in this model.
-#
-# Thanks to HARK's object-oriented approach to solution methods, it is trivial to combine two models to make a new one. In the current case, the solver and consumer classes each inherit from both $\texttt{KinkedR}$ and $\texttt{PrefShock}$ and only need a trivial constructor function to rectify the differences between the two.
-#
-#

-# %% [markdown]
-# ### Constructed inputs to solve KinkyPref
-#
-# * The attributes required to properly construct an instance of $\texttt{KinkyPrefConsumerType}$ are the same as for $\texttt{PrefShockConsumerType}$, except that $\texttt{Rfree}$ should be replaced with $\texttt{Rboro}$ and $\texttt{Rsave}$, as in the "kinked R" parent model; see the sketch below.
-#
-# * Also, as in $\texttt{KinkedR}$ and $\texttt{PrefShock}$, $\texttt{KinkyPref}$ is not yet compatible with cubic spline interpolation of the consumption function. 
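-
-# %% [markdown]
-# A sketch of constructing a "kinky preference" consumer with the kinked-R attributes in
-# place of $\texttt{Rfree}$ (the two rates below are illustrative, not calibrated):

-# %%
-KinkyPrefSketch = KinkyPrefConsumerType(
-    Rboro=1.20,  # interest factor when borrowing (a < 0)
-    Rsave=1.02,  # interest factor when saving (a > 0)
-)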
-
-# %%
-# Make and solve a "kinky preference" consumer, whose model combines KinkedR and PrefShock
-KinkyPrefExample = KinkyPrefConsumerType()
-KinkyPrefExample.cycles = 0  # Infinite horizon
-
-# %%
-t_start = process_time()
-KinkyPrefExample.solve()
-t_end = process_time()
-print("Solving a kinky preference consumer took " + str(t_end - t_start) + " seconds.")
-
-# %%
-# Plot the consumption function at each discrete shock
-m = np.linspace(KinkyPrefExample.solution[0].mNrmMin, 5, 200)
-print("Consumption functions at each discrete shock:")
-for j in range(KinkyPrefExample.PrefShkDstn[0].atoms.size):
-    PrefShk = KinkyPrefExample.PrefShkDstn[0].atoms.flatten()[j]
-    c = KinkyPrefExample.solution[0].cFunc(m, PrefShk * np.ones_like(m))
-    plt.plot(m, c)
-plt.ylim([0.0, None])
-plt.show()
-
-# %%
-print("Consumption function (and MPC) when shock=1:")
-c = KinkyPrefExample.solution[0].cFunc(m, np.ones_like(m))
-k = KinkyPrefExample.solution[0].cFunc.derivativeX(m, np.ones_like(m))
-plt.plot(m, c)
-plt.plot(m, k)
-plt.ylim([0.0, None])
-plt.show()
-
-# %%
-if KinkyPrefExample.vFuncBool:
-    print("Value function (unconditional on shock):")
-    plot_funcs(
-        KinkyPrefExample.solution[0].vFunc,
-        KinkyPrefExample.solution[0].mNrmMin + 0.5,
-        5,
-    )
-
-# %%
-# Test the simulator for the kinky preference class
-if do_simulation:
-    KinkyPrefExample.T_sim = 120
-    KinkyPrefExample.track_vars = ["cNrm", "PrefShk"]
-    KinkyPrefExample.initialize_sim()
-    KinkyPrefExample.simulate()
-
-# %%

-# %%
diff --git a/examples/ConsumptionSaving/example_ConsRepAgentModel.py b/examples/ConsumptionSaving/example_ConsRepAgentModel.py
deleted file mode 100644
index 6618faae2..000000000
--- a/examples/ConsumptionSaving/example_ConsRepAgentModel.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: -all
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %%
-from copy import deepcopy
-from time import time
-import numpy as np
-from HARK.utilities import plot_funcs
-from HARK.ConsumptionSaving.ConsIndShockModel import init_idiosyncratic_shocks
-
-from HARK.ConsumptionSaving.ConsRepAgentModel import (
-    RepAgentConsumerType,
-    RepAgentMarkovConsumerType,
-)
-
-# %% [markdown]
-# This module contains models for solving representative agent (RA) macroeconomic models. This stands in contrast to all other model modules in HARK, which (unsurprisingly) take a heterogeneous agents approach.
-# In RA models, all attributes are either time invariant or exist on a short cycle. Also, models must be infinite horizon.

-# %% [markdown]
-# Each period, the representative agent decides how much of his resources $m_t$ to consume $c_t$ and how much to retain as assets $a_t$. He gets a flow of utility from consumption, with CRRA preferences (with coefficient $\rho$). Retained assets are used to finance productive capital $k_{t+1}$ in the next period. Output is produced according to a Cobb-Douglas production function using capital and labor $\ell_{t+1}$, with a capital share of $\alpha$; a fraction $\delta$ of capital depreciates immediately after production.
-#
-# The agent's labor productivity is subject to permanent and transitory shocks, $\psi_t$ and $\theta_t$ respectively. 
The representative agent stands in for a continuum of identical households, so markets are assumed competitive: capital and labor each earn their (net) marginal product.
-#
-# In the notation below, all lowercase state and control variables ($m_t$, $c_t$, etc) are normalized by the permanent labor productivity of the agent. The level of these variables at any time $t$ can be recovered by multiplying by permanent labor productivity $p_t$ (itself usually normalized to 1 at model start).

-# %% [markdown]
-# The agent's problem can be written in Bellman form as:
-#
-# \begin{eqnarray*}
-# v_t(m_t) &=& \max_{c_t} U(c_t) + \beta \mathbb{E} [(\Gamma_{t+1}\psi_{t+1})^{1-\rho} v_{t+1}(m_{t+1})], \\
-# a_t &=& m_t - c_t, \\
-# \psi_{t+1} &\sim& F_{\psi t+1}, \qquad \mathbb{E} [\psi_{t}] = 1,\\
-# \theta_{t+1} &\sim& F_{\theta t+1}, \\
-# k_{t+1} &=& a_t/(\Gamma_{t+1}\psi_{t+1}), \\
-# R_{t+1} &=& 1 - \delta + \alpha (k_{t+1}/\theta_{t+1})^{(\alpha - 1)}, \\
-# w_{t+1} &=& (1-\alpha) (k_{t+1}/\theta_{t+1})^\alpha, \\
-# m_{t+1} &=& R_{t+1} k_{t+1} + w_{t+1}\theta_{t+1}, \\
-# U(c) &=& \frac{c^{1-\rho}}{1-\rho}
-# \end{eqnarray*}

-# %% [markdown]
-# The one period problem for this model is solved by the function $\texttt{solveConsRepAgent}$.
-
-# %%
-# Make a quick example dictionary
-RA_params = deepcopy(init_idiosyncratic_shocks)
-RA_params["DeprFac"] = 0.05
-RA_params["CapShare"] = 0.36
-RA_params["UnempPrb"] = 0.0
-RA_params["LivPrb"] = [1.0]
-
-# %%
-# Make and solve a rep agent model
-RAexample = RepAgentConsumerType(**RA_params)
-t_start = time()
-RAexample.solve()
-t_end = time()
-print(
-    "Solving a representative agent problem took " + str(t_end - t_start) + " seconds."
-)
-plot_funcs(RAexample.solution[0].cFunc, 0, 20)
-
-# %%
-# Simulate the representative agent model
-RAexample.T_sim = 2000
-RAexample.track_vars = ["cNrm", "mNrm", "Rfree", "wRte"]
-RAexample.initialize_sim()
-t_start = time()
-RAexample.simulate()
-t_end = time()
-print(
-    "Simulating a representative agent for "
-    + str(RAexample.T_sim)
-    + " periods took "
-    + str(t_end - t_start)
-    + " seconds."
-)
-
-# %%
-# Make and solve a Markov representative agent
-RA_markov_params = deepcopy(RA_params)
-RA_markov_params["PermGroFac"] = [[0.97, 1.03]]
-RA_markov_params["MrkvArray"] = np.array([[0.99, 0.01], [0.01, 0.99]])
-RA_markov_params["Mrkv"] = 0
-RAmarkovExample = RepAgentMarkovConsumerType(**RA_markov_params)
-RAmarkovExample.IncShkDstn = [2 * [RAmarkovExample.IncShkDstn[0]]]
-t_start = time()
-RAmarkovExample.solve()
-t_end = time()
-print(
-    "Solving a two state representative agent problem took "
-    + str(t_end - t_start)
-    + " seconds."
-)
-plot_funcs(RAmarkovExample.solution[0].cFunc, 0, 10)
-
-# %%
-# Simulate the two state representative agent model
-RAmarkovExample.T_sim = 2000
-RAmarkovExample.track_vars = ["cNrm", "mNrm", "Rfree", "wRte", "Mrkv"]
-RAmarkovExample.initialize_sim()
-t_start = time()
-RAmarkovExample.simulate()
-t_end = time()
-print(
-    "Simulating a two state representative agent for "
-    + str(RAmarkovExample.T_sim)
-    + " periods took "
-    + str(t_end - t_start)
-    + " seconds." 
-)
-
-# %%
diff --git a/examples/ConsumptionSaving/example_ConsRiskyContribModel.py b/examples/ConsumptionSaving/example_ConsRiskyContribModel.py
deleted file mode 100644
index 542f0682b..000000000
--- a/examples/ConsumptionSaving/example_ConsRiskyContribModel.py
+++ /dev/null
@@ -1,303 +0,0 @@
-# %%
-"""
-Example implementations of HARK.ConsumptionSaving.ConsRiskyContribModel
-"""
-from HARK.ConsumptionSaving.ConsRiskyContribModel import (
-    RiskyContribConsumerType,
-    init_risky_contrib,
-)
-from time import time
-import numpy as np
-
-# %% Define a plotting function
-
-
-def plot_slices_3d(
-    functions, bot_x, top_x, y_slices, N=300, y_name=None, titles=None, ax_labs=None
-):
-
-    import matplotlib.pyplot as plt
-
-    if isinstance(functions, list):
-        function_list = functions
-    else:
-        function_list = [functions]
-
-    nfunc = len(function_list)
-
-    # Initialize figure and axes
-    fig = plt.figure(figsize=plt.figaspect(1.0 / nfunc))
-
-    # Create x grid
-    x = np.linspace(bot_x, top_x, N, endpoint=True)
-
-    for k in range(nfunc):
-        ax = fig.add_subplot(1, nfunc, k + 1)
-
-        for y in y_slices:
-
-            if y_name is None:
-                lab = ""
-            else:
-                lab = y_name + "=" + str(y)
-
-            z = function_list[k](x, np.ones_like(x) * y)
-            ax.plot(x, z, label=lab)
-
-        if ax_labs is not None:
-            ax.set_xlabel(ax_labs[0])
-            ax.set_ylabel(ax_labs[1])
-
-        # ax.imshow(Z, extent=[bottom[0],top[0],bottom[1],top[1]], origin='lower')
-        # ax.colorbar();
-        if titles is not None:
-            ax.set_title(titles[k])
-
-        ax.set_xlim([bot_x, top_x])
-
-        if y_name is not None:
-            ax.legend()
-
-    plt.show()
-
-
-def plot_slices_4d(
-    functions,
-    bot_x,
-    top_x,
-    y_slices,
-    w_slices,
-    N=300,
-    slice_names=None,
-    titles=None,
-    ax_labs=None,
-):
-
-    import matplotlib.pyplot as plt
-
-    if isinstance(functions, list):
-        function_list = functions
-    else:
-        function_list = [functions]
-
-    nfunc = len(function_list)
-    nws = len(w_slices)
-
-    # Initialize figure and axes
-    fig = plt.figure(figsize=plt.figaspect(1.0 / nfunc))
-
-    # Create x grid
-    x = np.linspace(bot_x, top_x, N, endpoint=True)
-
-    for j in range(nws):
-        w = w_slices[j]
-
-        for k in range(nfunc):
-            ax = fig.add_subplot(nws, nfunc, j * nfunc + k + 1)
-
-            for y in y_slices:
-
-                if slice_names is None:
-                    lab = ""
-                else:
-                    lab = (
-                        slice_names[0]
-                        + "="
-                        + str(y)
-                        + ","
-                        + slice_names[1]
-                        + "="
-                        + str(w)
-                    )
-
-                z = function_list[k](x, np.ones_like(x) * y, np.ones_like(x) * w)
-                ax.plot(x, z, label=lab)
-
-            if ax_labs is not None:
-                ax.set_xlabel(ax_labs[0])
-                ax.set_ylabel(ax_labs[1])
-
-            # ax.imshow(Z, extent=[bottom[0],top[0],bottom[1],top[1]], origin='lower')
-            # ax.colorbar();
-            if titles is not None:
-                ax.set_title(titles[k])
-
-            ax.set_xlim([bot_x, top_x])
-
-            if slice_names is not None:
-                ax.legend()
-
-    plt.show()
-
-
-# %%
-# Solve an infinite horizon version
-
-# Get initial parameters
-par_infinite = init_risky_contrib.copy()
-# And make the problem infinite horizon
-par_infinite["cycles"] = 0
-# with the portfolio freely adjustable every period
-par_infinite["AdjustPrb"] = 1.0
-# and with a withdrawal tax
-par_infinite["tau"] = 0.1
-
-par_infinite["DiscreteShareBool"] = False
-par_infinite["vFuncBool"] = False
-
-# Create agent and solve it. 
-inf_agent = RiskyContribConsumerType(tolerance=1e-3, **par_infinite) -print("Now solving infinite horizon version") -t0 = time() -inf_agent.solve(verbose=True) -t1 = time() -print("Converged!") -print("Solving took " + str(t1 - t0) + " seconds.") - -# Plot policy functions -periods = [0] -n_slices = [0, 2, 6] -mMax = 20 - -dfracFunc = [inf_agent.solution[t].stage_sols["Reb"].dfracFunc_Adj for t in periods] -ShareFunc = [inf_agent.solution[t].stage_sols["Sha"].ShareFunc_Adj for t in periods] -cFuncFxd = [inf_agent.solution[t].stage_sols["Cns"].cFunc for t in periods] - -# Rebalancing -plot_slices_3d( - dfracFunc, - 0, - mMax, - y_slices=n_slices, - y_name="n", - titles=["t = " + str(t) for t in periods], - ax_labs=["m", "d"], -) -# Share -plot_slices_3d( - ShareFunc, - 0, - mMax, - y_slices=n_slices, - y_name="n", - titles=["t = " + str(t) for t in periods], - ax_labs=["m", "S"], -) - -# Consumption -shares = [0.0, 0.9] -plot_slices_4d( - cFuncFxd, - 0, - mMax, - y_slices=n_slices, - w_slices=shares, - slice_names=["n_til", "s"], - titles=["t = " + str(t) for t in periods], - ax_labs=["m_til", "c"], -) - -# %% -# Solve a short, finite horizon version -par_finite = init_risky_contrib.copy() - -# Four period model -par_finite["PermGroFac"] = [2.0, 1.0, 0.1, 1.0] -par_finite["PermShkStd"] = [0.1, 0.1, 0.0, 0.0] -par_finite["TranShkStd"] = [0.2, 0.2, 0.0, 0.0] -par_finite["AdjustPrb"] = [0.5, 0.5, 1.0, 1.0] -par_finite["tau"] = [0.1, 0.1, 0.0, 0.0] -par_finite["LivPrb"] = [1.0, 1.0, 1.0, 1.0] -par_finite["T_cycle"] = 4 -par_finite["T_retire"] = 0 -par_finite["T_age"] = 4 - -# Adjust discounting and returns distribution so that they make sense in a -# 4-period model -par_finite["DiscFac"] = 0.95 ** 15 -par_finite["Rfree"] = 1.03 ** 15 -par_finite["RiskyAvg"] = 1.08 ** 15 # Average return of the risky asset -par_finite["RiskyStd"] = 0.20 * np.sqrt(15) # Standard deviation of (log) risky returns - - -# Create and solve -contrib_agent = RiskyContribConsumerType(**par_finite) -print("Now solving") -t0 = time() -contrib_agent.solve() -t1 = time() -print("Solving took " + str(t1 - t0) + " seconds.") - -# Plot Policy functions -periods = [0, 2, 3] - -dfracFunc = [contrib_agent.solution[t].stage_sols["Reb"].dfracFunc_Adj for t in periods] -ShareFunc = [contrib_agent.solution[t].stage_sols["Sha"].ShareFunc_Adj for t in periods] -cFuncFxd = [contrib_agent.solution[t].stage_sols["Cns"].cFunc for t in periods] - -# Rebalancing -plot_slices_3d( - dfracFunc, - 0, - mMax, - y_slices=n_slices, - y_name="n", - titles=["t = " + str(t) for t in periods], - ax_labs=["m", "d"], -) -# Share -plot_slices_3d( - ShareFunc, - 0, - mMax, - y_slices=n_slices, - y_name="n", - titles=["t = " + str(t) for t in periods], - ax_labs=["m", "S"], -) -# Consumption -plot_slices_4d( - cFuncFxd, - 0, - mMax, - y_slices=n_slices, - w_slices=shares, - slice_names=["n_til", "s"], - titles=["t = " + str(t) for t in periods], - ax_labs=["m_til", "c"], -) - -# %% Simulate the finite horizon consumer -contrib_agent.track_vars = [ - "pLvl", - "t_age", - "Adjust", - "mNrm", - "nNrm", - "mNrmTilde", - "nNrmTilde", - "aNrm", - "cNrm", - "Share", - "dfrac", -] -contrib_agent.T_sim = 4 -contrib_agent.AgentCount = 10 -contrib_agent.initialize_sim() -contrib_agent.simulate() - -# %% Format simulation results - -import pandas as pd - -df = contrib_agent.history - -# Add an id to the simulation results -agent_id = np.arange(contrib_agent.AgentCount) -df["id"] = np.tile(agent_id, (contrib_agent.T_sim, 1)) - -# Flatten variables -df = {k: 
v.flatten(order="F") for k, v in df.items()}
-
-# Make dataframe
-df = pd.DataFrame(df)
diff --git a/examples/ConsumptionSaving/example_TractableBufferStockModel.py b/examples/ConsumptionSaving/example_TractableBufferStockModel.py
deleted file mode 100644
index 4c63108d0..000000000
--- a/examples/ConsumptionSaving/example_TractableBufferStockModel.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: -all
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %% [markdown]
-# # Example TractableBufferStockModel
-
-# %%
-import numpy as np  # numeric Python
-from HARK.utilities import plot_funcs  # basic plotting tools
-from HARK.ConsumptionSaving.ConsMarkovModel import (
-    MarkovConsumerType,
-)  # An alternative, much longer way to solve the TBS model
-from time import process_time  # timing utility
-from HARK.distribution import DiscreteDistributionLabeled
-from HARK.ConsumptionSaving.TractableBufferStockModel import TractableConsumerType
-
-do_simulation = True
-
-# %%
-# Define the model primitives
-base_primitives = {
-    "UnempPrb": 0.00625,  # Probability of becoming unemployed
-    "DiscFac": 0.975,  # Intertemporal discount factor
-    "Rfree": 1.01,  # Risk-free interest factor on assets
-    "PermGroFac": 1.0025,  # Permanent income growth factor (uncompensated)
-    "CRRA": 1.0,  # Coefficient of relative risk aversion
-}
-
-# %%
-# Define a dictionary to be used in case of simulation
-simulation_values = {
-    "aLvlInitMean": 0.0,  # Mean of log initial assets for new agents
-    "aLvlInitStd": 1.0,  # Stdev of log initial assets for new agents
-    "AgentCount": 10000,  # Number of agents to simulate
-    "T_sim": 120,  # Number of periods to simulate
-    "T_cycle": 1,  # Number of periods in the cycle
-}
-
-# %%
-# Make and solve a tractable consumer type
-ExampleType = TractableConsumerType()
-ExampleType.assign_parameters(**base_primitives)
-t_start = process_time()
-ExampleType.solve()
-t_end = process_time()
-print(
-    "Solving a tractable consumption-savings model took "
-    + str(t_end - t_start)
-    + " seconds."
-)
-
-# %%
-# Plot the consumption function and whatnot
-m_upper = 1.5 * ExampleType.mTarg
-conFunc_PF = lambda m: ExampleType.h * ExampleType.PFMPC + ExampleType.PFMPC * m
-# plot_funcs([ExampleType.solution[0].cFunc,ExampleType.mSSfunc,ExampleType.cSSfunc],0,m_upper)
-plot_funcs([ExampleType.solution[0].cFunc, ExampleType.solution[0].cFunc_U], 0, m_upper)
-
-# %%
-if do_simulation:
-    ExampleType.assign_parameters(
-        **simulation_values
-    )  # Set attributes needed for simulation
-    ExampleType.track_vars = ["mLvl"]
-    ExampleType.make_shock_history()
-    ExampleType.initialize_sim()
-    ExampleType.simulate()
-
-
-# %%
-# Now solve the same model using backward induction rather than the analytic method of TBS.
-# The TBS model is equivalent to a Markov model with two states, one of them absorbing (permanent unemployment). 
-MrkvArray = np.array(
-    [[1.0 - base_primitives["UnempPrb"], base_primitives["UnempPrb"]], [0.0, 1.0]]
-)  # Define the two state, absorbing unemployment Markov array
-init_consumer_objects = {
-    "CRRA": base_primitives["CRRA"],
-    "Rfree": np.array(
-        2 * [base_primitives["Rfree"]]
-    ),  # Interest factor (same in both states)
-    "PermGroFac": [
-        np.array(
-            2 * [base_primitives["PermGroFac"] / (1.0 - base_primitives["UnempPrb"])]
-        )
-    ],  # Unemployment-compensated permanent growth factor
-    "BoroCnstArt": None,  # Artificial borrowing constraint
-    "PermShkStd": [0.0],  # Permanent shock standard deviation
-    "PermShkCount": 1,  # Number of shocks in discrete permanent shock distribution
-    "TranShkStd": [0.0],  # Transitory shock standard deviation
-    "TranShkCount": 1,  # Number of shocks in discrete transitory shock distribution
-    "T_cycle": 1,  # Number of periods in cycle
-    "UnempPrb": 0.0,  # Unemployment probability (not used, as the unemployment here is *permanent*, not transitory)
-    "UnempPrbRet": 0.0,  # Unemployment probability when retired (irrelevant here)
-    "T_retire": 0,  # Age at retirement (turned off)
-    "IncUnemp": 0.0,  # Income when unemployed (irrelevant)
-    "IncUnempRet": 0.0,  # Income when unemployed and retired (irrelevant)
-    "aXtraMin": 0.001,  # Minimum value of assets above minimum in grid
-    "aXtraMax": ExampleType.mUpperBnd,  # Maximum value of assets above minimum in grid
-    "aXtraCount": 48,  # Number of points in assets grid
-    "aXtraExtra": [None],  # Additional points to include in assets grid
-    "aXtraNestFac": 3,  # Degree of exponential nesting when constructing assets grid
-    "LivPrb": [np.array([1.0, 1.0])],  # Survival probability
-    "DiscFac": base_primitives["DiscFac"],  # Intertemporal discount factor
-    "AgentCount": 1,  # Number of agents in a simulation (irrelevant)
-    "tax_rate": 0.0,  # Tax rate on labor income (irrelevant)
-    "vFuncBool": False,  # Whether to calculate the value function
-    "CubicBool": True,  # Whether to use cubic splines (False --> linear splines)
-    "MrkvArray": [MrkvArray],  # State transition probabilities
-}
-MarkovType = MarkovConsumerType(**init_consumer_objects)  # Make a basic consumer type
-
-# %%
-employed_income_dist = DiscreteDistributionLabeled(
-    pmv=np.ones(1), atoms=np.array([[1.0], [1.0]]), var_names=["PermShk", "TranShk"]
-)  # Income distribution when employed
-unemployed_income_dist = DiscreteDistributionLabeled(
-    pmv=np.ones(1), atoms=np.array([[1.0], [0.0]]), var_names=["PermShk", "TranShk"]
-)  # Income distribution when permanently unemployed
-
-# %%
-MarkovType.IncShkDstn = [
-    [employed_income_dist, unemployed_income_dist]
-]  # set the income distribution in each state
-MarkovType.cycles = 0
-
-# %%
-# Solve the "Markov TBS" model
-t_start = process_time()
-MarkovType.solve()
-t_end = process_time()
-MarkovType.unpack("cFunc")
-
-# %%
-print(
-    'Solving the same model "the long way" took ' + str(t_end - t_start) + " seconds." 
-)
-# plot_funcs([ExampleType.solution[0].cFunc,ExampleType.solution[0].cFunc_U],0,m_upper)
-plot_funcs(MarkovType.cFunc[0], 0, m_upper)
-diffFunc = lambda m: ExampleType.solution[0].cFunc(m) - MarkovType.cFunc[0][0](m)
-print("Difference between the (employed) consumption functions:")
-plot_funcs(diffFunc, 0, m_upper)
-
-# %%

-# %%
diff --git a/examples/Distributions/DiscreteDistributionLabeled.py b/examples/Distributions/DiscreteDistributionLabeled.py
deleted file mode 100644
index 83f886fd6..000000000
--- a/examples/Distributions/DiscreteDistributionLabeled.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %% [markdown]
-# # `DDL`: Using `xarray` in `DiscreteDistribution`
-#
-
-# %% [markdown]
-# First we import relevant libraries and tools, including the new `DiscreteDistributionLabeled` class.
-#
-
-# %%
-import numpy as np
-from HARK.distribution import (
-    MeanOneLogNormal,
-    DiscreteDistributionLabeled,
-    calc_expectation,
-    combine_indep_dstns,
-)
-
-# %% [markdown]
-# We create a distribution of shocks to income from continuous distributions.
-#
-
-# %%
-PermShkDstn = MeanOneLogNormal().discretize(200)
-TranShkDstn = MeanOneLogNormal().discretize(200)
-IncShkDstn = combine_indep_dstns(PermShkDstn, TranShkDstn)
-
-# %% [markdown]
-# Taking the components of `IncShkDstn`, we can now create a `DiscreteDistributionLabeled` object. As a demonstration of additional features, we can add a name attribute to the `DDL` object, as well as named dimensions and coordinates.
-#
-
-# %%
-x_dist = DiscreteDistributionLabeled.from_unlabeled(
-    IncShkDstn,
-    name="Distribution of Shocks to Income",
-    var_names=["perm_shk", "tran_shk"],
-    var_attrs=[
-        {
-            "name": "Permanent Shocks to Income",
-            "limit": {"type": "Lognormal", "mean": -0.5, "variance": 1.0},
-        },
-        {
-            "name": "Transitory Shocks to Income",
-            "limit": {"type": "Lognormal", "mean": -0.5, "variance": 1.0},
-        },
-    ],
-)
-
-# %% [markdown]
-# The underlying object and metadata are stored in an `xarray.Dataset`, which can be accessed using the `.dataset` attribute.
-#
-
-# %%
-x_dist.dataset
-
-# %% [markdown]
-# ### Using functions with labels to take expressive expectations.
-#
-
-# %% [markdown]
-# Taking the expectation of a `DDL` object is straightforward using its own `expected()` method.
-#
-
-# %%
-x_dist.expected()
-
-# %% [markdown]
-# As with a `DiscreteDistribution`, we can provide a function and arguments to the `expected()` method.
-#
-
-# %%
-aGrid = np.linspace(0, 20, 100)
-R = 1.03
-
-# %% [markdown]
-# The main difference is that the `expected()` method of `DDL` objects can take a function that uses the labels of the `xarray.DataArray` object. This allows for clearer and more expressive mathematical functions and transition equations. Surprisingly, using a function with labels does not add much overhead to the function evaluation.
-#
-
-# %%
-# %%timeit
-x_dist.expected(
-    lambda dist, a, R: R * a / dist["perm_shk"] + dist["tran_shk"],
-    aGrid,
-    R,
-)
-
-# %% [markdown]
-# Compared to the old method `calc_expectation`, which takes a `DiscreteDistribution` object as input, the new method, which takes a `DiscreteDistributionLabeled` object, is significantly faster. 
-
-# %%
-# %%timeit
-calc_expectation(IncShkDstn, lambda dist, a, R: R * a / dist[0] + dist[1], aGrid, R)
-
-# %% [markdown]
-# We can also use `HARK.distribution.expected`.
-#
-
-# %%
-from HARK.distribution import expected
-
-# %%
-expected(
-    func=lambda dist, a, R: R * a / dist["perm_shk"] + dist["tran_shk"],
-    dist=x_dist,
-    args=(aGrid, R),
-)
-
-# %% [markdown]
-# Additionally, we can use xarrays as inputs via keyword arguments.
-
-# %%
-from xarray import DataArray
-
-aNrm = DataArray(aGrid, name="aNrm", dims=("aNrm"))
-
-
-# %%
-def mNrm_next(dist, R, a=None):
-    variables = {}
-    variables["mNrm_next"] = R * a / dist["perm_shk"] + dist["tran_shk"]
-    return variables
-
-
-# %%
-# %%timeit
-expected(
-    func=mNrm_next,
-    dist=x_dist,
-    args=R,
-    a=aNrm,
-)
-
-# %% [markdown]
-# Taking the expectation with xarray inputs and labeled equations is still significantly faster than the old method.
diff --git a/examples/Distributions/ExpectedValue.py b/examples/Distributions/ExpectedValue.py
deleted file mode 100644
index f4f9b130f..000000000
--- a/examples/Distributions/ExpectedValue.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %% [markdown]
-# # Demonstrations and Timings of HARK.expected
-#
-
-# %% [markdown]
-# First, we import the relevant libraries.
-#
-
-# %%
-from time import time
-
-import numpy as np
-from HARK.distribution import (
-    MeanOneLogNormal,
-    Normal,
-    calc_expectation,
-    combine_indep_dstns,
-)
-
-# %% [markdown]
-# Next, we define discrete distributions as approximations of continuous distributions.
-#
-
-# %%
-dd_0_1_20 = Normal().discretize(20)
-dd_1_1_40 = Normal(mu=1).discretize(40)
-dd_10_10_100 = Normal(mu=10, sigma=10).discretize(100)
-
-# %% [markdown]
-# ### The **new** `DiscreteDistribution.expected()` method
-#
-
-# %% [markdown]
-# There are two ways to get the expectation of a distribution. The first is to use the **new** `expected()` method of the distribution, shown below.
-#
-
-# %%
-# %%timeit
-ce1 = dd_0_1_20.expected()
-ce2 = dd_1_1_40.expected()
-ce3 = dd_10_10_100.expected()
-
-# %% [markdown]
-# The second is to use `HARK.distribution.calc_expectation()`. Comparing the timings, the first method is significantly faster.
-#
-
-# %%
-# %%timeit
-ce1 = calc_expectation(dd_0_1_20)
-ce2 = calc_expectation(dd_1_1_40)
-ce3 = calc_expectation(dd_10_10_100)
-
-# %% [markdown]
-# ### The Expected Value of a function of a random variable
-#
-
-# %% [markdown]
-# Both of these methods allow us to calculate the expected value of a function of the distribution. Using the first method, which is the distribution's own method, we only need to provide the function.
-#
-
-# %%
-# %%timeit
-ce4 = dd_0_1_20.expected(lambda x: 2**x)
-ce5 = dd_1_1_40.expected(lambda x: 2 * x)
-
-# %% [markdown]
-# Using `HARK.distribution.calc_expectation()`, we first provide the distribution and then the function.
-#
-
-# %%
-# %%timeit
-ce4 = calc_expectation(dd_0_1_20, lambda x: 2**x)
-ce5 = calc_expectation(dd_1_1_40, lambda x: 2 * x)
-
-# %% [markdown]
-# #### The expected value of a function with additional arguments
-#
-
-# %% [markdown]
-# For both methods, we can also provide additional arguments `args`, which are passed along to the function; the call is then evaluated as `func(dstn, *args)`. 
-#
-
-# %%
-# %%timeit
-ce6 = dd_10_10_100.expected(lambda x, y: 2 * x + y, 20)
-ce7 = dd_0_1_20.expected(lambda x, y: x + y, np.hstack([0, 1, 2, 3, 4, 5]))
-
-# %%
-# %%timeit
-ce6 = calc_expectation(dd_10_10_100, lambda x, y: 2 * x + y, 20)
-ce7 = calc_expectation(dd_0_1_20, lambda x, y: x + y, np.hstack([0, 1, 2, 3, 4, 5]))
-
-# %% [markdown]
-# ### The expected value of a function in `HARK`
-#
-
-# %% [markdown]
-# For a more practical demonstration of these methods as they would be used in `HARK`, we can create a discrete distribution of shocks to income `IncShkDstn`. Given an array of liquid assets `aGrid` and an interest rate `R`, we can calculate the expected value of next period's cash on hand as the function `m_next = R * aGrid / perm_shk + tran_shk`. Below we see how this is done. Notice that the arguments to the function can be multidimensional.
-#
-
-# %%
-PermShkDstn = MeanOneLogNormal().discretize(200)
-TranShkDstn = MeanOneLogNormal().discretize(200)
-IncShkDstn = combine_indep_dstns(PermShkDstn, TranShkDstn)
-aGrid = np.linspace(0, 20, 100)  # aNrm grid
-R = 1.05  # interest rate
-
-
-def m_next(x, aGrid, R):
-    return R * aGrid / x[0] + x[1]
-
-
-# %%
-# %%timeit
-ce8 = IncShkDstn.expected(m_next, aGrid, R)
-ce9 = IncShkDstn.expected(m_next, aGrid.reshape((10, 10)), R)
-
-# %%
-# %%timeit
-ce8 = calc_expectation(IncShkDstn, m_next, aGrid, R)
-ce9 = calc_expectation(IncShkDstn, m_next, aGrid.reshape((10, 10)), R)
-
-# %% [markdown]
-# ### Time Comparison of the two methods
-#
-
-# %% [markdown]
-# As a final comparison of these two methods, we can see how the time difference is affected by the number of points in the distribution.
-#
-
-# %%
-size = np.arange(1, 11) * 100
-
-t_self = []
-t_dist = []
-
-for n in size:
-    PermShkDstn = MeanOneLogNormal().discretize(n)
-    TranShkDstn = MeanOneLogNormal().discretize(n)
-    IncShkDstn = combine_indep_dstns(PermShkDstn, TranShkDstn)
-
-    m_next = lambda X, a, r: r * a / X[0] + X[1]
-    a_grid = np.linspace(0, 20, 100).reshape((10, 10))
-    R = 1.05
-
-    start_self = time()
-    ce_self = IncShkDstn.expected(m_next, a_grid, R)
-    time_self = time() - start_self
-
-    start_dist = time()
-    ce_dist = calc_expectation(IncShkDstn, m_next, a_grid, R)
-    time_dist = time() - start_dist
-
-    t_self.append(time_self)
-    t_dist.append(time_dist)
-
-# %%
-import matplotlib.pyplot as plt
-
-plt.plot(size, t_self, label="dist.expected(f)")
-plt.plot(size, t_dist, label="calc_expectation(dist, f)")
-plt.title("Time to calculate expectation of a function of shocks to income.")
-plt.ylabel("time (s)")
-plt.xlabel("points per shock distribution ($n$, so $n^2$ atoms in the joint grid)")
-plt.legend()
-plt.show()
-
-# %% [markdown]
-# ### Aliases for the new `expected()` method
-#
-
-# %% [markdown]
-# There is a top-level alias for the new `expected()` method to make it clearer as a mathematical expression. 
The way to access it is as follows: -# -# `expected(func, dstn, *args)` -# - -# %% -from HARK.distribution import expected - -# %% -expected(func=m_next, dist=IncShkDstn, args=(aGrid, R)) - -# %% -expected(func=lambda x: 1 / x[0] + x[1], dist=IncShkDstn) - -# %% diff --git a/examples/FrameAgentType/FrameAgentType Demo.py b/examples/FrameAgentType/FrameAgentType Demo.py deleted file mode 100644 index 271c39f7d..000000000 --- a/examples/FrameAgentType/FrameAgentType Demo.py +++ /dev/null @@ -1,258 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -import HARK.ConsumptionSaving.ConsPortfolioFrameModel as cpfm -import HARK.ConsumptionSaving.ConsPortfolioModel as cpm - -from HARK.frame import Frame, draw_frame_model -import numpy as np - -from HARK.rewards import ( - CRRAutility, -) - -# %% [markdown] -# The `FrameAgentType` is an alternative way to specify a model. -# -# The library contains a demonstration of this form of model, `ConsPortfolioFrameModel`, which is a replica of the `ConsPortfolioModel`. -# -# This notebook compares the results of simulations of the two models. - -# %% -pct = cpm.PortfolioConsumerType(T_sim=5000, AgentCount=200) -pct.cycles = 0 - -# Solve the model under the given parameters - -pct.solve() -pct.track_vars += [ - "mNrm", - "cNrm", - "Share", - "aNrm", - "Risky", - "Adjust", - "PermShk", - "TranShk", - "bNrm", - "who_dies", -] - -pct.make_shock_history() -pct.read_shocks = True - -pct.initialize_sim() - -pct.simulate() - -# %% -pcft = cpfm.PortfolioConsumerFrameType(T_sim=5000, AgentCount=200, read_shocks=True) - -pcft.cycles = 0 - -# Solve the model under the given parameters -pcft.solve() - -pcft.track_vars += [ - "mNrm", - "cNrm", - "Share", - "aNrm", - "Adjust", - "PermShk", - "TranShk", - "bNrm", - "U", -] - -pcft.shock_history = pct.shock_history -pcft.newborn_init_history = pct.newborn_init_history - -pcft.initialize_sim() - -pcft.simulate() - -# %% -import matplotlib.pyplot as plt - -plt.plot(range(5000), pct.history["PermShk"].mean(axis=1), label="original") -plt.plot(range(5000), pcft.history["PermShk"].mean(axis=1), label="frames", alpha=0.5) -plt.legend() - -# %% -plt.plot(range(5000), pct.history["TranShk"].mean(axis=1), label="original") -plt.plot(range(5000), pcft.history["TranShk"].mean(axis=1), label="frames", alpha=0.5) -plt.legend() - -# %% -plt.plot(range(5000), pct.history["bNrm"].mean(axis=1), label="original") -plt.plot(range(5000), pcft.history["bNrm"].mean(axis=1), label="frames", alpha=0.5) -plt.legend() - -# %% -# plt.plot(range(5000), pct.history['Risky'].mean(axis=1), label = 'original') -# plt.plot(range(5000), pcft.history['Risky'].mean(axis=1), label = 'frames', alpha = 0.5) -# plt.legend() - -# %% -plt.plot(range(5000), pct.history["aNrm"].mean(axis=1), label="original") -plt.plot(range(5000), pcft.history["aNrm"].mean(axis=1), label="frames", alpha=0.5) -plt.legend() - -# %% -plt.plot(range(5000), pct.history["mNrm"].mean(axis=1), label="original") -plt.plot(range(5000), pcft.history["mNrm"].mean(axis=1), label="frames", alpha=0.5) -plt.legend() - -# %% -plt.plot(range(5000), pct.history["cNrm"].mean(axis=1), label="original") -plt.plot(range(5000), pcft.history["cNrm"].mean(axis=1), label="frames", alpha=0.5) -plt.legend() - -# %% [markdown] -# **TODO**: Handly Risky as an aggregate value. 
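-
-# %% [markdown]
-# Beyond eyeballing the overlaid plots, we can check agreement numerically. The cell below is a small illustrative check (not part of the original demo): it assumes both histories were tracked for the variables listed, and simply reports the largest absolute gap for each one.
-#
-
-# %%
-for var in ["PermShk", "TranShk", "bNrm", "mNrm", "cNrm", "aNrm"]:
-    gap = np.max(np.abs(pct.history[var] - pcft.history[var]))
-    print(f"{var}: max abs difference = {gap:.3e}")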
- -# %% -# pct.history['Risky'][:3, :3] - -# %% -# pcft.history['Risky'][:3, :3] - -# %% -plt.plot(range(5000), pct.history["Share"].mean(axis=1), label="original") -plt.plot(range(5000), pcft.history["Share"].mean(axis=1), label="frames", alpha=0.5) -plt.legend() - -# %% -plt.plot( - range(5000), pcft.history["cNrm"].mean(axis=1), label="frames - cNrm", alpha=0.5 -) -plt.plot(range(5000), pcft.history["U"].mean(axis=1), label="frames - U", alpha=0.5) -plt.legend() - -# %% -pcft.history["U"] - -# %% -pcft.history["U"].mean(axis=1) - -# %% -pcft.history["U"][0, :] - -# %% -pcft.history["cNrm"][0, :] - -# %% -pcft.parameters["CRRA"] - -# %% -CRRAutility(pcft.history["cNrm"][0, :], 5) - -# %% [markdown] -# # Visualizing the Transition Equations - -# %% [markdown] -# Note that in the HARK `ConsIndShockModel`, from which the `ConsPortfolio` model inherits, the aggregate permanent shocks are considered to be portions of the permanent shocks experienced by the agents, not additions to those idiosyncratic shocks. Hence, they do not show up directly in the problem solved by the agent. This explains why the aggregate income levels are in a separarte component of the graph. - -# %% -draw_frame_model(pcft.model, figsize=(14, 12)) - -# %% [markdown] -# # Building the Solver [INCOMPLETE] - -# %% [markdown] -# Preliminery work towards a generic solver for FramedAgentTypes. - -# %% -controls = [frame for frame in pcft.frames.values() if frame.control] - - -# %% -def get_expected_return_function(control: Frame): - # Input: a control frame - # Returns: function of the control variable (control frame target) - # that returns the expected return, which is - # the sum of: - # - direct rewards - # - expected value of next-frame states (not yet implemented) - # - - rewards = [child for child in control.children if child.reward] - expected_values = [] # TODO - - ## note: function signature is what's needed for scipy.optimize - def expected_return_function(x, *args): - ## returns the sum of - ## the reward functions evaluated in context of - ## - parameters - ## - the control variable input - - # x - array of inputs, here the control frame target - # args - a tuple of other parameters needed to complete the function - - expected_return = 0 - - for reward in rewards: - ## TODO: figuring out the ordering of `x` and `args` needed for multiple downstream scopes - - local_context = {} - - # indexing through the x and args values - i = 0 - num_control_vars = None - - # assumes that all frame scopes list model variables first, parameters later - # should enforce or clarify at the frame level. - for var in reward.scope: - if var in control.target: - local_context[var] = x[i] - i = i + 1 - elif var in pcft.parameters: - if num_control_vars is None: - num_control_vars = i - - local_context[var] = args[i - num_control_vars] - i = i + 1 - - # can `self` be implicit here? 
- expected_return += reward.transition(reward, **local_context) - - return expected_return - - return expected_return_function - - -# %% -def optimal_policy_function(control: Frame): - erf = get_expected_return_function(control) - constraints = ( - control.constraints - ) ## these will reference the context of the control transition, including scope - - ## Returns function: - ## input: control frame scope - ## output: result of scipy.optimize of the erf with respect to constraints - ## getting the optimal input (control variable) value - return func - - -# %% -def approximate_optimal_policy_function(control, grid): - ## returns a new function: - ## that is an interpolation over optimal_policy_function - ## over the grid - - return func diff --git a/examples/FrameAgentType/FrameModels.py b/examples/FrameAgentType/FrameModels.py deleted file mode 100644 index d8a01cda7..000000000 --- a/examples/FrameAgentType/FrameModels.py +++ /dev/null @@ -1,503 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% -from HARK.frame import ( - BackwardFrameReference, - ForwardFrameReference, - Frame, - FrameAgentType, - FrameModel, - draw_frame_model, -) - -from HARK.distribution import combine_indep_dstns, add_discrete_outcome_constant_mean -from HARK.distribution import ( - IndexDistribution, - Lognormal, - MeanOneLogNormal, - Bernoulli, # Random draws for simulating agents -) - -from HARK.rewards import ( - CRRAutility, -) - -# %% [markdown] -# TO DO: -# -# Refactor to separate model from simulator (AgentType) -# -# - [x] Separate FrameModel from FrameAgentType - AgentType has cycles parameter. FrameModel need not have it. -# - [x] Define Repeater transformation -- transforms FrameModel to be either explicitly infinite or to become finite cycled. Can take age-varying parameters here (and only here). -# - [x] FrameAgentType consumes a FrameModel, and runs simulations in HARK way -# - [ ] Further decouple FrameModel from FrameAgentType. -# - [x] FrameModel should take parameters dictionary -# - [x] Generalize simulation to access appropriate solution (transition_cNrm) -# - [ ] FrameModel transition equations should not reference 'self' whiteboard -# - [ ] FrameAgentType with an arbitrary well-formed FrameModel and solution should be able to forward-simulate -# - [x] Replicate the ConsPortfolioFrameModel with new architecture. -# - [ ] Automated tests -# - [ ] Easier single variable target frames -# -# Solver as something that consumes and works with a FrameModel -# -# - [ ] Data structure for the solution of a model? -- A policy. (Look at Bellman library?) -# - [ ] Extract the key sequence of variables along which to pass value -# - [ ] Value-passing -- inverse function -# - [ ] Value-passing -- Inverse expected value -- for chance transitions -# - [ ] Policy updating -- -# - [ ] Value backup -# -# Solvers for repeated FrameModels -# -# - [ ] Finite solver as composition of these tools -# - [ ] Infinite solver through use of tools to convergence -# -# Feed solution back to FrameAgentType -# -# - [ ] Build solution object a la HARK? Or ... 
-# - [ ] Adjust simulator so that it uses the new solution object - -# %% [markdown] -# ## Some simple models - -# %% -init_parameters = {} -init_parameters["PermGroFac"] = 1.05 -init_parameters["PermShkStd"] = 1.5 -init_parameters["PermShkCount"] = 5 -init_parameters["TranShkStd"] = 3.0 -init_parameters["TranShkCount"] = 5 -init_parameters["RiskyAvg"] = 1.05 -init_parameters["RiskyStd"] = 1.5 -init_parameters["RiskyCount"] = 5 -init_parameters["Rfree"] = 1.03 - - -# %% -# TODO: streamline this so it can draw the parameters from context -def birth_aNrmNow(N, **context): - """ - Birth value for aNrmNow - """ - return Lognormal( - mu=context["aNrmInitMean"], - sigma=context["aNrmInitStd"], - ## TODO -- where does this seed come from? The AgentType? - seed=self.RNG.integers(0, 2**31 - 1), - ).draw(N) - - -frame_model_A = FrameModel( - [ - Frame(("bNrm",), ("aNrm",), transition=lambda Rfree, aNrm: Rfree * aNrm), - Frame(("mNrm",), ("bNrm", "TranShk"), transition=lambda bNrm: mNrm), - Frame(("cNrm"), ("mNrm",), control=True), - Frame( - ("U"), - ("cNrm", "CRRA"), ## Note CRRA here is a parameter not a state var - transition=lambda cNrm, CRRA: (CRRAutility(cNrm, CRRA),), - reward=True, - context={"CRRA": 2.0}, - ), - Frame( - ("aNrm"), - ("mNrm", "cNrm"), - default={"aNrm": birth_aNrmNow}, - transition=lambda mNrm, cNrm: (mNrm - cNrm,), - ), - ], - init_parameters, -) - -# %% -draw_frame_model(frame_model_A) - -# %% -isinstance( - list(frame_model_A.frames.var("bNrm").parents.values())[0], BackwardFrameReference -) - -# %% -frame_model_A.frames.var("aNrm").children - -# %% -frame_model_A.infinite - -# %% [markdown] -# ## Modifying the model -# -# -- To refactor to use standalone models - -# %% -draw_frame_model(frame_model_A.make_terminal()) - -# %% -double_model = frame_model_A.prepend(frame_model_A) -draw_frame_model(double_model, figsize=(8, 12)) - -# %% -double_model = frame_model_A.make_terminal().prepend(frame_model_A) -draw_frame_model(double_model, figsize=(8, 12)) - -# %% [markdown] -# ## repeat() - -# %% -repeat_model = frame_model_A.repeat({"bNrm": {"Rfree": [1.01, 1.03, 1.02]}}) -draw_frame_model(repeat_model, figsize=(8, 18)) - -# %% -repeat_model.frames.var("bNrm_1").context["Rfree"] - -# %% -repeat_model.frames.var("aNrm_2").children - -# %% [markdown] -# ## Trying again at a solver .... - -# %% [markdown] -# - [ ] Create grid of state values with a 'forward simulation' with dummy strategies -# - [ ] For each control variable, backwards: -# - [ ] Create objective function $f$ summing: -# - [ ] Direct rewards of (a, s) -# - [ ] Weighted expected value of (a,s) -# - [ ] Over a grid of state values in the control variable's scope: -# - [ ] Find optimal a* for s given $f$ -# - [ ] Using (s, a*) pairs: -# - [ ] Interpolate -# - [ ] Into a decision rule -# - [ ] When all the decision rules are done, forward simulate. 
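-#
-
-# %% [markdown]
-# As a concrete (toy) illustration of the grid -> optimize -> interpolate recipe sketched above: the objective below is a placeholder with a CRRA flow utility and a linear continuation value, not a real frame-derived value function, and the grid bounds are arbitrary. It only demonstrates the mechanical pattern.
-#
-
-# %%
-import numpy as np
-from scipy.interpolate import interp1d
-from scipy.optimize import minimize_scalar
-
-
-def toy_objective(c, m):
-    # Placeholder expected return: CRRA (rho = 2) utility of consumption c,
-    # plus a linear stand-in for the value of end-of-period assets a = m - c.
-    return -1.0 / c + 0.9 * (m - c)
-
-
-m_grid = np.linspace(0.1, 10.0, 20)  # grid of state values
-c_star = np.empty_like(m_grid)
-for i, m in enumerate(m_grid):
-    # find the optimal action at each grid point
-    res = minimize_scalar(
-        lambda c: -toy_objective(c, m), bounds=(1e-6, m), method="bounded"
-    )
-    c_star[i] = res.x
-
-# interpolate the (state, optimal action) pairs into a decision rule
-decision_rule = interp1d(m_grid, c_star, fill_value="extrapolate")
-decision_rule(2.5)
-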
-# - -# %% -model = frame_model_A - -# %% -model.frames - - -# %% -def make_decision_rule(control_frame: Frame): - # get scope - scope = control_frame.scope - - # get objective function - - # get grid over the scope - - # get optimal action for each scope point given objective - - # interpolate from (s, a*) into decision rule - - pass - - -# %% -def create_value_function_from_reward_transition(transition, local_context): - def value_function(**parent_state): - inputs = parent_state.copy() - inputs.update(local_context) - - return transition(**inputs) - - return value_function - - -# %% -for f in range(len(model.frames) - 1, 0, -1): - frame = model.frames.iloc(f) - - if frame.reward: - frame.value = create_value_function_from_reward_transition( - frame.transition, frame.context - ) - - elif frame.control: - pass - - elif len(frame.children) == 0: - # terminal chance node - - pass - - else: - # intermediate state node - pass - -# %% -model.frames.iloc(3).context - -# %% -model.frames.iloc(3).value(**{"cNrm": 2}) - -# %% [markdown] -# ### pycid rules in parallel... - -# %% -''' -def impute_random_decision(self, d: str) -> None: - """Impute a random policy to the given decision node""" - try: - domain = self.model.domain[d] - except KeyError: - raise ValueError(f"can't figure out domain for {d}, did you forget to specify DecisionDomain?") - else: - self.model[d] = StochasticFunctionCPD( - d, lambda **pv: {outcome: 1 / len(domain) for outcome in domain}, self, domain, label="random_decision" - ) -''' - -# %% -''' -def expected_utility( - self, context: Dict[str, Outcome], intervention: Dict[str, Outcome] = None, agent: AgentLabel = 0 - ) -> float: - """Compute the expected utility of an agent for a given context and optional intervention - For example: - cid = get_minimal_cid() - out = self.expected_utility({'D':1}) #TODO: give example that uses context - Parameters - ---------- - context: Node values to condition upon. A dictionary mapping of node => value. - intervention: Interventions to apply. A dictionary mapping node => value. - agent: Evaluate the utility of this agent. - """ - return sum(self.expected_value(self.agent_utilities[agent], context, intervention=intervention)) -''' - -# %% -''' -def impute_optimal_decision(self, decision: str) -> None: - """Impute an optimal policy to the given decision node""" - # self.add_cpds(random.choice(self.optimal_pure_decision_rules(d))) - self.impute_random_decision(decision) - domain = self.model.domain[decision] - utility_nodes = self.agent_utilities[self.decision_agent[decision]] - descendant_utility_nodes = list(set(utility_nodes).intersection(nx.descendants(self, decision))) - copy = self.copy() # using a copy "freezes" the policy so it doesn't adapt to future interventions - - @lru_cache(maxsize=1000) - def opt_policy(**parent_values: Outcome) -> Outcome: - eu = {} - for d in domain: - parent_values[decision] = d - eu[d] = sum(copy.expected_value(descendant_utility_nodes, parent_values)) - return max(eu, key=eu.get) # type: ignore - - self.add_cpds(StochasticFunctionCPD(decision, opt_policy, self, domain=domain, label="opt")) -''' - -# %% [markdown] -# ## Forward simulating the model - -# %% -frame_agent_A = FrameAgentType( - frame_model_A, T_sim=5000, AgentCount=200, read_shocks=True, cycles=0 -) - -# frame_agent_A.solve() -# frame_agent_A.track_vars += [ -# "mNrm", -# "cNrm", -# "aNrm", -# "bNrm", -# 'U' -# ] - -# Doesn't work yet. 
-# frame_agent_A.initialize_sim() -# frame_agent_A.simulate() - -# %% -## TODO: Forward simulate - -# %% [markdown] -# ## Progressively more complex models - -# %% -# maybe replace reference to init_portfolio to self.parameters? -frame_model_B = FrameModel( - [ - # todo : make an aggegrate value - Frame( - ("PermShk"), - None, - default={ - "PermShk": 1.0 - }, # maybe this is unnecessary because the shock gets sampled at t = 0 - # this is discretized before it's sampled - transition=IndexDistribution( - Lognormal.from_mean_std, - { - "mean": init_parameters["PermGroFac"], - "std": init_parameters["PermShkStd"], - }, - ).discretize( - init_parameters["PermShkCount"], method="equiprobable", tail_N=0 - ), - ), - Frame( - ("TranShk"), - None, - default={ - "TranShk": 1.0 - }, # maybe this is unnecessary because the shock gets sampled at t = 0 - transition=IndexDistribution( - MeanOneLogNormal, {"sigma": init_parameters["TranShkStd"]} - ).discretize( - init_parameters["TranShkCount"], method="equiprobable", tail_N=0 - ), - ), - Frame( - ("Rport"), - ("Share", "Risky", "Rfree"), - transition=lambda Share, Risky, Rfree: ( - Share * Risky + (1.0 - Share) * Rfree, - ), - ), - Frame( - ("bNrm",), - ("aNrm", "Rport", "PermShk"), - transition=lambda aNrm, Rport, PermShk: (Rport / PermShk) * aNrm, - ), - Frame( - ("mNrm",), - ("bNrm", "TranShk"), - transition=lambda bNrm, TranShk: (bNrm + TranShk,), - ), - Frame(("cNrm"), ("Adjust", "mNrm", "Share"), control=True), - Frame( - ("U"), - ( - "cNrm", - "CRRA", - ), ## Note CRRA here is a parameter not a state var transition = lambda self, cNrm, CRRA : (CRRAutility(cNrm, CRRA),), - reward=True, - ), - Frame( - ("aNrm"), - ("mNrm", "cNrm"), - default={"aNrm": birth_aNrmNow}, - transition=lambda mNrm, cNrm: (mNrm - cNrm,), - ), - ], - init_parameters, -) - -# %% -draw_frame_model(frame_model_B) # , dot = True) - - -# %% -# TODO: streamline this so it can draw the parameters from context -def birth_aNrmNow(self, N): - """ - Birth value for aNrmNow - """ - return Lognormal( - mu=self.aNrmInitMean, - sigma=self.aNrmInitStd, - seed=self.RNG.integers(0, 2**31 - 1), - ).draw(N) - - # maybe replace reference to init_portfolio to self.parameters? 
- - -frame_model_C = FrameModel( - [ - # todo : make an aggegrate value - Frame( - ("PermShk"), - None, - default={ - "PermShk": 1.0 - }, # maybe this is unnecessary because the shock gets sampled at t = 0 - # this is discretized before it's sampled - transition=IndexDistribution( - Lognormal.from_mean_std, - { - "mean": init_parameters["PermGroFac"], - "std": init_parameters["PermShkStd"], - }, - ).discretize( - init_parameters["PermShkCount"], method="equiprobable", tail_N=0 - ), - ), - Frame( - ("TranShk"), - None, - default={ - "TranShk": 1.0 - }, # maybe this is unnecessary because the shock gets sampled at t = 0 - transition=IndexDistribution( - MeanOneLogNormal, {"sigma": init_parameters["TranShkStd"]} - ).discretize( - init_parameters["TranShkCount"], method="equiprobable", tail_N=0 - ), - ), - Frame( ## TODO: Handle Risky as an Aggregate value - ("Risky"), - None, - transition=IndexDistribution( - Lognormal.from_mean_std, - { - "mean": init_parameters["RiskyAvg"], - "std": init_parameters["RiskyStd"], - } - # seed=self.RNG.integers(0, 2 ** 31 - 1) : TODO: Seed logic - ).discretize(init_parameters["RiskyCount"], method="equiprobable"), - aggregate=True, - ), - Frame( - ("Rport"), - ("Share", "Risky", "Rfree"), - transition=lambda Share, Risky, Rfree: ( - Share * Risky + (1.0 - Share) * Rfree, - ), - ), - Frame( - ("bNrm",), - ("aNrm", "Rport", "PermShk"), - transition=lambda aNrm, Rport, PermShk: (Rport / PermShk) * aNrm, - ), - Frame( - ("mNrm",), - ("bNrm", "TranShk"), - transition=lambda bNrm, TranShk: (bNrm + TranShk,), - ), - Frame(("Share"), ("Adjust", "mNrm"), default={"Share": 0}, control=True), - Frame(("cNrm"), ("Adjust", "mNrm", "Share"), control=True), - Frame( - ("U"), - ("cNrm", "CRRA"), ## Note CRRA here is a parameter not a state var - transition=lambda cNrm, CRRA: (CRRAutility(cNrm, CRRA),), - reward=True, - ), - Frame( - ("aNrm"), - ("mNrm", "cNrm"), - default={"aNrm": birth_aNrmNow}, - transition=lambda mNrm, cNrm: (mNrm - cNrm,), - ), - ], - init_parameters, -) - -# %% -draw_frame_model(frame_model_C, figsize=(8, 12)) # , dot = True) - -# %% diff --git a/examples/GenIncProcessModel/GenIncProcessModel.py b/examples/GenIncProcessModel/GenIncProcessModel.py deleted file mode 100644 index e08fa66dc..000000000 --- a/examples/GenIncProcessModel/GenIncProcessModel.py +++ /dev/null @@ -1,421 +0,0 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: collapsed,code_folding -# cell_metadata_json: true -# formats: ipynb,py:percent -# notebook_metadata_filter: all -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# language_info: -# codemirror_mode: -# name: ipython -# version: 3 -# file_extension: .py -# mimetype: text/x-python -# name: python -# nbconvert_exporter: python -# pygments_lexer: ipython3 -# version: 3.10.9 -# latex_envs: -# LaTeX_envs_menu_present: true -# autoclose: false -# autocomplete: true -# bibliofile: biblio.bib -# cite_by: apalike -# current_citInitial: 1 -# eqLabelWithNumbers: true -# eqNumInitial: 1 -# hotkeys: -# equation: Ctrl-E -# itemize: Ctrl-I -# labels_anchors: false -# latex_user_defs: false -# report_style_numbering: false -# user_envs_cfg: false -# toc: -# base_numbering: 1 -# nav_menu: {} -# number_sections: true -# sideBar: true -# skip_h1_title: false -# title_cell: Table of Contents -# title_sidebar: Contents -# toc_cell: false -# toc_position: {} -# toc_section_display: 
true
-#     toc_window_display: false
-# ---
-
-# %% [markdown]
-# # Permanent versus Persistent Income Shocks
-#
-
-# %% {"code_folding": []}
-# Initial imports and notebook setup
-import matplotlib.pyplot as plt
-
-from copy import copy
-from HARK.utilities import plot_funcs
-import numpy as np
-
-# %%
-from HARK.ConsumptionSaving.ConsGenIncProcessModel import (
-    IndShockExplicitPermIncConsumerType,
-    PersistentShockConsumerType,
-)
-from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
-
-# %% [markdown]
-# First, recall that the `ConsIndShockModel` assumes that income has a permanent component $p$ which is subject to "permanent" shocks:
-#
-# $\log p_{t+1} = \log p_{t} + \log \psi_{t+1}$
-#
-# However, many papers in the literature instead examine models in which shocks merely have some persistence,
-#
-# $\log p_{t+1} = \gamma \log p_{t} + \log \psi_{t+1}$
-#
-# where if $0 < \gamma < 1$ then $\lim_{n \uparrow \infty} \mathbb{E}_{t}[\log p_{t+n}] = 0$ (which means that the level of $p$ reverts to its mean of $p=1$). The two models become identical as $\gamma$ approaches 1.
-#
-# This notebook describes HARK's tools for solving models with persistent shocks. This is achieved by the following:
-#
-# 1. Defining `ConsGenIncProcessModel`, which extends `ConsIndShockModel` by explicitly tracking persistent income $p_t$ as a state variable.
-# 2. Constructing the `IndShockExplicitPermIncConsumerType`, a consumer type created for comparison, whose income process is the special case in which $\gamma=1$ (i.e. subject to permanent shocks).
-#
-
-# %% [markdown]
-#
-# ## General Income Process model
-# In the `ConsGenIncProcessModel`, the user can define a generic function $G$ that translates current $p_{t}$ into expected next period persistent income $p_{t+1}$ (subject to shocks).
-#
-# The agent's problem can be written in Bellman form as:
-
-# %% [markdown]
-# \begin{eqnarray*}
-# v_t(M_t,p_t) &=& \max_{c_t} U(c_t) + \beta (1-\mathsf{D}_{t+1}) \mathbb{E}_{t} [v_{t+1}(M_{t+1}, p_{t+1})] \\
-# a_t &=& M_t - c_t \\
-# a_t &\geq& \underline{a} \\
-# M_{t+1} &=& R a_t + \theta_{t+1} \\
-# p_{t+1} &=& G_{t+1}(p_t)\psi_{t+1} \\
-# \psi_t \sim F_{\psi_t} &\qquad& \theta_t \sim F_{\theta_t} \\
-# \mathbb{E} [F_{\psi_t}] = 1 & & \mathbb{E} [F_{\theta_t}] = 1 \\
-# U(c) &=& \frac{c^{1-\rho}}{1-\rho}
-# \end{eqnarray*}
-
-# %% [markdown]
-# The one-period problem for this model is solved by the function `solveConsGenIncProcess`, which creates an instance of the class `ConsGenIncProcessSolver`. The class `GenIncProcessConsumerType` extends `IndShockConsumerType` to represent agents in this model. To construct an instance of this class, several parameters must be passed to the constructor, as shown in the table below (parameters are either "primitive" or "constructed"; the latter are built automatically using machinery inherited from previous models).
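-#
-
-# %% [markdown]
-# Before turning to the constructor parameters, it may help to see the role of $G$ in toy form. These two plain Python functions are illustrations only -- they are not the `pLvlNextFunc` objects HARK constructs -- and correspond to the random-walk ($\gamma = 1$) and AR(1) ($0 < \gamma < 1$) cases above.
-#
-
-# %%
-def G_random_walk(pLvl, PermGroFac=1.0):
-    # gamma = 1: expected persistent income scales one-for-one with pLvl
-    return PermGroFac * pLvl
-
-
-def G_ar1(pLvl, Corr=0.98):
-    # 0 < gamma < 1: log persistent income mean-reverts toward log(pbar) = 0
-    return pLvl**Corr
-
-
-print(G_random_walk(2.0), G_ar1(2.0))
-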
-# %% [markdown]
-#
-# ### Example parameter values to solve GenIncProcess model
-#
-# | Param | Description | Code | Value | Constructed |
-# | :---: | --- | --- | --- | :---: |
-# | $\beta$ | Intertemporal discount factor | $\texttt{DiscFac}$ | 0.96 | |
-# | $\rho$ | Coefficient of relative risk aversion | $\texttt{CRRA}$ | 2.0 | |
-# | $R$ | Risk free interest factor | $\texttt{Rfree}$ | 1.03 | |
-# | $1 - \mathsf{D}$ | Survival probability | $\texttt{LivPrb}$ | [0.98] | |
-# | $\underline{a}$ | Artificial borrowing constraint | $\texttt{BoroCnstArt}$ | 0.0 | |
-# | $(none)$ | Indicator of whether $\texttt{vFunc}$ should be computed | $\texttt{vFuncBool}$ | True | |
-# | $(none)$ | Indicator of whether $\texttt{cFunc}$ should use cubic splines | $\texttt{CubicBool}$ | False | |
-# | $F$ | A list containing three arrays of floats, representing a discrete approximation to the income process: event probabilities, persistent shocks, transitory shocks | $\texttt{IncomeDstn}$ | - | $\surd$ |
-# | $G$ | Expected persistent income next period | $\texttt{pLvlNextFunc}$ | - | $\surd$ |
-# | $(none)$ | Array of time-varying persistent income levels | $\texttt{pLvlGrid}$ | - | $\surd$ |
-# | $(none)$ | Array of "extra" end-of-period asset values | $\texttt{aXtraGrid}$ | - | $\surd$ |
-#
-# ### Constructed inputs to solve GenIncProcess
-# The "constructed" inputs above rely on the attributes described below and are built by various methods:
-#
-#
-# * The input $\texttt{IncomeDstn}$ is created by the method `update_income_process`, which is inherited from the `IndShockConsumerType`.
-#
-# * The input $\texttt{pLvlNextFunc}$ is created by the method `updatepLvlNextFunc`. To do so, it uses the initial sequence of $\texttt{pLvlNextFunc}$, as well as the mean and standard deviation of (log) initial permanent income, $\texttt{pLvlInitMean}$ and $\texttt{pLvlInitStd}$.
-#
-# * Note: In this model, the method creates a trivial $\texttt{pLvlNextFunc}$ attribute with no persistent income dynamics. But we can overwrite it when defining other subclasses (for example, to give a newly defined agent type an AR(1) income process).
-#
-#
-# * The input $\texttt{pLvlGrid}$ is created by the method `updatepLvlGrid`, which updates the grid of persistent income levels for infinite horizon models (cycles=0) and lifecycle models (cycles=1). This method draws on the initial distribution of persistent income, the $\texttt{pLvlNextFuncs}$, $\texttt{pLvlInitMean}$, $\texttt{pLvlInitStd}$ and the attribute $\texttt{pLvlPctiles}$ (percentiles of the distribution of persistent income). It then uses a simulation approach to generate the $\texttt{pLvlGrid}$ at each period of the cycle.
-#
-#
-# * The input $\texttt{aXtraGrid}$ is created by $\texttt{updateAssetsGrid}$, which updates the agent's end-of-period assets grid by constructing a multi-exponentially spaced grid of aXtra values, based on $\texttt{aNrmInitMean}$ and $\texttt{aNrmInitStd}$.
-#
-
-# %% [markdown]
-# ## 1. Consumer with Explicit Permanent Income
-#
-# Now that we've defined the baseline model of a consumer facing persistent income shocks, we can define an "explicit permanent income" consumer who experiences idiosyncratic shocks to permanent and transitory income, and faces permanent income growth, *as a particular case of the general model*.
-#
-# The agent's problem can be written in Bellman form as:
-#
-# \begin{eqnarray*}
-# v_t(M_t,p_t) &=& \max_{c_t} U(c_t) + \beta (1-\mathsf{D}_{t+1}) \mathbb{E} [v_{t+1}(M_{t+1}, p_{t+1}) ], \\
-# a_t &=& M_t - c_t, \\
-# a_t &\geq& \underline{a}, \\
-# M_{t+1} &=& R/(\Gamma_{t+1} \psi_{t+1}) a_t + \theta_{t+1}, \\
-# p_{t+1} &=& G_{t+1}(p_t)\psi_{t+1}, \\
-# \psi_t \sim F_{\psi_t}, \mathbb{E} [F_{\psi_t}] = 1 &\qquad& \theta_t \sim F_{\theta_t}, \mathbb{E} [F_{\theta_t}] = 1, \\
-# U(c) &=& \frac{c^{1-\rho}}{1-\rho}.
-# \end{eqnarray*}
-#
-#
-# * Note: This agent type is identical to an `IndShockConsumerType` consumer, but now the level of permanent income $\texttt{pLvl}$ is explicitly tracked as a state variable. This is the sense in which this model is a particular case of the GenIncProcess model, as shown in the mathematical description of the agent's optimization problem in that model.
-#
-# To construct `IndShockExplicitPermIncConsumerType` as an instance of `GenIncProcessConsumerType`, we need to pass additional parameters to the constructor, as shown in the table below.
-# -# ### Additional parameters to solve ExplicitPermInc model -# -# | Param | Description | Code | Value | Constructed | -# | :---: | --- | --- | --- | :---: | -# |(none)|percentiles of the distribution of persistent income|$\texttt{pLvlPctiles}$||| -# | $G$ |Expected persistent income next period | $\texttt{pLvlNextFunc}$ | - | $\surd$ | -# |$\Gamma$|Permanent income growth factor|$\texttt{PermGroFac}$|[1.0]| | -# -# -# ### Constructed inputs to solve ExplicitPermInc -# -# * In this "explicit permanent income" model, we overwrite the method `updatepLvlNextFunc` to create $\texttt{pLvlNextFunc}$ as a sequence of linear functions, indicating constant expected permanent income growth across permanent income levels. This method uses the attribute $\texttt{PermGroFac}$, and installs a special retirement function when it exists. -# -# - -# %% {"code_folding": []} -# This cell defines a dictionary to make an instance of "explicit permanent income" consumer. -GenIncDictionary = { - "CRRA": 2.0, # Coefficient of relative risk aversion - "Rfree": 1.03, # Interest factor on assets - "DiscFac": 0.96, # Intertemporal discount factor - "LivPrb": [0.98], # Survival probability - "AgentCount": 10000, # Number of agents of this type (only matters for simulation) - "aNrmInitMean": 0.0, # Mean of log initial assets (only matters for simulation) - "aNrmInitStd": 1.0, # Standard deviation of log initial assets (only for simulation) - "pLvlInitMean": 0.0, # Mean of log initial permanent income (only matters for simulation) - "pLvlInitStd": 0.0, # Standard deviation of log initial permanent income (only matters for simulation) - "PermGroFacAgg": 1.0, # Aggregate permanent income growth factor (only matters for simulation) - "T_age": None, # Age after which simulated agents are automatically killed - "T_cycle": 1, # Number of periods in the cycle for this agent type - # Parameters for constructing the "assets above minimum" grid - "aXtraMin": 0.001, # Minimum end-of-period "assets above minimum" value - "aXtraMax": 30, # Maximum end-of-period "assets above minimum" value - "aXtraExtra": [ - 0.005, - 0.01, - ], # Some other value of "assets above minimum" to add to the grid - "aXtraNestFac": 3, # Exponential nesting factor when constructing "assets above minimum" grid - "aXtraCount": 48, # Number of points in the grid of "assets above minimum" - # Parameters describing the income process - "PermShkCount": 7, # Number of points in discrete approximation to permanent income shocks - "TranShkCount": 7, # Number of points in discrete approximation to transitory income shocks - "PermShkStd": [0.1], # Standard deviation of log permanent income shocks - "TranShkStd": [0.1], # Standard deviation of log transitory income shocks - "UnempPrb": 0.05, # Probability of unemployment while working - "UnempPrbRet": 0.005, # Probability of "unemployment" while retired - "IncUnemp": 0.3, # Unemployment benefits replacement rate - "IncUnempRet": 0.0, # "Unemployment" benefits when retired - "tax_rate": 0.0, # Flat income tax rate - "T_retire": 0, # Period of retirement (0 --> no retirement) - "BoroCnstArt": 0.0, # Artificial borrowing constraint; imposed minimum level of end-of period assets - "CubicBool": False, # Use cubic spline interpolation when True, linear interpolation when False - "vFuncBool": True, # Whether to calculate the value function during solution - # More parameters specific to "Explicit Permanent income" shock model - "cycles": 0, - "pLvlPctiles": np.concatenate( - ( - [0.001, 0.005, 0.01, 0.03], - 
np.linspace(0.05, 0.95, num=19), - [0.97, 0.99, 0.995, 0.999], - ) - ), - "PermGroFac": [ - 1.0 - ], # Permanent income growth factor - long run permanent income growth doesn't work yet -} - -# %% [markdown] -# Let's now create an instance of the type of consumer we are interested in and solve this agent's problem with an infinite horizon (cycles=0). - -# %% -# Make and solve an example "explicit permanent income" consumer with idiosyncratic shocks -ExplicitExample = IndShockExplicitPermIncConsumerType(**GenIncDictionary) - -print("Here, the lowest percentile is " + str(GenIncDictionary["pLvlPctiles"][0] * 100)) -print( - "and the highest percentile is " - + str(GenIncDictionary["pLvlPctiles"][-1] * 100) - + ".\n" -) - -ExplicitExample.solve() - - -# %% [markdown] -# In the cell below, we generate a plot of the consumption function for explicit permanent income consumer at different income levels. - -# %% {"code_folding": []} -# Plot the consumption function at various permanent income levels. -print("Consumption function by pLvl for explicit permanent income consumer:") -pLvlGrid = ExplicitExample.pLvlGrid[0] -mLvlGrid = np.linspace(0, 20, 300) -for p in pLvlGrid: - M_temp = mLvlGrid + ExplicitExample.solution[0].mLvlMin(p) - C = ExplicitExample.solution[0].cFunc(M_temp, p * np.ones_like(M_temp)) - plt.plot(M_temp, C) -plt.xlim(0.0, 20.0) -plt.xlabel("Market resource level mLvl") -plt.ylabel("Consumption level cLvl") -plt.show() - -# %% [markdown] -# ## Permanent income normalized -# -# An alternative model is to normalize it by dividing all variables by permanent income $p_t$ and solve the model again. - -# %% -# Make and solve an example of normalized model -NormalizedExample = IndShockConsumerType(**GenIncDictionary, verbose=0) -NormalizedExample.solve() - -# %% -# Compare the normalized problem with and without explicit permanent income and plot the consumption functions -print("Normalized consumption function by pLvl for explicit permanent income consumer:") -pLvlGrid = ExplicitExample.pLvlGrid[0] -mNrmGrid = np.linspace(0, 20, 300) -for p in pLvlGrid: - M_temp = mNrmGrid * p + ExplicitExample.solution[0].mLvlMin(p) - C = ExplicitExample.solution[0].cFunc(M_temp, p * np.ones_like(M_temp)) - plt.plot(M_temp / p, C / p) - -plt.xlim(0.0, 20.0) -plt.xlabel("Normalized market resources mNrm") -plt.ylabel("Normalized consumption cNrm") -plt.show() - -print( - "Consumption function for normalized problem (without explicit permanent income):" -) -mNrmMin = NormalizedExample.solution[0].mNrmMin -plot_funcs(NormalizedExample.solution[0].cFunc, mNrmMin, mNrmMin + 20.0) - -# %% [markdown] -# The figures above show that the normalized consumption function for the "explicit permanent income" consumer is almost identical for every permanent income level (and the same as the normalized problem's $\texttt{cFunc}$), but is less accurate due to extrapolation outside the bounds of $\texttt{pLvlGrid}$. -# -# The "explicit permanent income" solution deviates from the solution to the normalized problem because of errors from extrapolating beyond the bounds of the $\texttt{pLvlGrid}$. The error is largest for $\texttt{pLvl}$ values near the upper and lower bounds, and propagates toward the center of the distribution. 
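-#
-
-# %% [markdown]
-# The visual comparison above can also be quantified. The cell below is a small illustrative check (not part of the original discussion): for each pLvl in the grid, it reports the largest gap between the normalized "explicit" policy and the normalized model's cFunc over the plotted range of mNrm.
-#
-
-# %%
-mNrmGrid = np.linspace(0.5, 20, 300)
-for p in ExplicitExample.pLvlGrid[0]:
-    M_temp = mNrmGrid * p + ExplicitExample.solution[0].mLvlMin(p)
-    cNrm_explicit = (
-        ExplicitExample.solution[0].cFunc(M_temp, p * np.ones_like(M_temp)) / p
-    )
-    cNrm_norm = NormalizedExample.solution[0].cFunc(M_temp / p)
-    print(f"pLvl = {p:.3f}: max |gap| = {np.max(np.abs(cNrm_explicit - cNrm_norm)):.5f}")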
-# - -# %% -# Plot the value function at various permanent income levels -if ExplicitExample.vFuncBool: - pGrid = np.linspace(0.1, 3.0, 24) - M = np.linspace(0.001, 5, 300) - for p in pGrid: - M_temp = M + ExplicitExample.solution[0].mLvlMin(p) - C = ExplicitExample.solution[0].vFunc(M_temp, p * np.ones_like(M_temp)) - plt.plot(M_temp, C) - plt.ylim([-200, 0]) - plt.xlabel("Market resource level mLvl") - plt.ylabel("Value v") - plt.show() - -# %% -# Simulate many periods to get to the stationary distribution -ExplicitExample.T_sim = 500 -ExplicitExample.track_vars = ["mLvl", "cLvl", "pLvl"] -ExplicitExample.initialize_sim() -ExplicitExample.simulate() -plt.plot(np.mean(ExplicitExample.history["mLvl"], axis=1)) -plt.xlabel("Simulated time period") -plt.ylabel("Average market resources mLvl") -plt.show() - - -# %% [markdown] -# ## 2. Persistent income shock consumer -# -# -# Next, the `PersistentShockConsumerType` class is introduced to solve consumption-saving models with idiosyncratic shocks to income in which shocks are persistent and transitory. This model extends `ConsGenIndShockModel` by allowing (log) persistent income to follow an AR(1) process. -# -# The agent's problem can be written in Bellman form as: -# -# \begin{eqnarray*} -# v_t(M_t,p_t) &=& \max_{c_t} U(c_t) + \beta (1-\mathsf{D}_{t+1}) \mathbb{E} [v_{t+1}(M_{t+1}, p_{t+1}) ], \\ -# a_t &=& M_t - c_t, \\ -# a_t &\geq& \underline{a}, \\ -# M_{t+1} &=& R a_t + \theta_{t+1}, \\ -# log(p_{t+1}) &=& \varphi log(p_t)+(1-\varphi log(\overline{p}_{t+1} )) +log(\Gamma_{t+1})+log(\psi_{t+1}), \\ -# \\ -# \psi_t \sim F_{\psi_t}, \mathbb{E} [F_{\psi_t}] = 1 &\qquad& \theta_t \sim F_{\theta_t}, \mathbb{E} [F_{\theta_t}] = 1 \\ -# \end{eqnarray*} -# -# ### Additional parameters to solve PersistentShock model -# -# | Param | Description | Code | Value | Constructed | -# | :---: | --- | --- | --- | :---: | -# |$\varphi$|Serial correlation coefficient for permanent income|$\texttt{PrstIncCorr}$|0.98|| -# |||||| -# -# ### Constructed inputs to solve PersistentShock -# -# * For this model, we overwrite the method $\texttt{updatepLvlNextFunc}$ to create the input $\texttt{pLvlNextFunc}$ as a sequence of AR1-style functions. The method uses now the attributes $\texttt{PermGroFac}$ and $\texttt{PrstIncCorr}$. If cycles=0, the product of $\texttt{PermGroFac}$ across all periods must be 1.0, otherwise this method is invalid. -# - -# %% {"code_folding": []} -# Make a dictionary for the "persistent idiosyncratic shocks" model -PrstIncCorr = 0.98 # Serial correlation coefficient for persistent income - -persistent_shocks = copy(GenIncDictionary) -persistent_shocks["PrstIncCorr"] = PrstIncCorr - - -# %% [markdown] -# Again, the `PersistentShockConsumerType` class solves the problem of a consumer facing idiosyncratic shocks to his persistent and transitory income, and for which the (log) persistent income follows an AR1 process rather than random walk. After modifying the dictionary from the `ConsGenIncProcessModel` class to accomodate these modeling choices, we generate a particular instance of the "persistent income" consumer type below. 
- -# %% -# Make and solve an example of "persistent idisyncratic shocks" consumer -PersistentExample = PersistentShockConsumerType(**persistent_shocks) -PersistentExample.solve() - -# %% -# Plot the consumption function at various levels of persistent income pLvl -print( - "Consumption function by persistent income level pLvl for a consumer with AR1 coefficient of " - + str(PersistentExample.PrstIncCorr) - + ":" -) -pLvlGrid = PersistentExample.pLvlGrid[0] -mLvlGrid = np.linspace(0, 20, 300) -for p in pLvlGrid: - M_temp = mLvlGrid + PersistentExample.solution[0].mLvlMin(p) - C = PersistentExample.solution[0].cFunc(M_temp, p * np.ones_like(M_temp)) - plt.plot(M_temp, C) -plt.xlim(0.0, 20.0) -plt.xlabel("Market resource level mLvl") -plt.ylabel("Consumption level cLvl") -plt.show() - -# %% -# Plot the value function at various persistent income levels -if PersistentExample.vFuncBool: - pGrid = PersistentExample.pLvlGrid[0] - M = np.linspace(0.001, 5, 300) - for p in pGrid: - M_temp = M + PersistentExample.solution[0].mLvlMin(p) - C = PersistentExample.solution[0].vFunc(M_temp, p * np.ones_like(M_temp)) - plt.plot(M_temp, C) - plt.ylim([-200, 0]) - plt.xlabel("Market resource level mLvl") - plt.ylabel("Value v") - plt.show() - -# %% -# Simulate some data -PersistentExample.T_sim = 500 -PersistentExample.track_vars = ["mLvl", "cLvl", "pLvl"] -PersistentExample.initialize_sim() -PersistentExample.simulate() -plt.plot(np.mean(PersistentExample.history["mLvl"], axis=1)) -plt.xlabel("Simulated time period") -plt.ylabel("Average market resources mLvl") -plt.show() diff --git a/examples/Gentle-Intro/Gentle-Intro-To-HARK.py b/examples/Gentle-Intro/Gentle-Intro-To-HARK.py deleted file mode 100644 index d7ac57442..000000000 --- a/examples/Gentle-Intro/Gentle-Intro-To-HARK.py +++ /dev/null @@ -1,316 +0,0 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: collapsed,code_folding -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% [markdown] -# # A Gentle Introduction to HARK -# -# This notebook provides a simple, hands-on tutorial for first time HARK users -- and potentially first time Python users. It does not go "into the weeds" - we have hidden some code cells that do boring things that you don't need to digest on your first experience with HARK. Our aim is to convey a feel for how the toolkit works. -# -# For readers for whom this is your very first experience with Python, we have put important Python concepts in **boldface**. For those for whom this is the first time they have used a Jupyter notebook, we have put Jupyter instructions in _italics_. Only cursory definitions (if any) are provided here. If you want to learn more, there are many online Python and Jupyter tutorials. - -# %% code_folding=[] -# This cell has a bit of initial setup. You can click the triangle to the left to expand it. 
-# Click the "Run" button immediately above the notebook in order to execute the contents of any cell -# WARNING: Each cell in the notebook relies upon results generated by previous cells -# The most common problem beginners have is to execute a cell before all its predecessors -# If you do this, you can restart the kernel (see the "Kernel" menu above) and start over - -import matplotlib.pyplot as plt -import numpy as np -import HARK -from copy import deepcopy - -mystr = lambda number: "{:.4f}".format(number) -from HARK.utilities import plot_funcs - -# %% [markdown] -# ## Your First HARK Model: Perfect Foresight -# -# We start with almost the simplest possible consumption model: A consumer with CRRA utility -# -# \begin{equation} -# U(C) = \frac{C^{1-\rho}}{1-\rho} -# \end{equation} -# -# has perfect foresight about everything except the (stochastic) date of death, which occurs with constant probability implying a "survival probability" $\newcommand{\LivPrb}{\aleph}\LivPrb < 1$. Permanent labor income $P_t$ grows from period to period by a factor $\Gamma_t$. At the beginning of each period $t$, the consumer has some amount of market resources $M_t$ (which includes both market wealth and currrent income) and must choose how much of those resources to consume $C_t$ and how much to retain in a riskless asset $A_t$ which will earn return factor $R$. The agent's flow of utility $U(C_t)$ from consumption is geometrically discounted by factor $\beta$. Between periods, the agent dies with probability $\mathsf{D}_t$, ending his problem. -# -# The agent's problem can be written in Bellman form as: -# -# \begin{eqnarray*} -# V_t(M_t,P_t) &=& \max_{C_t}~U(C_t) + \beta \aleph V_{t+1}(M_{t+1},P_{t+1}), \\ -# & s.t. & \\ -# %A_t &=& M_t - C_t, \\ -# M_{t+1} &=& R (M_{t}-C_{t}) + Y_{t+1}, \\ -# P_{t+1} &=& \Gamma_{t+1} P_t, \\ -# \end{eqnarray*} -# -# A particular perfect foresight agent's problem can be characterized by values of risk aversion $\rho$, discount factor $\beta$, and return factor $R$, along with sequences of income growth factors $\{ \Gamma_t \}$ and survival probabilities $\{\mathsf{\aleph}_t\}$. To keep things simple, let's forget about "sequences" of income growth and mortality, and just think about an $\textit{infinite horizon}$ consumer with constant income growth and survival probability. -# -# ## Representing Agents in HARK -# -# HARK represents agents solving this type of problem as $\textbf{instances}$ of the $\textbf{class}$ $\texttt{PerfForesightConsumerType}$, a $\textbf{subclass}$ of $\texttt{AgentType}$. To make agents of this class, we must import the class itself into our workspace. (Run the cell below in order to do this). - -# %% -from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType - -# %% [markdown] -# The $\texttt{PerfForesightConsumerType}$ class contains within itself the python code that constructs the solution for the perfect foresight model we are studying here, as specifically articulated in [these lecture notes](https://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/consumption/PerfForesightCRRA/). -# -# To create an instance of $\texttt{PerfForesightConsumerType}$, we simply call the class as if it were a function, passing as arguments the specific parameter values we want it to have. 
In the hidden cell below, we define a $\textbf{dictionary}$ named $\texttt{PF_dictionary}$ with these parameter values: -# -# | Param | Description | Code | Value | -# | :---: | --- | --- | :---: | -# | $\rho$ | Relative risk aversion | $\texttt{CRRA}$ | 2.5 | -# | $\beta$ | Discount factor | $\texttt{DiscFac}$ | 0.96 | -# | $R$ | Risk free interest factor | $\texttt{Rfree}$ | 1.03 | -# | $\aleph$ | Survival probability | $\texttt{LivPrb}$ | 0.98 | -# | $\Gamma$ | Income growth factor | $\texttt{PermGroFac}$ | 1.01 | -# -# -# For now, don't worry about the specifics of dictionaries. All you need to know is that a dictionary lets us pass many arguments wrapped up in one simple data structure. - -# %% code_folding=[] -# This cell defines a parameter dictionary. You can expand it if you want to see what that looks like. -PF_dictionary = { - "CRRA": 2.5, - "DiscFac": 0.96, - "Rfree": 1.03, - "LivPrb": [0.98], - "PermGroFac": [1.01], - "T_cycle": 1, - "cycles": 0, - "AgentCount": 10000, -} - -# To those curious enough to open this hidden cell, you might notice that we defined -# a few extra parameters in that dictionary: T_cycle, cycles, and AgentCount. Don't -# worry about these for now. - -# %% [markdown] -# Let's make an **object** named $\texttt{PFexample}$ which is an **instance** of the $\texttt{PerfForesightConsumerType}$ class. The object $\texttt{PFexample}$ will bundle together the abstract mathematical description of the solution embodied in $\texttt{PerfForesightConsumerType}$, and the specific set of parameter values defined in $\texttt{PF_dictionary}$. Such a bundle is created passing $\texttt{PF_dictionary}$ to the class $\texttt{PerfForesightConsumerType}$: - -# %% -PFexample = PerfForesightConsumerType(**PF_dictionary) -# the asterisks ** basically say "here come some arguments" to PerfForesightConsumerType - -# %% [markdown] -# In $\texttt{PFexample}$, we now have _defined_ the problem of a particular infinite horizon perfect foresight consumer who knows how to solve this problem. -# -# ## Solving an Agent's Problem -# -# To tell the agent actually to solve the problem, we call the agent's $\texttt{solve}$ **method**. (A method is essentially a function that an object runs that affects the object's own internal characteristics -- in this case, the method adds the consumption function to the contents of $\texttt{PFexample}$.) -# -# The cell below calls the $\texttt{solve}$ method for $\texttt{PFexample}$ - -# %% -PFexample.solve() - -# %% [markdown] -# Running the $\texttt{solve}$ method creates the **attribute** of $\texttt{PFexample}$ named $\texttt{solution}$. In fact, every subclass of $\texttt{AgentType}$ works the same way: The class definition contains the abstract algorithm that knows how to solve the model, but to obtain the particular solution for a specific instance (paramterization/configuration), that instance must be instructed to $\texttt{solve()}$ its problem. -# -# The $\texttt{solution}$ attribute is always a $\textit{list}$ of solutions to a single period of the problem. In the case of an infinite horizon model like the one here, there is just one element in that list -- the solution to all periods of the infinite horizon problem. 
The consumption function stored as the first element (element 0) of the solution list can be retrieved by: - -# %% -PFexample.solution[0].cFunc - -# %% [markdown] -# One of the results proven in the associated [the lecture notes](https://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/consumption/PerfForesightCRRA/) is that, for the specific problem defined above, there is a solution in which the _ratio_ $c = C/P$ is a linear function of the _ratio_ of market resources to permanent income, $m = M/P$. -# -# This is why $\texttt{cFunc}$ can be represented by a linear interpolation. It can be plotted between an $m$ ratio of 0 and 10 using the command below. - -# %% -mPlotTop = 10 -plot_funcs(PFexample.solution[0].cFunc, 0.0, mPlotTop) - -# %% [markdown] -# The figure illustrates one of the surprising features of the perfect foresight model: A person with zero money should be spending at a rate more than double their income (that is, $\texttt{cFunc}(0.) \approx 2.08$ - the intersection on the vertical axis). How can this be? -# -# The answer is that we have not incorporated any constraint that would prevent the agent from borrowing against the entire PDV of future earnings-- human wealth. How much is that? What's the minimum value of $m_t$ where the consumption function is defined? We can check by retrieving the $\texttt{hNrm}$ **attribute** of the solution, which calculates the value of human wealth normalized by permanent income: - -# %% -humanWealth = PFexample.solution[0].hNrm -mMinimum = PFexample.solution[0].mNrmMin -print( - "This agent's human wealth is " - + str(humanWealth) - + " times his current income level." -) -print( - "This agent's consumption function is defined (consumption is positive) down to m_t = " - + str(mMinimum) -) - -# %% [markdown] -# Yikes! Let's take a look at the bottom of the consumption function. In the cell below, the bounds of the `plot_funcs` function are set to display down to the lowest defined value of the consumption function. - -# %% -plot_funcs(PFexample.solution[0].cFunc, mMinimum, mPlotTop) - -# %% [markdown] -# ## Changing Agent Parameters -# -# Suppose you wanted to change one (or more) of the parameters of the agent's problem and see what that does. We want to compare consumption functions before and after we change parameters, so let's make a new instance of $\texttt{PerfForesightConsumerType}$ by copying $\texttt{PFexample}$. - -# %% -NewExample = deepcopy(PFexample) - -# %% [markdown] -# You can assign new parameters to an `AgentType` with the `assign_parameter` method. For example, we could make the new agent less patient: - -# %% -NewExample.assign_parameters(DiscFac=0.90) -NewExample.solve() -mPlotBottom = mMinimum -plot_funcs( - [PFexample.solution[0].cFunc, NewExample.solution[0].cFunc], mPlotBottom, mPlotTop -) - -# %% [markdown] -# (Note that you can pass a **list** of functions to `plot_funcs` as the first argument rather than just a single function. Lists are written inside of [square brackets].) -# -# Let's try to deal with the "problem" of massive human wealth by making another consumer who has essentially no future income. We can virtually eliminate human wealth by making the permanent income growth factor $\textit{very}$ small. -# -# In $\texttt{PFexample}$, the agent's income grew by 1 percent per period -- his $\texttt{PermGroFac}$ took the value 1.01. What if our new agent had a growth factor of 0.01 -- his income __shrinks__ by 99 percent each period? 
In the cell below, set $\texttt{NewExample}$'s discount factor back to its original value, then set its $\texttt{PermGroFac}$ attribute so that the growth factor is 0.01 each period. -# -# Important: Recall that the model at the top of this document said that an agent's problem is characterized by a sequence of income growth factors, but we tabled that concept. Because $\texttt{PerfForesightConsumerType}$ treats $\texttt{PermGroFac}$ as a __time-varying__ attribute, it must be specified as a **list** (with a single element in this case). - -# %% -# Revert NewExample's discount factor and make his future income minuscule -# print("your lines here") - -# Compare the old and new consumption functions -plot_funcs([PFexample.solution[0].cFunc, NewExample.solution[0].cFunc], 0.0, 10.0) - -# %% [markdown] -# Now $\texttt{NewExample}$'s consumption function has the same slope (MPC) as $\texttt{PFexample}$, but it emanates from (almost) zero-- he has basically no future income to borrow against! -# -# If you'd like, use the cell above to alter $\texttt{NewExample}$'s other attributes (relative risk aversion, etc) and see how the consumption function changes. However, keep in mind that *no solution exists* for some combinations of parameters. HARK should let you know if this is the case if you try to solve such a model. -# -# -# ## Your Second HARK Model: Adding Income Shocks -# -# Linear consumption functions are pretty boring, and you'd be justified in feeling unimpressed if all HARK could do was plot some lines. Let's look at another model that adds two important layers of complexity: income shocks and (artificial) borrowing constraints. -# -# Specifically, our new type of consumer receives two income shocks at the beginning of each period: a completely transitory shock $\theta_t$ and a completely permanent shock $\psi_t$. Moreover, lenders will not let the agent borrow money such that his ratio of end-of-period assets $A_t$ to permanent income $P_t$ is less than $\underline{a}$. As with the perfect foresight problem, this model can be framed in terms of __normalized__ variables, e.g. $m_t \equiv M_t/P_t$. (See [here](https://www.econ2.jhu.edu/people/ccarroll/papers/BufferStockTheory/) for all the theory). -# -# \begin{eqnarray*} -# v_t(m_t) &=& \max_{c_t} ~ U(c_t) ~ + \phantom{\LivFac} \beta \mathbb{E} [(\Gamma_{t+1}\psi_{t+1})^{1-\rho} v_{t+1}(m_{t+1}) ], \\ -# a_t &=& m_t - c_t, \\ -# a_t &\geq& \underset{\bar{}}{a}, \\ -# m_{t+1} &=& R/(\Gamma_{t+1} \psi_{t+1}) a_t + \theta_{t+1}, \\ -# \mathbb{E}[\psi]=\mathbb{E}[\theta] &=& 1, \\ -# u(c) &=& \frac{c^{1-\rho}}{1-\rho}. -# \end{eqnarray*} -# -# HARK represents agents with this kind of problem as instances of the class $\texttt{IndShockConsumerType}$. To create an $\texttt{IndShockConsumerType}$, we must specify the same set of parameters as for a $\texttt{PerfForesightConsumerType}$, as well as an artificial borrowing constraint $\underline{a}$ and a sequence of income shocks. It's easy enough to pick a borrowing constraint -- say, zero -- but how would we specify the distributions of the shocks? Can't the joint distribution of permanent and transitory shocks be just about anything? -# -# _Yes_, and HARK can handle whatever correlation structure a user might care to specify. However, the default behavior of $\texttt{IndShockConsumerType}$ is that the distribution of permanent income shocks is mean one lognormal, and the distribution of transitory shocks is mean one lognormal augmented with a point mass representing unemployment. 
The distributions are independent of each other by default, and by default are approximated with $N$ point equiprobable distributions. -# -# Let's make an infinite horizon instance of $\texttt{IndShockConsumerType}$ with the same parameters as our original perfect foresight agent, plus the extra parameters to specify the income shock distribution and the artificial borrowing constraint. As before, we'll make a dictionary: -# -# -# | Param | Description | Code | Value | -# | :---: | --- | --- | :---: | -# | $\underline{a}$ | Artificial borrowing constraint | $\texttt{BoroCnstArt}$ | 0.0 | -# | $\sigma_\psi$ | Underlying stdev of permanent income shocks | $\texttt{PermShkStd}$ | 0.1 | -# | $\sigma_\theta$ | Underlying stdev of transitory income shocks | $\texttt{TranShkStd}$ | 0.1 | -# | $N_\psi$ | Number of discrete permanent income shocks | $\texttt{PermShkCount}$ | 7 | -# | $N_\theta$ | Number of discrete transitory income shocks | $\texttt{TranShkCount}$ | 7 | -# | $\mho$ | Unemployment probability | $\texttt{UnempPrb}$ | 0.05 | -# | $\underset{\bar{}}{\theta}$ | Transitory shock when unemployed | $\texttt{IncUnemp}$ | 0.3 | - -# %% code_folding=[] -# This cell defines a parameter dictionary for making an instance of IndShockConsumerType. - -IndShockDictionary = { - "CRRA": 2.5, # The dictionary includes our original parameters... - "Rfree": 1.03, - "DiscFac": 0.96, - "LivPrb": [0.98], - "PermGroFac": [1.01], - "PermShkStd": [ - 0.1 - ], # ... and the new parameters for constructing the income process. - "PermShkCount": 7, - "TranShkStd": [0.1], - "TranShkCount": 7, - "UnempPrb": 0.05, - "IncUnemp": 0.3, - "BoroCnstArt": 0.0, - "aXtraMin": 0.001, # aXtra parameters specify how to construct the grid of assets. - "aXtraMax": 50.0, # Don't worry about these for now - "aXtraNestFac": 3, - "aXtraCount": 48, - "aXtraExtra": [None], - "vFuncBool": False, # These booleans indicate whether the value function should be calculated - "CubicBool": False, # and whether to use cubic spline interpolation. You can ignore them. - "aNrmInitMean": -10.0, - "aNrmInitStd": 0.0, # These parameters specify the (log) distribution of normalized assets - "pLvlInitMean": 0.0, # and permanent income for agents at "birth". They are only relevant in - "pLvlInitStd": 0.0, # simulation and you don't need to worry about them. - "PermGroFacAgg": 1.0, - "T_retire": 0, # What's this about retirement? ConsIndShock is set up to be able to - "UnempPrbRet": 0.0, # handle lifecycle models as well as infinite horizon problems. Swapping - "IncUnempRet": 0.0, # out the structure of the income process is easy, but ignore for now. - "T_age": None, - "T_cycle": 1, - "cycles": 0, - "AgentCount": 10000, - "tax_rate": 0.0, -} - -# Hey, there's a lot of parameters we didn't tell you about! Yes, but you don't need to -# think about them for now. - -# %% [markdown] -# As before, we need to import the relevant subclass of $\texttt{AgentType}$ into our workspace, then create an instance by passing the dictionary to the class as if the class were a function. - -# %% -from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType - -IndShockExample = IndShockConsumerType(**IndShockDictionary) - -# %% [markdown] -# Now we can solve our new agent's problem just like before, using the $\texttt{solve}$ method. 
- -# %% -IndShockExample.solve() -plot_funcs(IndShockExample.solution[0].cFunc, 0.0, 10.0) - -# %% [markdown] -# ## Changing Constructed Attributes -# -# In the parameter dictionary above, we chose values for HARK to use when constructing its numeric representation of $F_t$, the joint distribution of permanent and transitory income shocks. When $\texttt{IndShockExample}$ was created, those parameters ($\texttt{PermShkStd}$, etc) were used by the **constructor** or **initialization** method of $\texttt{IndShockConsumerType}$ to construct an attribute called $\texttt{IncomeDstn}$. -# -# Suppose you were interested in changing (say) the amount of permanent income risk. From the section above, you might think that you could simply change the attribute $\texttt{PermShkStd}$, solve the model again, and it would work. -# -# That's _almost_ true-- there's one extra step. $\texttt{PermShkStd}$ is a primitive input, but it's not the thing you _actually_ want to change. Changing $\texttt{PermShkStd}$ doesn't actually update the income distribution... unless you tell it to (just like changing an agent's preferences does not change the consumption function that was stored for the old set of parameters -- until you invoke the $\texttt{solve}$ method again). In the cell below, we invoke the method $\texttt{update_income_process}$ so HARK knows to reconstruct the attribute $\texttt{IncomeDstn}$. - -# %% -OtherExample = deepcopy( -    IndShockExample -)  # Make a copy so we can compare consumption functions -OtherExample.assign_parameters( -    PermShkStd=[0.2] -)  # Double permanent income risk (note that it's a one element list) -OtherExample.update_income_process()  # Call the method to reconstruct the representation of F_t -OtherExample.solve() - -# %% [markdown] -# In the cell below, use your blossoming HARK skills to plot the consumption function for $\texttt{IndShockExample}$ and $\texttt{OtherExample}$ on the same figure. - -# %% -# Use the line(s) below to plot the consumption functions against each other diff --git a/examples/HowWeSolveIndShockConsumerType/HowWeSolveIndShockConsumerType.py b/examples/HowWeSolveIndShockConsumerType/HowWeSolveIndShockConsumerType.py deleted file mode 100644 index bdc883e38..000000000 --- a/examples/HowWeSolveIndShockConsumerType/HowWeSolveIndShockConsumerType.py +++ /dev/null @@ -1,204 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% [markdown] -# # How we solve a model defined by the `IndShockConsumerType` class -# The IndShockConsumerType represents the work-horse consumption-savings model with temporary and permanent shocks to income, finite or infinite horizons, CRRA utility and more. In this DemARK we take you through the steps involved in solving one period of such a model. The inheritance chains can be a little long, so figuring out where all the parameters and methods come from can be a bit confusing. Hence this map! The intention is to make it easier to know how to inherit from IndShockConsumerType in the sense that you know where to look for specific solver logic, but also so you can figure out which methods to overwrite or supplement in your own `AgentType` and solver!
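-# -# Before diving in, here is a bird's-eye view of the whole one-period solution process as a stylized sketch (ours, not HARK's verbatim source); every name that appears in it is explained in the sections below: -# ``` -# # Schematic dispatch logic of solveConsIndShock (simplified) -# if vFuncBool or CubicBool: -#     solver = ConsIndShockSolver(...)       # full-featured solver -# else: -#     solver = ConsIndShockSolverBasic(...)  # lightweight solver -# solver.prepare_to_solve()      # grab next period's solution, define constraints -# solution_now = solver.solve()  # run the EGM steps, return this period's solution -# ```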
-# ## The `solveConsIndShock` function -# In HARK, a period's problem is always solved by the callable (function or callable object instance) stored in the field `solve_one_period`. In the case of `IndShockConsumerType`, this function is called `solveConsIndShock`. The function accepts a number of arguments that it uses to construct an instance of either a `ConsIndShockSolverBasic` or a `ConsIndShockSolver`. These solvers both have the methods `prepare_to_solve` and `solve`, which we will have a closer look at in this notebook. This means that the logic of `solveConsIndShock` is basically: -# -# 1. Check if cubic interpolation (`CubicBool`) or construction of the value function interpolant (`vFuncBool`) is requested. Construct an instance of `ConsIndShockSolverBasic` if neither is requested, else construct a `ConsIndShockSolver`. Call this `solver`. -# 1. Call `solver.prepare_to_solve()` -# 1. Call `solver.solve()` and return the output as the current solution. - -# %% [markdown] -# ### Two types of solvers -# As mentioned above, `solve_one_period` will construct an instance of the class `ConsIndShockSolverBasic` or `ConsIndShockSolver`. The main difference is whether it uses cubic interpolation or if it explicitly constructs a value function approximation. The choice and construction of a solver instance is bullet 1) from above. -# -# #### What happens upon construction -# Neither of the two solvers has its own `__init__`. `ConsIndShockSolver` inherits from `ConsIndShockSolverBasic`, which in turn inherits from `ConsIndShockSetup`. `ConsIndShockSetup` inherits from `ConsPerfForesightSolver`, which itself is just an `Object`, so we get the inheritance structure -# -# - `ConsPerfForesightSolver` $\leftarrow$ `ConsIndShockSetup` $\leftarrow$ `ConsIndShockSolverBasic` $\leftarrow$ `ConsIndShockSolver` -# -# When one of the two classes at the end of the inheritance chain is called, it will call `ConsIndShockSetup.__init__(args...)`. This takes a whole list of fixed inputs that then get assigned to the object through a -# ``` -# ConsIndShockSetup.assign_parameters(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool) -# ``` -# call, which then calls -# ``` -# ConsPerfForesightSolver.assign_parameters(self,solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac) -# ``` -# We're getting kind of detailed here, but it is simply to help us understand the inheritance structure. The methods are quite straightforward, and simply assign the list of variables to self. The ones that do not get assigned by the `ConsPerfForesightSolver` method get assigned by the `ConsIndShockSetup` method instead. -# -# -# After all the input parameters are set, we update the utility function definitions. Remember that we restrict ourselves to CRRA utility functions, and these are parameterized with the scalar we call `CRRA` in HARK. We use the two-argument CRRA utility (and derivatives, inverses, etc) from `HARK.utilities`, so we need to create a `lambda` (an anonymous function) according to the fixed `CRRA` we have chosen. This gets done through a call to -# -# ``` -# ConsIndShockSetup.defUtilityFuncs() -# ``` -# that itself calls -# ``` -# ConsPerfForesightSolver.defUtilityFuncs() -# ``` -# Again, we wish to emphasize the inheritance structure.
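-# For concreteness, the lambda-freezing idea is roughly the following (a stylized sketch, not HARK's verbatim source; we assume only the two-argument `CRRAutility` functions from `HARK.utilities` mentioned above): -# ``` -# from HARK.utilities import CRRAutility, CRRAutilityP -# -# # Freeze the risk-aversion argument at the chosen scalar CRRA, leaving -# # one-argument utility and marginal utility functions for the solver to use -# u = lambda c: CRRAutility(c, CRRA) -# uP = lambda c: CRRAutilityP(c, CRRA) -# ```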
The method in `ConsPerfForesightSolver` defines the most basic utility functions (utility, its marginal and its marginal marginal), and `ConsIndShockSolver` adds additional functions (marginal of inverse, inverse of marginal, marginal of inverse of marginal, and optionally inverse if `vFuncBool` is true). -# -# To sum up, the `__init__` method lives in `ConsIndShockSetup`, calls `assign_parameters` and `defUtilityFuncs` from `ConsPerfForesightSolver`, and defines its own methods with the same names, which add some additional functions used to solve the `IndShockConsumerType` using EGM. The main things controlled by the end-user are whether cubic interpolation should be used, `CubicBool`, and whether the value function should be explicitly formed, `vFuncBool`. -# ### Prepare to solve -# We are now in bullet 2) from the list above. The `prepare_to_solve` method is all about grabbing relevant information from next period's solution and calculating some limiting solutions. It comes from `ConsIndShockSetup` and calls two methods: -# -# 1. `ConsIndShockSetup.setAndUpdateValues(self.solution_next,self.IncomeDstn,self.LivPrb,self.DiscFac)` -# 2. `ConsIndShockSetup.defBoroCnst(self.BoroCnstArt)` -# -# First, we have `setAndUpdateValues`. The main purpose is to grab the relevant vectors that represent the shock distributions, the effective discount factor, and the value function (marginal, level, marginal marginal, depending on the options). It also calculates some limiting marginal propensities to consume and human wealth levels. Second, we have `defBoroCnst`. As the name indicates, it calculates the natural borrowing constraint, handles artificial borrowing constraints, and defines the consumption function where the constraint binds (`cFuncNowCnst`). -# -# To sum up, `prepare_to_solve` sets up the stochastic environment and the borrowing constraints the consumer might face. It also grabs interpolants from "next period"'s solution. -# -# ### Solve it! -# The last method that `solveConsIndShock` calls on the `solver` is `solve`. This method essentially has four steps: -# 1. Pre-processing for EGM: `solver.prepare_to_calc_EndOfPrdvP` -# 1. First step of EGM: `solver.calc_EndOfPrdvP` -# 1. Second step of EGM: `solver.make_basic_solution` -# 1. Add MPC and human wealth: `solver.add_MPC_and_human_wealth` -# -# #### Pre-processing for EGM `prepare_to_calc_EndOfPrdvP` -# Find the relevant grid of end-of-period asset values (according to `aXtraGrid` and the natural borrowing constraint) and the next-period values implied by current-period end-of-period assets and stochastic elements. The method stores the following in `self`: -# -# 1. values of permanent shocks in `PermShkVals_temp` -# 1. shock probabilities in `ShkPrbs_temp` -# 1. next period resources in `mNrmNext` -# 1. current grid of end-of-period assets in `aNrmNow` -# -# The method also returns `aNrmNow`. The definition is in `ConsIndShockSolverBasic` and is not overwritten in `ConsIndShockSolver`. -# -# #### First step of EGM `calc_EndOfPrdvP` -# Find the marginal value of having some level of end-of-period assets today. End-of-period assets as well as stochastics imply next-period resources at the beginning of the period, calculated above. Return the result as `EndOfPrdvP`. -# -# #### Second step of EGM `make_basic_solution` -# Apply the inverse marginal utility function to the nodes from above to find (m, c) pairs for the new consumption function in `get_points_for_interpolation` and create the interpolants in `use_points_for_interpolation`.
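-# The heart of this inversion is just a couple of lines. Here is a stylized sketch (ours, not HARK's exact code), assuming CRRA utility so that $u'(c) = c^{-\rho}$: -# ``` -# # Invert the first-order condition u'(c) = EndOfPrdvP for consumption, -# # then back out the endogenous market-resources gridpoints m = a + c -# cNrm = EndOfPrdvP ** (-1.0 / CRRA) -# mNrm = solver.aNrmNow + cNrm -# ```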
The `use_points_for_interpolation` step then constructs the `ConsumerSolution` that contains the current consumption function `cFunc`, the current marginal value function `vPfunc`, and the smallest possible resource level `mNrmMinNow`. -# -# #### Add MPC and human wealth `add_MPC_and_human_wealth` -# Add values calculated in `defBoroCnst` now that we have a solution object to put them in. -# -# #### Special to the non-Basic solver -# We are now done, but in the `ConsIndShockSolver` (non-`Basic`!) solver there are a few extra steps. We add steady state m, and depending on the values of `vFuncBool` and `CubicBool` we also add the value function and the marginal marginal value function. - -# %% [markdown] -# ## Let's try it in action! -# First, we define a standard lifecycle model and solve it; then we construct a solver for a single period by hand. - -# %% -from HARK.ConsumptionSaving.ConsIndShockModel import ( -    IndShockConsumerType, -    init_lifecycle, -) -import numpy as np -import matplotlib.pyplot as plt - -LifecycleExample = IndShockConsumerType(**init_lifecycle) -LifecycleExample.cycles = ( -    1  # Make this consumer live a sequence of periods exactly once -) -LifecycleExample.solve() - -# %% [markdown] -# Let's have a look at the solution for the first time period by plotting its consumption function. We should be able to reproduce it with the solver below. - -# %% -from HARK.utilities import plot_funcs - -plot_funcs( -    [LifecycleExample.solution[0].cFunc], LifecycleExample.solution[0].mNrmMin, 10 -) - -# %% [markdown] -# Let us then create a solver for the first period. - -# %% -from HARK.ConsumptionSaving.ConsIndShockModel import ConsIndShockSolverBasic - -solver = ConsIndShockSolverBasic( -    LifecycleExample.solution[1], -    LifecycleExample.IncShkDstn[0], -    LifecycleExample.LivPrb[0], -    LifecycleExample.DiscFac, -    LifecycleExample.CRRA, -    LifecycleExample.Rfree, -    LifecycleExample.PermGroFac[0], -    LifecycleExample.BoroCnstArt, -    LifecycleExample.aXtraGrid, -    LifecycleExample.vFuncBool, -    LifecycleExample.CubicBool, -) - -# %% -solver.prepare_to_solve() - -# %% [markdown] -# Many important values are now calculated and stored in `solver`, such as the effective discount factor, the smallest permanent income shock, and more. - -# %% -solver.DiscFacEff - -# %% -solver.PermShkMinNext - -# %% [markdown] -# These values were calculated in `setAndUpdateValues`. In `defBoroCnst`, which was also called, several things were calculated, for example the consumption function defined by the borrowing constraint. - -# %% -plot_funcs([solver.cFuncNowCnst], solver.mNrmMinNow, 10) - -# %% [markdown] -# Then we set up all the grids, grab the discrete shock distributions, and construct the state grids in `prepare_to_calc_EndOfPrdvP`. - -# %% -solver.prepare_to_calc_EndOfPrdvP() - -# %% [markdown] -# Then we calculate the marginal value of end-of-period assets given the stochastic environment and the current grids. - -# %% -EndOfPrdvP = solver.calc_EndOfPrdvP() - -# %% [markdown] -# Then, we essentially just have to construct the (resource, consumption) pairs by completing the EGM step, and construct the interpolants by using the knowledge that the limiting solutions are those of the perfect foresight model. This is done with `make_basic_solution` as discussed above. - -# %% -solution = solver.make_basic_solution( -    EndOfPrdvP, solver.aNrmNow, solver.make_linear_cFunc -) - -# %% [markdown] -# Lastly, we add the MPC and human wealth quantities we calculated in the method that prepared the solution of this period.
- -# %% -solver.add_MPC_and_human_wealth(solution) - -# %% [markdown] -# All that is left is to verify that the solution in `solution` is identical to `LifecycleExample.solution[0]`. We can plot them against each other: - -# %% -plot_funcs( -    [LifecycleExample.solution[0].cFunc, solution.cFunc], -    LifecycleExample.solution[0].mNrmMin, -    10, -) - -# %% [markdown] -# It's probably even clearer, though, if we just subtract the function values from each other at some grid. - -# %% -eval_grid = np.linspace(0, 20, 200) -LifecycleExample.solution[0].cFunc(eval_grid) - solution.cFunc(eval_grid) diff --git a/examples/Interpolation/CubicInterp.py b/examples/Interpolation/CubicInterp.py deleted file mode 100644 index 9a87fca70..000000000 --- a/examples/Interpolation/CubicInterp.py +++ /dev/null @@ -1,85 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% [markdown] -# # Cubic Interpolation with Scipy - -# %% pycharm={"name": "#%%\n"} -import matplotlib.pyplot as plt -import numpy as np -from scipy.interpolate import CubicHermiteSpline - -from HARK.interpolation import CubicInterp, CubicHermiteInterp - -# %% [markdown] -# ### Creating a HARK wrapper for scipy's CubicHermiteSpline -# -# The class CubicHermiteInterp in HARK.interpolation implements a HARK wrapper for scipy's CubicHermiteSpline. A HARK wrapper is needed due to the way interpolators are used in solution methods across HARK, and in particular due to the `distance_criteria` attribute used for VFI convergence. - -# %% pycharm={"name": "#%%\n"} -x = np.linspace(0, 10, num=11, endpoint=True) -y = np.cos(-(x**2) / 9.0) -dydx = 2.0 * x / 9.0 * np.sin(-(x**2) / 9.0) - -f = CubicInterp(x, y, dydx, lower_extrap=True) -f2 = CubicHermiteSpline(x, y, dydx) -f3 = CubicHermiteInterp(x, y, dydx, lower_extrap=True) - -# %% [markdown] -# Above are 3 interpolators, which are: -# 1. **CubicInterp** from HARK.interpolation -# 2. **CubicHermiteSpline** from scipy.interpolate -# 3. **CubicHermiteInterp** hybrid newly implemented in HARK.interpolation -# -# Below we see that they behave in much the same way. - -# %% pycharm={"name": "#%%\n"} -xnew = np.linspace(0, 10, num=41, endpoint=True) - -plt.plot(x, y, "o", xnew, f(xnew), "-", xnew, f2(xnew), "--", xnew, f3(xnew), "-.") -plt.legend(["data", "hark", "scipy", "hark_new"], loc="best") -plt.show() - -# %% [markdown] -# We can also verify that **CubicHermiteInterp** works as intended when extrapolating. Scipy's **CubicHermiteSpline** behaves differently when extrapolating, as it extrapolates using the last polynomial, whereas HARK implements linear decay extrapolation, so it is not shown below. - -# %% pycharm={"name": "#%%\n"} -x_out = np.linspace(-1, 11, num=41, endpoint=True) - -plt.plot(x, y, "o", x_out, f(x_out), "-", x_out, f3(x_out), "-.") -plt.legend(["data", "hark", "hark_new"], loc="best") -plt.show() - -# %% [markdown] -# ### Timings -# -# Below we can compare timings for interpolation and extrapolation among the 3 interpolators. As expected, `scipy`'s CubicHermiteSpline (`f2` below) is the fastest, but it's not HARK-compatible. `HARK.interpolation`'s CubicInterp (`f`) is the slowest, and `HARK.interpolation`'s new CubicHermiteInterp (`f3`) is somewhere in between.
- -# %% pycharm={"name": "#%%\n"} -# %timeit f(xnew) -# %timeit f(x_out) - -# %% pycharm={"name": "#%%\n"} -# %timeit f2(xnew) -# %timeit f2(x_out) - -# %% pycharm={"name": "#%%\n"} -# %timeit f3(xnew) -# %timeit f3(x_out) - -# %% [markdown] pycharm={"name": "#%%\n"} -# Notice in particular the difference between interpolating and extrapolating for the new **CubicHermiteInterp**. The difference comes from having to calculate the extrapolation "by hand", since `HARK` uses linear decay extrapolation, whereas for interpolation it returns `scipy`'s result directly. - -# %% diff --git a/examples/Interpolation/DecayInterp.py b/examples/Interpolation/DecayInterp.py deleted file mode 100644 index dfbc4a0dc..000000000 --- a/examples/Interpolation/DecayInterp.py +++ /dev/null @@ -1,237 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% [markdown] -# # Interpolators with "decay" in HARK -# -# ## Preliminaries -# -# Suppose we have an interpolator $f(\cdot):\mathbb{R}^n\rightarrow \mathbb{R}$ that is set up over a Cartesian grid. Denote the greatest points of the i-th dimension grid with $\bar{x}_i$. Suppose we have some limiting function $g(\cdot): \mathbb{R}^n\rightarrow \mathbb{R}$ and we want to construct some enhanced function $h(\cdot):\mathbb{R}^n\rightarrow \mathbb{R}$ such that: -# - $h(x) = f(x)$ when $x$ falls inside of the limits of $f$'s grids, that is $x_i \leq \bar{x}_i$ $\forall i=1,...,n$. -# - $h(x)\rightarrow g(x)$ when $x\rightarrow\infty$ in any of its dimensions. -# -# We might want to construct such functions if we are worried about the performance of our interpolator $f(\cdot)$ when approximating values that fall far off its grid. As demonstrated in Section 5.10 of ["Solution Methods for Microeconomic Dynamic Stochastic Optimization Problems"](https://llorracc.github.io/SolvingMicroDSOPs/) this situation arises when approximating the solution of consumption-saving problems: extrapolated solutions can have characteristics that are at odds with the basic theoretical properties of our problems. Fortunately, it is often possible to analytically solve the limiting version of these problems---for instance a version of a consumption-saving problem where wealth is infinitely larger than income---and to use these analytical solutions $g(\cdot)$ to moderate the extrapolating behavior of our numerical solutions. -# -# This notebook exemplifies the use of HARK's `econforgeinterp.DecayInterp` class, which implements n-dimensional interpolators that asymptotically approach a given analytical function off their grids. - -# %% -# Setup -import numpy as np -import matplotlib.pyplot as plt -from HARK.econforgeinterp import LinearFast, DecayInterp -from HARK.interpolation import LinearInterp - -# %% [markdown] -# ## Basic use of the `DecayInterp` class -# -# `DecayInterp` requires two basic inputs: -# - `interp`, which is the interpolator $f(\cdot)$. It must be an instance of the `HARK.econforgeinterp.LinearFast` class. -# - `limit_fun`, which is the limiting function $g(\cdot)$. It must receive the same number of inputs as `interp` and be able to take `numpy` arrays as inputs. -# -# And that's it! -# -# ### An example with a 2D function -# -# Imagine an "$h(\cdot)$" type of function of two dimensions for which: -# - $f(x,y) = 2x + y$.
-# - $g(x,y) = \sqrt{x} - \sin(y)$. -# The following cell creates a `DecayInterp` object representing such a function. - -# %% -# Construct the interpolator - -# Grids -x_grid = np.linspace(0, 10, 11) -y_grid = np.linspace(0, 10, 11) -x, y = np.meshgrid(x_grid, y_grid, indexing="ij") -# Function value -z = 2 * x + y -# Create interpolator -interp = LinearFast(z, [x_grid, y_grid]) - -# Limit function -lim_fun = lambda x, y: np.sqrt(x) - np.sin(y) - -# Construct the interpolator with decay extrapolation (h function) -my_decay_interp = DecayInterp( -    interp=interp, -    limit_fun=lim_fun, -) - -# %% [markdown] -# The following cell plots the function. - -# %% -# Create some points to evaluate the function -x_ev, y_ev = np.meshgrid( -    np.linspace(5, 30, 100), -    np.linspace(5, 30, 100), -    indexing="ij", -) - -# Plot -fig, ax = plt.subplots(subplot_kw={"projection": "3d"}) -ax.plot_surface(x_ev, y_ev, my_decay_interp(x_ev, y_ev)) -plt.show() - -# %% [markdown] -# Notice that the cell above defined $10$ as the greatest gridpoint in both dimensions $x$ and $y$. Therefore, our plot should look like $2x + y$ in the square $[0,10]^2$, and drift towards $\sqrt{x} - \sin(y)$ continuously when $x > 10$ or $y > 10$. -# -# This is indeed the case, but there is a 'kink' that occurs along the $x=10$ and $y=10$ lines: the change in the behavior of the function is abrupt, and we might not want that depending on our application. If we want to avoid this sort of behavior, or control what happens in points that are outside the interpolator's grid but close to it, we have to tinker with the way in which we combine the $f(\cdot)$ and $g(\cdot)$ functions. `DecayInterp` refers to the way in which it combines the functions as its `decay_method`. -# -# The class currently has three different decay methods: -# - "`decay_prop`", the default. -# - "`decay_hark`", which matches `HARK.interpolation.LinearInterp`'s decay method in the 1D case. -# - "`paste`". -# -# We now explain each method. - -# %% [markdown] -# ## `decay_method=="decay_prop"` -# -# Imagine some $x$ that is outside of $f$'s grid in at least one dimension, that is, $\exists i\,\, x_i > \bar{x}_i$. Denote with $\tilde{x}$ the point in $\mathbb{R}^n$ that is inside the grid of $f$ and is closest to $x$; note that $\tilde{x} = \min\{x, \bar{x}\}$ with the minimum taken element-wise. -# -# When `decay_method=="decay_prop"`, `DecayInterp` calculates a standardized distance between $x$ and $\tilde{x}$ as $$d(x, \tilde{x}) = \sum_{i=1}^n \left| \frac{x_i - \tilde{x}_i}{\bar{x}_i} \right|$$ and uses it to compute the combination between the extrapolated value $f(x)$ and the limit value $g(x)$ as -# -# \begin{equation} -# h(x) = e^{-d(x,\tilde{x})} \times f(x) + (1 - e^{-d(x,\tilde{x})}) \times g(x) -# \end{equation} -# -# Notice that $f(x)$ is extrapolated using the provided interpolator. Its behavior will depend on the extrapolation options of the object you provide. - -# %% [markdown] -# ## `decay_method=="decay_hark"` -# -# The method `"decay_hark"` is an N-dimensional generalization of the method used by HARK's 1D linear interpolators (`HARK.interpolation.LinearInterp`) when a limiting function is provided. This method tries to preserve the rate at which the approximated function $f(\cdot)$ approaches the limiting function $g(\cdot)$ when the edge of the grid is reached. -# -# To explain this method intuitively, start with a one-dimensional case. Let $\bar{x}$ be the largest gridpoint and consider some $x>\bar{x}$.
-# -# We want the distance between functions $h(\cdot)$ and $g(\cdot)$ to close at a constant rate as we move along $x$. Calling that constant rate $K$ for now, this requirement can be expressed as -# \begin{equation} -# \frac{d (g(x) - h(x))/d x}{g(x) - h(x)} = K. -# \end{equation} -# This is a simple differential equation with a known family of solutions of a form that we can express as -# \begin{equation} -# g(x) - h(x) = A e^{K\times(x - \bar{x})} -# \end{equation} -# for any constant $A$. -# -# Now we can obtain $A$ and $K$ from the restrictions we want to impose on our particular solution. First, we want $h(\bar{x})=f(\bar{x})$ so that there is no jump at the edge of the grid. This implies $$A = g(\bar{x}) - f(\bar{x}).$$ -# -# Finally, we want the rate of closing of the difference between the functions to be the same that it was exactly at the edge of the grid. That is -# \begin{equation*} -# K = \left.\frac{d (g(x) - f(x))/d x}{g(x) - f(x)}\right|_{x=\bar{x}} = \frac{g'(\bar{x}) - f'(\bar{x})}{g(\bar{x}) - f(\bar{x})} -# \end{equation*} -# -# Putting everything together, we have -# \begin{equation*} -# h(x) = g(x) - A\times e^{K\times(x - \bar{x})},\qquad A = g(\bar{x}) - f(\bar{x}) ,\qquad K = \frac{g'(\bar{x}) - f'(\bar{x})}{g(\bar{x}) - f(\bar{x})} -# \end{equation*} -# -# To ensure that $h(x)\rightarrow g(x)$ as $x\rightarrow\infty$, $K$ must be negative in the above expression, so we replace $K$ with $-|K|$. -# -# The multi-dimensional version of this interpolator simply replaces the expression for $K$ above with -# \begin{equation*} -# K=\frac{1}{g(\bar{x}) - f(\bar{x})}\times(\nabla g(\bar{x}) - \nabla f(\bar{x})). -# \end{equation*} - -# %% [markdown] -# ## `decay_method=="paste"` -# -# The option `decay_method=="paste"` is different from the other two in the sense that it does not enforce $h(x)\rightarrow g(x)$ when $x\rightarrow\infty$. Instead, it "pastes" $g(\cdot)$ on top of $f(\cdot)$ so that, when extrapolating, $h$ behaves like $g$ with a level shift that makes it coincide with $f$ at the edge of the grid. This is useful if we know that our function behaves like $g(\cdot)$ in the limit in its derivatives, but might not exactly coincide in levels. -# -# Imagine some $x$ that is outside of $f$'s grid in at least one dimension, that is, $\exists i\,\, x_i > \bar{x}_i$. Denote with $\tilde{x}$ the point in $\mathbb{R}^n$ that is inside the grid of $f$ and is closest to $x$; note that $\tilde{x} = \min\{x, \bar{x}\}$ with the minimum taken element-wise. `DecayInterp` will return -# -# $$ -# h(x) = g(x) + (f(\tilde{x}) - g(\tilde{x})).
-# $$ - -# %% [markdown] -# ## An illustration of the different "decay methods" - -# %% -# Compare decay methods - -x = np.linspace(0, 1, 20) -interp = LinearFast(np.sqrt(x), [x]) -limit_fun = lambda x: 1 + 0.3 * x -limit_grad = lambda x: [0.3 * np.ones_like(x)] - -dec_pr = DecayInterp( -    interp, -    limit_fun=limit_fun, -    limit_grad=limit_grad, -    extrap_method="decay_prop", -) - -dec_pa = DecayInterp( -    interp, -    limit_fun=limit_fun, -    limit_grad=limit_grad, -    extrap_method="paste", -) - -dec_ha = DecayInterp( -    interp, -    limit_fun=limit_fun, -    limit_grad=limit_grad, -    extrap_method="decay_hark", -) - -x_ev = np.linspace(0, 2, 200) - -plt.figure() -plt.plot(x_ev, dec_pr(x_ev), label="prop") -plt.plot(x_ev, dec_pa(x_ev), label="paste") -plt.plot(x_ev, dec_ha(x_ev), label="decay_hark") -plt.plot(x_ev, limit_fun(x_ev), "--", label="limit") -plt.legend() -plt.show() - -# %% [markdown] -# ## Comparing `econforge.DecayInterp` with `interpolation.LinearInterp` - -# %% -# Compare with base HARK -lim_slope = 0.3 -lim_inter = 1.0 - -# Irregular grid -x = np.linspace(0, 1, 50) -y = np.sqrt(x) - -# HARK's base implementation -base_lim_interp = LinearInterp(x, y, intercept_limit=lim_inter, slope_limit=lim_slope) - -# New implementation -efor_lim_interp = DecayInterp( -    LinearFast(y, [x]), -    limit_fun=lambda x: lim_inter + lim_slope * x, -    limit_grad=lambda x: [lim_slope * np.ones_like(x)], -    extrap_method="decay_hark", -) - -# Evaluate them -x_eval = np.linspace(0, 2, 50) -base_vals = base_lim_interp(x_eval) -efor_vals = efor_lim_interp(x_eval) - -# Plot and compare -plt.figure() -plt.plot(x_eval, base_vals, label="base") -plt.plot(x_eval, efor_vals, ".", label="efor") -plt.plot(x_eval, lim_inter + x_eval * lim_slope, "--r", label="limit") -plt.legend() -plt.show() diff --git a/examples/Journeys/AzureMachineLearning.py b/examples/Journeys/AzureMachineLearning.py deleted file mode 100644 index 8df5f21d1..000000000 --- a/examples/Journeys/AzureMachineLearning.py +++ /dev/null @@ -1,132 +0,0 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: collapsed,code_folding,heading_collapsed,hidden,jupyter,pycharm -# cell_metadata_json: true -# formats: ipynb,py:percent -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# --- - -# %% [markdown] -# # [Azure Machine Learning](https://docs.microsoft.com/en-us/azure/machine-learning/) - -# %% [markdown] -# ## [Quickstart: Create workspace resources you need to get started with Azure Machine Learning](https://docs.microsoft.com/en-us/azure/machine-learning/quickstart-create-resources) -# -# 1. Create an Azure account -- the [GitHub Student Developer Pack](https://education.github.com/pack) provides a free account with $100 credit -# 2. Follow ["Create the workspace"](https://docs.microsoft.com/en-us/azure/machine-learning/quickstart-create-resources#create-the-workspace) to start an Azure Machine Learning Studio -# 3. Follow ["Create compute instance"](https://docs.microsoft.com/en-us/azure/machine-learning/quickstart-create-resources#instance) to create 2 compute instances (don't use the defaults!). Here, you can create a 1-core (Standard_DS1_v2: 1 core, 3.5 GB RAM, 7 GB disk) and a 4-core (Standard_DS3_v2: 4 cores, 14 GB RAM, 28 GB disk) instance. - -# %% [markdown] {"pycharm": {"name": "#%% md\n"}} -# ## Jupyter notebooks on Azure -# -# 1. Download this notebook as a `.ipynb` file to your PC. -# 2.
On the home page of Microsoft Azure Machine Learning Studio, you will see a panel with `Notebooks`. Click on `Start now`. -# 3. Click on `Terminal` and type `pip install git+https://github.com/econ-ark/HARK.git` on both computes. You can switch computes on the top right part of the window. -# 4. Close and terminate the terminals. -# 5. Click on `+ Create` and `Upload files`. Upload this notebook. -# 6. Make sure the kernel is set to `Python 3.8 AzureML` in the upper right corner, next to the compute. -# -# Now, you should be able to run this notebook on either compute (switch computes on top right again) and test out the differences in computational resources. When you run into an issue on your desktop computer or laptop, you can take advantage of cloud computing resources. - -# %% [markdown] -# # IndShockConsumerTypeFast Documentation -# ## Consumption-Saving model with Idiosyncratic Income Shocks - -# %% {"collapsed": false, "jupyter": {"outputs_hidden": false}, "pycharm": {"name": "#%%\n"}} -import matplotlib.pyplot as plt -import numpy as np - -# Initial imports and notebook setup, click arrow to show -from HARK.ConsumptionSaving.ConsIndShockModelFast import IndShockConsumerTypeFast -from HARK.utilities import plot_funcs_der, plot_funcs - -mystr = lambda number: "{:.4f}".format(number) - -# %% [markdown] -# The module `HARK.ConsumptionSaving.ConsIndShockModelFast` concerns consumption-saving models with idiosyncratic shocks to (non-capital) income. All of the models assume CRRA utility with geometric discounting, no bequest motive, and income shocks that are fully transitory or fully permanent. -# -# $\newcommand{\CRRA}{\rho}$ -# $\newcommand{\DiePrb}{\mathsf{D}}$ -# $\newcommand{\PermGroFac}{\Gamma}$ -# $\newcommand{\Rfree}{\mathsf{R}}$ -# $\newcommand{\DiscFac}{\beta}$ - -# %% [markdown] -# ## Statement of the idiosyncratic income shocks model -# -# Suppose we want to solve a model like the one analyzed in [BufferStockTheory](https://www.econ2.jhu.edu/people/ccarroll/papers/BufferStockTheory/), which has all the same features as the perfect foresight consumer, plus idiosyncratic shocks to income each period. Agents with this kind of model are represented by the class `IndShockConsumerTypeFast`. -# -# Specifically, this type of consumer receives two income shocks at the beginning of each period: a completely transitory shock $\newcommand{\tShkEmp}{\theta}{\tShkEmp_t}$ and a completely permanent shock $\newcommand{\pShk}{\psi}{\pShk_t}$. Moreover, the agent is subject to a borrowing limit: the ratio of end-of-period assets $A_t$ to permanent income $P_t$ must be greater than $\underline{a}$. As with the perfect foresight problem, this model is stated in terms of *normalized* variables, dividing all real variables by $P_t$: -# -# \begin{eqnarray*} -# v_t(m_t) &=& \max_{c_t} {~} u(c_t) + \DiscFac (1-\DiePrb_{t+1}) \mathbb{E}_{t} \left[ (\PermGroFac_{t+1}\psi_{t+1})^{1-\CRRA} v_{t+1}(m_{t+1}) \right], \\ -# a_t &=& m_t - c_t, \\ -# a_t &\geq& \text{$\underline{a}$}, \\ -# m_{t+1} &=& \Rfree/(\PermGroFac_{t+1} \psi_{t+1}) a_t + \theta_{t+1}, \\ -# (\psi_{t+1},\theta_{t+1}) &\sim& F_{t+1}, \\ -# \mathbb{E}[\psi]=\mathbb{E}[\theta] &=& 1, \\ -# u(c) &=& \frac{c^{1-\rho}}{1-\rho}. -# \end{eqnarray*} - -# %% [markdown] -# ## Solving and examining the solution of the idiosyncratic income shocks model -# -# The cell below creates an infinite horizon instance of `IndShockConsumerTypeFast` and solves its model by calling its `solve` method.
- -# %% {"collapsed": false, "jupyter": {"outputs_hidden": false}, "pycharm": {"name": "#%%\n"}} -IndShockExample = IndShockConsumerTypeFast() -IndShockExample.cycles = 0 # Make this type have an infinite horizon -# %time IndShockExample.solve() - -# %% [markdown] -# Because numba just-in-time compiles, we can see the effect of calling the solve method again on run-time. - -# %% {"collapsed": false, "jupyter": {"outputs_hidden": false}, "pycharm": {"name": "#%%\n"}} -# %time IndShockExample.solve() - -# %% [markdown] -# After solving the model, we can examine an element of this type's $\texttt{solution}$: - -# %% {"collapsed": false, "jupyter": {"outputs_hidden": false}, "pycharm": {"name": "#%%\n"}} -print(vars(IndShockExample.solution[0])) - -# %% [markdown] -# The single-period solution to an idiosyncratic shocks consumer's problem has all of the same attributes as in the perfect foresight model, with a couple additions. The solution can include the marginal marginal value of market resources function $\texttt{vPPfunc}$, but this is only constructed if $\texttt{CubicBool}$ is `True`, so that the MPC can be accurately computed; when it is `False`, then $\texttt{vPPfunc}$ merely returns `NaN` everywhere. -# -# The `solveConsIndShock` function calculates steady state market resources and stores it in the attribute $\texttt{mNrmSS}$. This represents the steady state level of $m_t$ if *this period* were to occur indefinitely, but with income shocks turned off. This is relevant in a "one period infinite horizon" model like we've specified here, but is less useful in a lifecycle model. -# -# Let's take a look at the consumption function by plotting it, along with its derivative (the MPC): - -# %% {"collapsed": false, "jupyter": {"outputs_hidden": false}, "pycharm": {"name": "#%%\n"}} -print("Consumption function for an idiosyncratic shocks consumer type:") -plot_funcs(IndShockExample.solution[0].cFunc, IndShockExample.solution[0].mNrmMin, 5) -print("Marginal propensity to consume for an idiosyncratic shocks consumer type:") -plot_funcs_der( - IndShockExample.solution[0].cFunc, IndShockExample.solution[0].mNrmMin, 5 -) - -# %% [markdown] -# The lower part of the consumption function is linear with a slope of 1, representing the *constrained* part of the consumption function where the consumer *would like* to consume more by borrowing-- his marginal utility of consumption exceeds the marginal value of assets-- but he is prevented from doing so by the artificial borrowing constraint. -# -# The MPC is a step function, as the $\texttt{cFunc}$ itself is a piecewise linear function; note the large jump in the MPC where the borrowing constraint begins to bind. -# -# If you want to look at the interpolation nodes for the consumption function, these can be found by "digging into" attributes of $\texttt{cFunc}$: - -# %% [markdown] -# The consumption function in this model is an instance of `LowerEnvelope1D`, a class that takes an arbitrary number of 1D interpolants as arguments to its initialization method. When called, a `LowerEnvelope1D` evaluates each of its component functions and returns the lowest value. Here, the two component functions are the *unconstrained* consumption function-- how the agent would consume if the artificial borrowing constraint did not exist for *just this period*-- and the *borrowing constrained* consumption function-- how much he would consume if the artificial borrowing constraint is binding. 
-# -# The *actual* consumption function is the lower of these two functions, pointwise. We can see this by plotting the component functions on the same figure: - -# %% {"collapsed": false, "jupyter": {"outputs_hidden": false}, "pycharm": {"name": "#%%\n"}} -plot_funcs(IndShockExample.solution[0].cFunc.functions, -0.25, 5.0) - -# %% {"collapsed": false, "jupyter": {"outputs_hidden": false}, "pycharm": {"name": "#%%\n"}} diff --git a/examples/Journeys/Journey-Engineering-Background.py b/examples/Journeys/Journey-Engineering-Background.py deleted file mode 100644 index d168f5874..000000000 --- a/examples/Journeys/Journey-Engineering-Background.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- -# jupyter: -# jupytext: -# formats: ipynb,py:percent -# notebook_metadata_filter: all -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# language_info: -# codemirror_mode: -# name: ipython -# version: 3 -# file_extension: .py -# mimetype: text/x-python -# name: python -# nbconvert_exporter: python -# pygments_lexer: ipython3 -# version: 3.9.16 -# latex_envs: -# LaTeX_envs_menu_present: true -# autoclose: false -# autocomplete: true -# bibliofile: biblio.bib -# cite_by: apalike -# current_citInitial: 1 -# eqLabelWithNumbers: true -# eqNumInitial: 1 -# hotkeys: -# equation: Ctrl-E -# itemize: Ctrl-I -# labels_anchors: false -# latex_user_defs: false -# report_style_numbering: false -# user_envs_cfg: false -# toc: -# base_numbering: 1 -# nav_menu: {} -# number_sections: true -# sideBar: true -# skip_h1_title: false -# title_cell: Table of Contents -# title_sidebar: Contents -# toc_cell: false -# toc_position: {} -# toc_section_display: true -# toc_window_display: false -# widgets: -# application/vnd.jupyter.widget-state+json: -# state: {} -# version_major: 2 -# version_minor: 0 -# --- - -# %% [markdown] -# # Journey: Engineering background -# -# This is the second of the possible journeys into HARK, the Python package designed to solve economic models with heterogeneous agents. As it is a "journey", it is not one big tutorial, but a set of links to notebooks and other resources which will help you understand the different HARK objects and functionalities. -# -# This notebook was designed for users with some skill in programming. It does not require any knowledge of economic theory; we are going to give you only very basic examples (which cannot substitute for macroeconomic textbooks if you want to learn the theory more systematically). -# - -# %% [markdown] -# ## Microeconomic agent-type problems -# -# In economic analysis, one of the most commonly used problems is the "consumer's problem," which is designed to model consumer choices. In this class of problems, a consumer needs to find the optimal set of goods given her resources. In the basic formulation of the problem there is only one good, but it can be consumed in different time periods (let us denote it by $C_t$, where $0\leq t\leq T\leq \infty$ denotes periods). The consumer receives some resources $w_t$ (think wages), which she can invest with interest rate $R$ (denote the investments by $A_t$) or consume. The utility of consumption in period $t=0$ is given by the function $U()$; for later periods it is given by $\beta^t U()$, as the consumer prefers consumption now to consumption in the future, $\beta<1$.
The consumption problem can then be formalized as the maximization problem: -# -# \begin{eqnarray*} -# \max_{C_t}& \sum_{t=0}^T \beta^t U(C_t) -# \end{eqnarray*} -# -# subject to the budget constraint in each period $t$: -# \begin{eqnarray*} -# C_t + A_{t+1} = w_t+RA_t -# \end{eqnarray*} -# -# $U$ is typically assumed to be a constant relative risk aversion (CRRA) function: -# $$ -# U(C)=\frac{C^{1-\rho}}{1-\rho} -# $$ -# -# As you can see, this problem enables us to analyze the consumer's saving decisions. Obviously, the complexity of the problem depends on $w_t$: if it is deterministic, the problem is much easier to solve than when $w_t$ is given by a stochastic process. -# -# |Number | Tutorial | Description| -# | :---- | :---- | :---- | -# |1 |[Gentle_intro_I](https://github.com/econ-ark/DemARK/blob/master/notebooks/Gentle-Intro-To-HARK-PerfForesightCRRA.ipynb) |Here is your first tutorial in HARK, to solve the case when $w_t$ is deterministic| -# |2 |[Gentle_intro_II](https://github.com/econ-ark/DemARK/blob/master/notebooks/Gentle-Intro-To-HARK-Buffer-Stock-Model.ipynb) |The notebook concerning the case when $w_t$ follows an idiosyncratic AR process| -# |3|[Chinese-Growth](https://github.com/econ-ark/DemARK/blob/master/notebooks/Chinese-Growth.ipynb)|The third notebook concerns the case when $w_t$ is subject to both idiosyncratic and aggregate shocks| diff --git a/examples/Journeys/Journey-PhD.py b/examples/Journeys/Journey-PhD.py deleted file mode 100644 index a3cbc3a82..000000000 --- a/examples/Journeys/Journey-PhD.py +++ /dev/null @@ -1,389 +0,0 @@ -# --- -# jupyter: -# jupytext: -# cell_metadata_filter: ExecuteTime,collapsed,-autoscroll -# formats: ipynb,py:percent -# notebook_metadata_filter: all,-widgets,-varInspector -# text_representation: -# extension: .py -# format_name: percent -# format_version: '1.3' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# language_info: -# codemirror_mode: -# name: ipython -# version: 3 -# file_extension: .py -# mimetype: text/x-python -# name: python -# nbconvert_exporter: python -# pygments_lexer: ipython3 -# version: 3.9.16 -# latex_envs: -# LaTeX_envs_menu_present: true -# autoclose: false -# autocomplete: true -# bibliofile: biblio.bib -# cite_by: apalike -# current_citInitial: 1 -# eqLabelWithNumbers: true -# eqNumInitial: 1 -# hotkeys: -# equation: Ctrl-E -# itemize: Ctrl-I -# labels_anchors: false -# latex_user_defs: false -# report_style_numbering: false -# user_envs_cfg: false -# toc: -# base_numbering: 1 -# nav_menu: {} -# number_sections: true -# sideBar: true -# skip_h1_title: false -# title_cell: Table of Contents -# title_sidebar: Contents -# toc_cell: false -# toc_position: {} -# toc_section_display: true -# toc_window_display: false -# --- - -# %% [markdown] -# Journey: Economics PhD Student -# ==== -# - -# %% [markdown] -# ## 1 Introduction -# -# This notebook is designed as an introduction for someone with the training of a first-year Economics PhD student, but perhaps without much background in computer programming or scientific computation. As it is a "journey," it is not one big tutorial, but a set of links to notebooks and other resources which will help you understand the different HARK objects and functionalities. -# -# This journey does not require any special skill in programming. However, we recommend you take a few introductory tutorials in Python and object-oriented programming (OOP) to make you familiar with the basic concepts. Moreover, we assume some knowledge of economic theory.
-# -# Since you have found this journey, you probably have a concept of what a heterogeneous agent model is, but here is a short recap. Think about a basic, infinitely lived consumer problem as you know from first-year graduate courses (leaving aside firms and general equilibrium for now). Using the Bellman equation, we can write it as: -# -# \begin{eqnarray*} -# V(M_t) &=& \max_{C_t} U(C_t) + \beta V(M_{t+1}), \\ -# & s.t. & \\ -# A_t &=& M_t - C_t, \\ -# M_{t+1} &=& R (M_{t}-C_{t}) + Y_t, \\ -# \end{eqnarray*} -# -# -# where $\beta <1$ is a discount factor, $C_t$ is consumption, $A_t$ denotes assets, $Y_t$ denotes income, and $U(C)$ is a standard CRRA utility function: -# -# $$ -# U(C)=\frac{C^{1-\rho}}{1-\rho}. -# $$ -# -# Now assume that every consumer faces some uncertainty about her income, which is subject to idiosyncratic shocks; the realizations of each shock are (potentially) different for each agent. In this setting, it follows an AR(1) process, so that the current value of $Y$ is a state variable that predicts future values of $Y$. -# -# Then, the Bellman equation looks like: -# -# \begin{eqnarray*} -# V(M_t, Y_t) &=& \max_{C_t} U(C_t) + E[\beta V(M_{t+1}, Y_{t+1})], \\ -# & s.t. & \\ -# A_t &=& M_t - C_t, \\ -# M_{t+1} &=& R (M_{t}-C_{t}) + Y_t, \\ -# \end{eqnarray*} -# -# Finding a distribution of agent assets (consumption, savings) must involve much more advanced numerical tools than in the representative agent setting. This is a more demanding task to accomplish and master. Moreover, the knowledge about the numerical methods involved is less systematic, and often hard to find. To quote the HARK [Documentation](https://docs.econ-ark.org/overview/introduction.html): -# -# *"After months of effort, you may have had the character-improving experience of -# proudly explaining to your adviser that not only had you grafted two ideas -# together, you also found a trick that speeded the solution by an order of -# magnitude, only to be told that your breathtaking insight had been understood -# for many years, as reflected in an appendix to a 2008 paper; or, worse, your -# discovery was something that “everybody knows” but did not exist at all in -# published form!"* -# -# HARK was designed to help you avoid similar experiences. We see two main uses of this package and its tools: -# -# - To simulate the standard heterogeneous agent models without learning all the numerical methods -# - To solve your own models building on the already implemented algorithms -# -# This journey will help you mostly with using HARK in the first way. We do not elaborate on the numerical methods here; however, in the last sections you can find some guidance on which methods were used and how the source code is structured. -# -# Although using the prepared package is easier than writing your own solution (which you will need to do sooner or later if you create an original heterogeneous agent model), it still takes real effort to comprehend the main classes and functionalities of HARK. We hope that this journey will make it easier! We believe that it will also be your first step into the world of heterogeneous agent modeling. -# -# --- -# NOTE -# *** -# We will be very happy to see your feedback. If you have any questions regarding this tutorial or HARK as a whole please see our [Github page](https://github.com/econ-ark/HARK). -# -# --- - -# %% [markdown] -# ## 2 Before you start -# -# As we have mentioned before, this journey does not require any special skill in programming.
However, some knowledge about Python and object-oriented programming (OOP) is needed. We propose two possible ways to gather the basic concepts; however, plenty of other resources are available: -# -# - Quick introduction to Python and OOP: chapters five to seven from [Quantecon](https://python-programming.quantecon.org/intro.html) should familiarize you with everything you need for the first tutorials. -# - A little longer introduction (if you want to learn something about the numerical methods used): -# - Start with the basic Python [tutorial](https://docs.python.org/3/tutorial) -# - Get some knowledge about [Numpy](https://numpy.org/doc/stable/user/quickstart.html) -# - You can also learn Python by learning machine learning, as there are many tutorials constructed in that way (one example is [scikit-learn tutorials](https://scikit-learn.org/stable/tutorial/index.html)). - -# %% [markdown] -# ## 3 A few words about HARK structure -# -# HARK was written using OOP (we hope that you skimmed the tutorials and have some understanding of this). This means that different parts of the model, like different types of consumers, firms, and general equilibrium conditions (if you have these components in the model), are implemented as different *objects*. Such a structure enables you to build your own models with different consumer-type distributions / company structure (if you want some). Importantly, learning the package with such a structure implies learning the different types of objects (classes). -# -# In HARK there are two main classes: `AgentType` (think consumers, microeconomic models) and `Market` (think general equilibrium, macroeconomic models). As AgentType objects are the attributes of the Market, we first present this type (additionally, if you are interested only in microeconomic research, you may not want to study the Market class). -# -# In practice, it will take more than two classes to accommodate the variety of models constructed using the toolkit. Thus, each class will have subclasses, and those will have their own subclasses. In general, a more sophisticated class will be defined as a subclass. This journey will reflect this structure, first by presenting the most primitive models, and then the more fancy ones. -# -# --- -# NOTE -# *** -# In OOP, objects are organized in **classes** (the general structure of the objects) and more specific **subclasses**. The subclass inherits the methods and attributes from its parent class. Thus, everything you can do with an object from a general class can be done with an object from its subclass. In the case of economic models, the basic ones are always the parent classes of the more sophisticated ones. -# -# --- -# - -# %% [markdown] -# ## 4 Agent-type class -# The agent-type class enables you to build microeconomic models (such as the one presented in the introduction). It is also the essential part of the macroeconomic model in HARK. So remember: *to use HARK, you always need to use agent-type classes!* -# -# ### 4.1 Introductory example -# As an example, let's solve the stochastic model from the introduction. Assume the income process of agent $i$ in period $t$, $Y_{i,t}$, is given by: -# -# \begin{eqnarray*} -# Y_{i,t} &=& \varepsilon_t(\theta_{i,t} p_{i,t}) \\ -# p_{i,t+1} &=& p_{i,t}\psi_{i,t+1}\\ -# \psi_{i,t} & \sim & N(1,\sigma_{\psi})\\ -# \theta_{i,t} & \sim & N(1,\sigma_{\theta})\\ -# \end{eqnarray*} -# -# To get a universal solution of this problem, we need to find a policy function (in this case, a consumption function).
This can be done easily using the HARK `solve` function. -# -# Before doing this, we need to declare our model (we assume standard parametrization: $R = 1.03$, $\rho = 2$, $\beta = 0.96$, $P(\varepsilon=0)= 0.005$, $P(\varepsilon=1)= 0.995$, $\sigma_{\psi}= \sigma_{\theta}=0.1$): -# -# [comment]: <> (Is this the correct description of the income process? The confusion comes from not knowing the names of a few parameters "epsilon", "P v.s. p"? Does this match the income process defined in the cstw paper?) -# - -# %% -import sys  # set path of the notebook -import os - -sys.path.insert(0, os.path.abspath("../../.")) -from HARK.ConsumptionSaving.ConsIndShockModel import *  # import the module for the idiosyncratic shocks - -# we previously defined the parameters so as not to bother you with them now -import JourneyPhDparam as Params  # imported parameters -from HARK.utilities import plot_funcs  # useful function - -Example = IndShockConsumerType() - -# %% [markdown] -# Next we can solve the model and plot the consumption function: - -# %% -Example.solve() -min_v = Example.solution[ -    0 -].mNrmMin  # minimal value for which the consumption function is defined -max_v = 20 -print("Consumption function") -plot_funcs([Example.solution[0].cFunc], min_v, max_v) - -# %% [markdown] -# ### 4.2 The Agent-Type structure -# To understand the microeconomic models in HARK, you need to have some concept of the Agent-type class structure. As mentioned, in HARK more advanced models are subclasses of the more primitive ones. The following diagram illustrates this structure: the deterministic class `PerfForesightConsumerType` is the parent for the class of the consumers with idiosyncratic income shocks `IndShockConsumerType`. Subsequently, there is a class defined with both idiosyncratic and aggregate income shocks `MarkovConsumerType`. -# -# ![HARK structure](HARK-struct-2.png) -# -# However, it doesn't end there! There are subclasses of the `AggShockConsumerType` which are designed to be integrated with macroeconomic models (we will discuss them in the section devoted to the Market class), and there are many other subclasses (which we will mention in the supplementary section). - -# %% [markdown] -# ### 4.3 Main tutorials -# -# To reflect the agent-type structure, we propose you start with the Quickstart notebook (it is devoted to the deterministic case). Then proceed to the idiosyncratic consumers and then to consumers with aggregate and idiosyncratic shocks. The exact order of the suggested tutorials is given in the table. -# -# -# |Number | Tutorial | Description| -# | :---- | :---- | :---- | -# |1 |[Quickstart](https://github.com/econ-ark/HARK/blob/master/examples/Journeys/Quickstart_tutorial/Quick_start_with_solution.ipynb) |This tutorial familiarizes you with the basic HARK objects and functionalities.<br />
You will learn how to create, solve, plot and simulate the deterministic
microeconomic models ($\texttt{PerfForesightConsumerType}$ class).| -# |2 |[Idiosyncratic consumers](https://github.com/econ-ark/HARK/blob/master/examples/ConsIndShockModel/IndShockConsumerType.ipynb) |In this tutorial you will learn how to deal
with the microeconomic models with agents with idiosyncratic shocks:
individual productivity shocks ($\texttt{IndShockConsumerType}$ class). It builds on the Quickstart. | -# |3|[Nondurables during great recession](https://github.com/econ-ark/DemARK/blob/master/notebooks/Nondurables-During-Great-Recession.ipynb)| Use your knowledge about HARK to conduct a few economic experiments!<br />
You will examine the effects of an increase in uncertainty on the heterogeneous<br />
agents with idiosyncratic income risk.| -# |4|[Chinese-Growth](https://github.com/econ-ark/DemARK/blob/master/notebooks/Chinese-Growth.ipynb)|Learn how to dealt with models with idiosyncratic
and aggregate risk ($\texttt{𝙼𝚊𝚛𝚔𝚘𝚟ConsumerType}$ class).
Next build advanced simulation with many agent types.| -# - -# %% [markdown] -# ### 4.4 Supplementary tutorials -# -# The aforementioned four tutorials are the most essential ones. However, in HARK there are a few other classes with a similar structure as three basic ones (with some minor differences). Here is a list of the notebooks which familiarize you with them (if you so wish, as it is not required to understand the next topics). -# -# |Number | Tutorial | Description| -# | :---- | :---- | :---- | -# |1* |[Kinked consumer](https://github.com/econ-ark/HARK/blob/master/examples/ConsIndShockModel/KinkedRconsumerType.ipynb) | $\texttt{KinkedRconsumerType}$ is a subclass of $\texttt{IndShockConsumerType}$.
It enables setting different borrowing and lending interest rates. | -# |2* |[Buffer-stock consumer](https://github.com/econ-ark/DemARK/blob/master/notebooks/Gentle-Intro-To-HARK-Buffer-Stock-Model.ipynb) | In the Buffer Stock model, the unemployment state (zero income state) is irreversible.<br />
This framework is implemented by the $\texttt{TractableConsumerType}$ class.<br />
For the analytical properties of the buffer stock model check these [lecture notes](https://www.econ2.jhu.edu/people/ccarroll/public/LectureNotes/Consumption/TractableBufferStock/).| -# |3*|[Generalized income process](https://github.com/econ-ark/HARK/blob/master/examples/GenIncProcessModel/GenIncProcessModel.ipynb)| In the $\texttt{IndShockConsumerType}$ class, the idiosyncratic income shocks<br />
were assumed to be either purely permanent or purely transitory. In the similar class<br />
$\texttt{PersistentShockConsumerType}$, the income shocks follow an AR(1) process with persistence parameter less than one,<br />
so they are neither fully permanent nor fully transitory<br />
(this is called a generalized income process).| -# -# - -# %% [markdown] -# ## 5 Market class -# -# In macroeconomic models, the consumers are only one possible type of agent. In such models, the economy also contains firms and a government (or other types of agents). In HARK, several standard macro models were implemented using the **Market** class and its subclasses. -# -# - -# %% [markdown] -# ### 5.1 Introductory example -# -# Let's extend our model from the previous section. Assume perfect competition and a Cobb-Douglas production function: -# -# \begin{eqnarray*} -# y_t = k_t^{\alpha} n_t^{1-\alpha} -# \end{eqnarray*} -# Thus, the producers' problem is: -# \begin{eqnarray*} -# \max_{k_t, n_t} \: k_t^{\alpha} n_t^{1-\alpha} - (R_t +\delta)k_t-w_t n_t -# \end{eqnarray*} -# -# where $k_t$ is capital, $n_t$ is labour, and $\delta$ is the depreciation rate. -# -# In this case, consumers' incomes are determined by the wage: -# -# [comment]: <> (Should there be an equation here? Or is this information apparent from the bellman equation?) -# -# \begin{eqnarray*} -# V(M_{i,t}, Y_{i,t}) &=& \max_{C_{i,t}, M_{i,t+1}} U(C_{i,t}) + E[\beta V(M_{i,t+1}, Y_{i,t+1})], \\ -# & s.t. & \\ -# A_{i,t} &=& M_{i,t} - C_{i,t}, \\ -# M_{i,t+1} &=& R_{t+1} (M_{i,t}-C_{i,t}) + w_{t+1} Y_{i,t+1}, \\ -# \end{eqnarray*} -# -# Additionally, assume that the distribution of the consumers over capital is given by the measure $\Gamma_t$. To close the economy, there are the market clearing conditions: -# \begin{eqnarray*} -# n_t &= \int Y_{i,t} \, d \Gamma_t \\ -# k_{t+1} &= \int A_{i,t} \, d \Gamma_t \\ -# k_{t+1}+ \int C_{i,t} \, d\Gamma_t &= y_t+(1-\delta)k_t -# \end{eqnarray*} -# -# In HARK, you can solve this basic case by using the `CobbDouglasEconomy` class. However, to add the consumers to the economy you need the `AggShockConsumerType` class, which is a subclass of `IndShockConsumerType`. Let's declare the economy (assuming depreciation rate $\delta = 0.025$): -# - -# %% -from HARK.ConsumptionSaving.ConsAggShockModel import *  # module with the economy classes - -AggShockExample = AggShockConsumerType( -    **Params.init_agg_shocks -)  # declare the consumer, using the previously prepared parameters - -# Make a Cobb-Douglas economy for the agents -EconomyExample = CobbDouglasEconomy( -    agents=[AggShockExample], **Params.init_cobb_douglas -) - -# %% [markdown] -# Now, you can solve the economy and plot the aggregate savings function: - -# %% -EconomyExample.make_AggShkHist()  # Simulate a history of aggregate shocks - -# Have the consumers inherit relevant objects from the economy -AggShockExample.get_economy_data(EconomyExample) - -AggShockExample.solve()  # solve the model - -print( -    "capital-level steady state: ", EconomyExample.kSS -)  # print the capital-level steady state - -plot_funcs( -    AggShockExample.AFunc, 0.1, 2 * EconomyExample.kSS -)  # plot the aggregate savings function - -# %% [markdown] -# ### 5.2 Market class structure -# -# As in the case of the agent-type, the more complicated macroeconomic models are the subclasses of more primitive ones. The subclasses of Market include `CobbDouglasEconomy` and `SmallOpenEconomy`. The main difference between them is that for `CobbDouglasEconomy`, the capital and labour prices are endogenous, while in the (small) open economy class both are set exogenously.
-# %% [markdown]
-# ### 5.2 Market class structure
-#
-# As in the case of the agent types, the more complicated macroeconomic models are subclasses of more primitive ones. The subclasses of Market include `CobbDouglasEconomy` and `SmallOpenEconomy`. The main difference between them is that for `CobbDouglasEconomy`, the capital and labour prices are endogenous, while in the (small) open economy class both are set exogenously.
-#
-# Nevertheless, both basic classes enable aggregate fluctuations in the economy, that is:
-#
-# \begin{eqnarray*}
-# Y_{i,t} &=& \varepsilon_t(\epsilon_{i,t}p_{i,t}\Theta_t P_t )\\
-# P_{t+1} &=& P_{t}\Psi_{t+1}\\
-# \Psi_{t} &\sim & {N}(1,\sigma_{\Psi})\\
-# \Theta_t &\sim &{N}(1,\sigma_{\Theta})\\
-# \end{eqnarray*}
-#
-# The consumers, which are attributes of such market classes, need to include the aggregate fluctuations of the whole economy in their optimization problem. This is the reason why the `AggShockConsumerType` class (and its subclasses) must be used to construct the macro model.
-#
-# A subclass of `CobbDouglasEconomy` is `CobbDouglasMarkovEconomy`. In this setting, there exists an additional aggregate fluctuation in the economy (the distribution of which is given by a finite Markov matrix).
-#
-#
-# ![HARK_struct_2](HARK-struct-4.png)
-#
-#
-#
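-# %% [markdown]
-# To build intuition for the aggregate permanent income process $P_{t+1} = P_t \Psi_{t+1}$ displayed above, here is a minimal simulation sketch (plain NumPy, not HARK code; the value $\sigma_{\Psi} = 0.01$ and the horizon are illustrative assumptions):

-# %%
-import numpy as np
-
-rng = np.random.default_rng(0)
-sigma_Psi, T = 0.01, 100
-P = np.empty(T + 1)
-P[0] = 1.0  # initial aggregate permanent income level
-for t in range(T):
-    P[t + 1] = P[t] * rng.normal(1.0, sigma_Psi)  # P_{t+1} = P_t * Psi_{t+1}
-print("aggregate permanent income after", T, "periods:", P[-1])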
-# %% [markdown]
-# ### 5.3 Tutorial
-#
-# To learn the functionalities of the market-type classes in HARK, we suggest studying the following notebook devoted to the [Krusell-Smith economy](https://github.com/econ-ark/REMARK/blob/master/REMARKs/KrusellSmith.md). In this notebook, the classical [Krusell-Smith model](https://www.journals.uchicago.edu/doi/abs/10.1086/250034?journalCode=jpe) is implemented (with some extensions) using the `CobbDouglasMarkovEconomy` class.
-#
-# Before that, you may want to check the main function from the [ConsAggShockModel module](https://github.com/econ-ark/HARK/blob/master/examples/ConsumptionSaving/example_ConsAggShockModel.ipynb) or its [source code](https://github.com/econ-ark/HARK/blob/master//HARK/ConsumptionSaving/ConsAggShockModel.py) to see the basic steps to create the market-type objects.
-#
-#

-# %% [markdown]
-# #### 5.3.1 If you want to learn (a little) about how the Market class works
-#
-# The Market class was designed to be a general framework for many different macro models. It involves a procedure for aggregating the agents' choices: e.g. aggregating consumption and savings (`reap_vars` in the code) and then transforming the aggregated variables (`mill_rule` in the code).
-#
-# If you would like to get better knowledge of this structure, first take a look at the [HARK documentation](https://docs.econ-ark.org/overview/ARKitecture.html). Next, to understand how the HARK Market class works in a less standard setting, look at the [Fashion victim model](../notebooks/Fashion-Victim-Model.ipynb).
-#

-# %% [markdown]
-# ## 6 If you need to study the source code
-#
-# In the previous sections we saw examples of how to solve different models using HARK. However, we know that you may also need to work with the source code for a few reasons (e.g. to learn the numerical methods used, or to write your own code).
-#
-# Working directly with code (even if well-written) is a much more complicated task than just working with finished functions, and no tutorial will let you go through this painlessly. However, we hope that this part, elaborating on the HARK structure and numerical methods, will help you with this task.
-#
-# ### 6.1 A few more words on HARK structure
-#
-# When you look at the [HARK](https://github.com/econ-ark/HARK) sources, you will find the subdirectory called HARK. In it there is a script called "core.py". Surprisingly, you will not find there the code of many of the subclasses which you learned about during this journey!
-#
-# The reason for this is that HARK/core.py is the core of the package: a framework for all models which can be coded in HARK. It contains the general framework of the agent-type classes (the AgentType class) and of the market. The exact structure of the modules in the HARK core can be found in the [HARK documentation](https://docs.econ-ark.org/overview/ARKitecture.html#general-purpose-tools). There, you can also find the general structure of the [AgentType](https://docs.econ-ark.org/overview/ARKitecture.html#agenttype-class) and [Market](https://docs.econ-ark.org/overview/ARKitecture.html#market-class) classes.
-#
-# Where are the subclasses which you've learned about during the journey? In HARK, the subclasses are located in separate directories. For the AgentType subclasses, you need to look at the HARK/ConsumptionSaving directory. For example, `PerfForesightConsumerType` and `IndShockConsumerType` can be found in ConsIndShockModel.py. Nevertheless, if you want to understand any of the HARK modules, you must first understand `HARK.core`.
-#
-#
-# ### 6.2 HARK solution
-#
-# For the consumer problems, solutions of the one-period consumer's problem are found using the attribute function `solve_one_period`. The inputs passed to this function also include data from the subsequent periods. Before solve_one_period is called, the function pre_solve() is applied, which prepares the solution (e.g. transmits the solution of the subsequent period as an input).
-#
-# The structure of the functions which are used as solve_one_period reflects the agent-type class structure. Thus, when you study the source code, you will first read the solve classes.
-#
-# ![Hark_struct3](HARK-struct-3.png)
-#
-#
-# #### 6.2.1 Solution method for the agent problem
-# However, knowing the structure of the code may not be very beneficial if you do not know the solution method! While the perfect foresight consumer problem has an analytic solution, the policy functions for the stochastic consumer (thus with idiosyncratic or aggregate shocks) are solved by the **endogenous grid method**.
-#
-# The method of endogenous gridpoints is now widely used in macroeconomic simulations. There are a few resources to learn it; here, we suggest Professor Carroll's [lecture notes](https://www.econ2.jhu.edu/people/ccarroll/SolvingMicroDSOPs/). If you prefer a very quick version, we suggest the appendix to the Krueger and Kindermann [paper](https://www.nber.org/papers/w20601.pdf) (they develop a slightly bigger model with a different notation, but the idea is the same).
-#
-# #### 6.2.2 Finding general equilibrium
-# In general, the rational expectations general equilibrium is found by updating the agents' expectations and the aggregate choices up to the point at which the actual aggregated variables (like the interest rate or capital) are equal to the expected ones. However, one may need to refer to the papers cited in the notebooks to understand the exact methods used.
-#
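-# %% [markdown]
-# To make the idea of the endogenous grid method concrete, here is a schematic one-period EGM step for a consumption-saving problem with CRRA utility (a minimal NumPy sketch under simplified assumptions: a stand-in next-period consumption function and a crude discrete income-shock approximation; this is not HARK's actual implementation):

-# %%
-import numpy as np
-
-rho, beta, R = 2.0, 0.96, 1.03           # illustrative preference/return parameters
-a_grid = np.linspace(0.01, 20.0, 50)     # exogenous grid of end-of-period assets
-theta = np.array([0.7, 1.0, 1.3])        # income shock values (assumed approximation)
-probs = np.array([0.25, 0.5, 0.25])      # and their probabilities
-
-c_next = lambda m: 0.5 * m               # stand-in for next period's consumption function
-
-# Euler equation: u'(c_t) = beta * R * E[u'(c_{t+1})], with u'(c) = c**(-rho)
-m_next = R * a_grid[:, None] + theta[None, :]   # next-period market resources on the grid
-Eup = (c_next(m_next) ** (-rho)) @ probs        # expected marginal utility
-c_now = (beta * R * Eup) ** (-1.0 / rho)        # invert u' to get current consumption
-m_now = a_grid + c_now                          # *endogenous* grid of market resources
-# (m_now, c_now) trace out this period's consumption function c_t(m_t)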
-# ### 6.3 How to study the HARK code
-#
-# We hope that this section gave you some idea of how the HARK library works. However, HARK contains much more than is discussed here. Here is some more guidance on how to continue your journey:
-#
-# - Before you start, make sure that you understand the endogenous grid method, as well as the general framework structure for AgentType and Market from the [HARK documentation](https://docs.econ-ark.org/overview/ARKitecture.html#agenttype-class).
-# - When working through HARK.core, make sure that you see the connection between the structure in the documentation and the code (check the autodoc on the [HARK documentation](https://docs.econ-ark.org/reference/tools/core.html) webpage).
-# - Proceed to ConsumptionSaving/ConsIndShockModel.py and compare the tutorials with the source code.
-# - Proceed to ConsumptionSaving/ConsAggShockModel.py and compare the tutorial on the Market class with the source code; check the [autodoc](https://docs.econ-ark.org/reference/ConsumptionSaving/ConsAggShockModel.html).
-#
-# So in general, when you want to learn any of the modules in the HARK toolkit, first check the autodoc on the [HARK documentation](https://docs.econ-ark.org/reference/index.html) webpage.
-#
diff --git a/examples/Journeys/Journey-Policymaker.py b/examples/Journeys/Journey-Policymaker.py
deleted file mode 100644
index d01f1c5c0..000000000
--- a/examples/Journeys/Journey-Policymaker.py
+++ /dev/null
@@ -1,281 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---

-# %% [markdown]
-# # Journey: HARK for Policymakers
-# HARK is a powerful toolbox for solving heterogeneous-agent models in discrete time. Users can run off-the-shelf models or use the tools to build their own agent type. While there are numerous notebooks which introduce and describe different agents and tools, it might be overwhelming at first.
-#
-# This guide introduces HARK and points out the most important resources, such as notebooks, model types, and tools, so that users can get up to speed quickly to analyze macroeconomic shocks. The outline for today covers:
-#
-# 1. Introduction to HARK
-# 2. HARK meets SSJ
-# 3. Benefits of using HARK
-#
-# Author: Adrian Monninger

-# %% [markdown]
-# ## 1. Introduction to HARK
-#
-# The Heterogeneous Agents Resources & toolKit (HARK) is a toolkit for the structural modeling of economic choices of optimizing and non-optimizing heterogeneous agents.
-#
-# The open-source project [Econ-ark](https://github.com/econ-ark) contains the three main repositories [HARK](https://github.com/econ-ark/HARK), [DemARK](https://github.com/econ-ark/DemARK), and [RemARK](https://github.com/econ-ark/RemARK). On top of that, there is a [website](https://econ-ark.org/) and an [online documentation](https://docs.econ-ark.org/) with useful descriptions and references to specific notebooks.
-#
-# - HARK: Includes the source code as well as some example notebooks on how to use AgentTypes, tools, and Market classes.
-# - DemARK: Demonstrations of economic models using HARK.
-#     - [Fisher Two Period](https://github.com/econ-ark/DemARK/blob/master/notebooks/FisherTwoPeriod.ipynb)
-#     - [Diamond OLG](https://github.com/econ-ark/DemARK/blob/master/notebooks/DiamondOLG.ipynb)
-#     - [Lucas Asset Pricing](https://github.com/econ-ark/DemARK/blob/master/notebooks/Lucas-Asset-Pricing-Model.ipynb)
-# - RemARK: R[eplications/eproductions] and Explorations Made using ARK.
-#     - [Carroll, Slacalek, Tokuoka, White (2017): The distribution of wealth and the marginal propensity to consume](https://github.com/econ-ark/REMARK/blob/master/REMARKs/cstwMPC.md)
-#     - [Cocco, Gomes, Maenhout (2005): Consumption and portfolio choice over the life cycle](https://github.com/econ-ark/REMARK/blob/master/REMARKs/CGMPortfolio.md)
-#     - [Krusell, Smith (1998): Income and wealth heterogeneity in the macroeconomy](https://github.com/econ-ark/REMARK/blob/master/REMARKs/KrusellSmith.md)
-#     - ...

-# %% [markdown]
-# ## 1.1 Structure
-# HARK has two types of classes:
-# one for the micro level, called `AgentType`, and one for the macro level, called `Market`. Today, we will focus on the AgentType and use the sequence-space toolbox for the macro level.

-# %% [markdown]
-# To understand the microeconomic models in HARK, you need to have some concept of the AgentType class structure. In HARK, more advanced models are subclasses of the more primitive ones. The diagram illustrates this structure: the deterministic class `PerfForesightConsumerType` is a parent for the class of consumers with idiosyncratic income shocks, `IndShockConsumerType`. Next, there is a class with idiosyncratic and aggregate income shocks, `MarkovConsumerType`. However, that is not the end! There are subclasses of `AggShockConsumerType` which are designed to be integrated with macroeconomic models, as well as many other subclasses.
-#
-# ![HARK structure](HARK-struct-2.png)

-# %% [markdown]
-# ## 1.2 Example: `IndShockConsumerType`
-# The `IndShockConsumerType` is our standard consumer, who receives two income shocks at the beginning of each period: a completely transitory shock $\newcommand{\tShkEmp}{\theta}{\tShkEmp_t}$ and a completely permanent shock $\newcommand{\pShk}{\psi}{\pShk_t}$. Moreover, the agent is subject to a borrowing limit: the ratio of end-of-period assets $A_t$ to permanent income $P_t$ must be greater than $\underline{a}$. As with most problems in HARK, this model is stated in terms of *normalized* variables, dividing all real variables by $P_t$:
-#
-# \begin{eqnarray*}
-# v_t(m_t) &=& \max_{c_t} {~} u(c_t) + \beta (1-D_{t+1}) \mathbb{E}_{t} \left[ (\Gamma_{t+1}\psi_{t+1})^{1-\rho} v_{t+1}(m_{t+1}) \right], \\
-# a_t &=& m_t - c_t, \\
-# a_t &\geq& \text{$\underline{a}$}, \\
-# m_{t+1} &=& R/(\Gamma_{t+1} \psi_{t+1}) a_t + \theta_{t+1}, \\
-# (\psi_{t+1},\theta_{t+1}) &\sim& F_{t+1}, \\
-# \mathbb{E}[\psi]=\mathbb{E}[\theta] &=& 1, \\
-# u(c) &=& \frac{c^{1-\rho}}{1-\rho}.
-# \end{eqnarray*}
-#
-# The object-oriented structure makes it extremely easy to [use](https://github.com/econ-ark/HARK/blob/master/examples/ConsIndShockModel/IndShockConsumerType.ipynb). A small illustration below solves an infinite-horizon and a lifecycle problem.
-#
-#
-

-# %%
-from HARK.ConsumptionSaving.ConsIndShockModel import (
-    IndShockConsumerType,
-    init_idiosyncratic_shocks,
-)
-from HARK.utilities import plot_funcs_der, plot_funcs
-import matplotlib.pyplot as plt
-import numpy as np

-# %% [markdown]
-# ### An infinite-horizon problem

-# %%
-# Create agent
-IndShockExample_inf = IndShockConsumerType(**init_idiosyncratic_shocks, verbose=False)
-IndShockExample_inf.cycles = 0  # Make this type have an infinite horizon

-# %%
-# Solve
-IndShockExample_inf.solve()

-# %%
-# Show
-print("Consumption function for an idiosyncratic shocks consumer type:")
-plot_funcs(
-    IndShockExample_inf.solution[0].cFunc, IndShockExample_inf.solution[0].mNrmMin, 5
-)
-print("Marginal propensity to consume for an idiosyncratic shocks consumer type:")
-plot_funcs_der(
-    IndShockExample_inf.solution[0].cFunc, IndShockExample_inf.solution[0].mNrmMin, 5
-)
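-# %% [markdown]
-# Beyond plotting, the solved consumption function is a callable object, so you can evaluate it and approximate the MPC by a finite difference directly (a small sketch; the evaluation point and step size are arbitrary choices):

-# %%
-cFunc = IndShockExample_inf.solution[0].cFunc
-m, dm = 2.0, 1e-4  # arbitrary evaluation point and step size
-mpc_approx = (cFunc(m + dm) - cFunc(m)) / dm  # finite-difference MPC at m
-print("c(2.0) =", cFunc(m), ", approximate MPC at m=2.0:", mpc_approx)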
-# %% [markdown]
-# ### A lifecycle problem
-#
-# Similarly, we can solve a lifecycle model. Just specify the time-varying parameters for each period and solve.

-# %%
-LifecycleExample = IndShockConsumerType(**init_idiosyncratic_shocks)
-LifecycleExample.cycles = (
-    1  # Make this consumer live a sequence of periods -- a lifetime -- exactly once
-)
-LifecycleExample.T_cycle = (
-    10  # Specify the number of periods (T_cycle + terminal period)
-)
-# Adapt the time-varying parameters. You can specify a different value for each period.
-LifecycleExample.PermShkStd = [0.1] * 10
-LifecycleExample.TranShkStd = [0.1] * 10
-LifecycleExample.LivPrb = [0.98] * 10
-LifecycleExample.PermGroFac = [1.01] * 10
-LifecycleExample.update_income_process()
-LifecycleExample.solve()
-print("First element of solution is", LifecycleExample.solution[0])
-print("Solution has", len(LifecycleExample.solution), "elements.")

-# %%
-print("Consumption functions across the lifecycle:")
-mMin = np.min(
-    [LifecycleExample.solution[t].mNrmMin for t in range(LifecycleExample.T_cycle)]
-)
-LifecycleExample.unpack(
-    "cFunc"
-)  # This makes all of the cFuncs accessible in the attribute cFunc
-plot_funcs(LifecycleExample.cFunc, mMin, 5)

-# %% [markdown]
-# ### 1.3 Simulation
-# After we have solved the model backwards, we can simulate agents forward using Monte Carlo or [transition matrices](https://github.com/econ-ark/HARK/tree/master/examples/ConsIndShockModel/IndShockConsumerType_Transition_Matrix_Example.ipynb). These results can be used for in-model regressions or for plotting distributions of assets or consumption.

-# %%
-# Specify parameters for the simulation
-IndShockExample_inf.AgentCount = 5000  # Number of agents of this type
-IndShockExample_inf.T_sim = 1000  # Number of periods to simulate
-IndShockExample_inf.aNrmInitMean = np.log(
-    0.0
-)  # Mean of log initial assets. The value of np.log(0.0) causes the code to ensure
-# that newborns have exactly 1.0 in market resources.
-# i) Specify the variables you are interested in
-IndShockExample_inf.track_vars = ["aNrm", "mNrm", "cNrm", "pLvl", "aLvl"]
-# ii) Initialize the simulation
-IndShockExample_inf.initialize_sim()
-# iii) Run it
-IndShockExample_inf.simulate()

-# %%
-from HARK.utilities import get_lorenz_shares, get_percentiles
-
-pctiles = np.linspace(0.001, 0.999, 200)
-sim_Lorenz_points = get_lorenz_shares(
-    IndShockExample_inf.state_now["aLvl"], percentiles=pctiles
-)
-plt.plot(pctiles, pctiles, "--")
-plt.plot(pctiles, sim_Lorenz_points, "-b")
-plt.xlabel("Percentile of net worth")
-plt.ylabel("Cumulative share of wealth")
-plt.show(block=False)
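-# %% [markdown]
-# The same simulated cross-section can be summarized with wealth percentiles via `get_percentiles` (imported above); a small sketch, where the chosen percentile cutoffs are arbitrary:

-# %%
-wealth = IndShockExample_inf.state_now["aLvl"]
-p50, p90 = get_percentiles(wealth, percentiles=[0.5, 0.9])  # median and 90th percentile
-print("median wealth:", p50, " 90th percentile:", p90)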
-# %% [markdown]
-# # 2. HARK meets SSJ
-# HARK offers an extreme amount of flexibility in solving the heterogeneous block in partial equilibrium. To include the general equilibrium parts, there are multiple options depending on your purpose. You could use the in-house [`MarketType`](https://github.com/econ-ark/HARK/blob/master/examples/ConsumptionSaving/example_ConsAggShockModel.ipynb) to model the economy in which the agent lives, or check out how we implemented [Krusell-Smith](https://github.com/econ-ark/KrusellSmith/blob/master/Code/Python/KrusellSmith.ipynb).
-#
-# TODAY, we look at the interlinkage of HARK with the [Sequence Space Jacobian Toolbox](https://github.com/shade-econ/sequence-jacobian) you are already familiar with. (If not, take a look at their example notebooks or (re-)take the [NBER workshop from 2022](https://github.com/shade-econ/nber-workshop-2022).)
-#
-# The idea is to use HARK for the heterogeneous household part, solve for the steady-state values and Jacobians, plug them into the sequence-space toolbox, and get all of its nice functions for free! This is a way to combine the flexibility of HARK on the heterogeneity part with the fast and user-friendly general equilibrium part from SSJ.

-# %% [markdown]
-# Let's get into an introductory example [here](https://github.com/AMonninger/REMARK-ECB/blob/master/code/python/IndShockConsumerType_HANK.ipynb).

-# %% [markdown]
-# ## 3. Why use HARK?
-# The question might now arise: Why should I learn a new toolkit and not stick to SSJ completely? Why does it make sense to specify the heterogeneous block in HARK and not simply stick to the off-the-shelf hetblocks?
-#
-# The short answer is: HARK allows a lot of flexibility on the heterogeneous agent part! It enables the user to match microeconomic facts and to introduce additional features, such as retirement decisions or whole markets (risky assets, durable goods, labor), as well.
-#
-# Below, we look into two examples which use HARK and manage to match the liquid asset distribution. Afterwards, we highlight some agent types with additional features and give a starting point if you want to build your own agent.

-# %% [markdown]
-# ### 3.1 Targeting the liquid asset distribution
-# In the [HARK meets SSJ notebook](https://github.com/AMonninger/REMARK-ECB/blob/master/code/python/IndShockConsumerType_HANK.ipynb), we targeted mean assets (SSJ targets the mean MPC). But what about the whole distribution? Its importance is the difference between TANK and HANK: households with large, but below one, MPCs respond to a shock not only in the period when the shock occurs, but in subsequent periods as well. Those i(ntertemporal)MPCs are quantitatively relevant (see [Auclert et al. 2018](https://www.nber.org/papers/w25020)) and shape the persistence of response functions.
-#
-# HARK gives you the means to tweak your model such that you can match the asset distribution, and with it the MPC distribution.

-# %% [markdown]
-# For instance, Carroll, Slacalek, Tokuoka, and White (2017) show that with small ex-ante heterogeneity in discount rates, you can match the Lorenz curve remarkably well. The results can be seen below. If you want to redo the analysis, check out this [demonstration](https://github.com/econ-ark/DemARK/blob/master/notebooks/Micro-and-Macro-Implications-of-Very-Impatient-HHs.ipynb) or [replication](https://github.com/econ-ark/DistributionOfWealthMPC).
-#
-# ![Lorenz_DisCount](LorenzcurveDiscRate.png)

-# %% [markdown]
-# A recent paper by [Carroll, Crawley, Frankovic, and Tretvoll](https://github.com/llorracc/HAFiscal) matches the intertemporal MPC in addition to the wealth distribution. For this, they add a 'splurge' factor: each period, households spend a fixed fraction of their labor income.
-#
-# Below is figure one from Carroll et al. Such a heterogeneous agent part in your quantitative HANK model allows you to make serious claims about quantitative changes from interest rate or government spending shocks.
-#
-# ![HAFiscal_Fig](HAFiscalFig1.png)
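-# %% [markdown]
-# To illustrate the ex-ante discount-rate heterogeneity just described, one can simply clone an `IndShockConsumerType` across a small grid of discount factors (a minimal sketch; the grid values are arbitrary and are not the calibration used in the papers above):

-# %%
-from copy import deepcopy
-
-base_agent = IndShockConsumerType(**init_idiosyncratic_shocks, verbose=False)
-base_agent.cycles = 0
-
-beta_grid = [0.96, 0.97, 0.98]  # arbitrary illustrative grid of discount factors
-agent_types = []
-for beta in beta_grid:
-    agent = deepcopy(base_agent)  # copy all other parameters
-    agent.DiscFac = beta          # vary only the discount factor
-    agent_types.append(agent)
-
-for agent in agent_types:
-    agent.solve()  # each type gets its own consumption function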
-# %% [markdown]
-# ### 3.2 Other HARK agents
-# There are many more off-the-shelf agents waiting to be used. Including additional features allows you to analyse other markets as well. Note that you can solve and simulate them already, BUT the Jacobians are not ready yet!
-#
-# For a list, click [here](https://github.com/econ-ark/HARK/tree/master/HARK/ConsumptionSaving). Below are some notable examples which solve problems with discrete choice, which the standard `HetBlock` of SSJ is not capable of solving.
-#
-# #### a) [Portfolio Choice](https://github.com/econ-ark/HARK/blob/master/examples/ConsPortfolioModel/example_ConsPortfolioModel.ipynb)
-#
-# Using the `PortfolioConsumerType` allows you to add a risky asset to a one-asset model. A baseline [lifecycle and portfolio choice](https://github.com/econ-ark/REMARK/blob/master/REMARKs/CGMPortfolio.md) model a la Cocco, Gomes, & Maenhout (2005) is already implemented. Depending on your question, you can
-# - specify a share of risky asset holders exogenously
-# - specify an exogenous probability (a la Calvo) with which agents can rebalance their portfolio (see [Luetticke 2021](https://www.ralphluetticke.com/publication/aermacro_2020/) for an example)
-# - solve for the share endogenously with participation and transaction costs
-# - vary returns by age
-# - ...
-#
-# For a live demonstration, invite [Mateo](https://mv77.github.io/). In his JMP, he utilizes household expectations of stock returns to explain the equity premium puzzle.
-#
-# #### b) Search and matching model
-#
-# [Will](https://github.com/wdu9) combines a HANK model with a search and matching model. For this, he uses the `MarkovConsumerType`, which can handle multiple (employment) states. As a result, the model allows him to endogenize wage and unemployment dynamics.
-# Invite Will to present once his first draft is ready.
-#
-#
-# #### c) [Durable Goods](https://github.com/AMonninger/DurableConsumerType_REMARK/blob/main/code/python/DurableModel_Notebook.ipynb)
-#
-# Using the `DurableConsumerType` allows you to solve a household problem with non-durable and durable goods, where adjusting the durable stock entails a non-convex cost. This opens doors to analyzing business cycle fluctuations of durable goods demand and prices, as well as sectoral labor markets.
-#
-# [My](https://github.com/AMonninger) JMP uses this agent in a partial and general equilibrium context. With it, I show how unemployment expectations drive durable consumption fluctuations. Matching consumption dynamics of durables and non-durables allows me to re-evaluate the impact of fiscal and monetary policy. I'm happy to present this in the near future.

-# %% [markdown]
-# ### 3.3 Build your own agent
-# In case your research question requires additional features that off-the-shelf models do not have, you can add them relatively easily! As seen above, agents inherit features from other agents. Hence, search for the closest agent type and replace the parts you want to change.

-# %% [markdown]
-# #### a) Understanding the code
-# Obviously, the most important thing is to understand the structure of the code. Then you can think about which code to replace and how.
-# A good starting point is this [notebook](https://github.com/econ-ark/HARK/blob/master/examples/HowWeSolveIndShockConsumerType/HowWeSolveIndShockConsumerType.ipynb) describing how we solve the `IndShockConsumerType`. Afterwards, look at the source code of other models which build on this one, e.g. [`IndShockRiskyAssetConsumerType`](https://github.com/econ-ark/HARK/blob/master/HARK/ConsumptionSaving/ConsRiskyAssetModel.py), and observe how the replacement works; a schematic subclassing sketch follows below.
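-# %% [markdown]
-# Here is what that inheritance pattern looks like in schematic form: a hypothetical subclass that inherits everything from `IndShockConsumerType` and only adds one new parameter (the class name, the `splurge` parameter, and its default value are illustrative assumptions, not HARK code):

-# %%
-from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
-
-
-class SplurgeConsumerType(IndShockConsumerType):
-    """Hypothetical agent: IndShockConsumerType plus a 'splurge' fraction of income."""
-
-    def __init__(self, splurge=0.3, **kwds):
-        super().__init__(**kwds)  # inherit all standard parameters and methods
-        self.splurge = splurge    # new parameter; the solver/simulator would need
-                                  # to be overridden to actually use it
-
-
-splurger = SplurgeConsumerType(splurge=0.25, **init_idiosyncratic_shocks)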
-# %% [markdown]
-# #### b) Use our tools
-# We update our toolbox constantly. Hence, there might be something in it for your current problem. Many of these tools have notebooks showcasing their functionality.
-#
-# Useful examples are:
-# - [DCEGM-Upper-Envelope](https://github.com/econ-ark/DemARK/blob/master/notebooks/DCEGM-Upper-Envelope.ipynb): To solve problems with nonconvex value functions due to discrete choices
-# - [Harmenberg-Aggregation](https://github.com/econ-ark/DemARK/blob/master/notebooks/Harmenberg-Aggregation.ipynb): Aggregating distributions with permanent-income weighting
-# - [DecayInterp](https://github.com/econ-ark/HARK/tree/master/examples/Interpolation/DecayInterp.ipynb): Interpolation with decay, which can be used if there exists an analytical limit
-# - ...

-# %% [markdown]
-# # Conclusion
-# In this journey you have learned how to use the `IndShockConsumer` and its features in the partial equilibrium case. Afterwards, we saw how easy it is to connect HARK to the sequence-space toolbox in order to generate the general equilibrium blocks.
-#
-# The selling point of HARK is its flexibility in the heterogeneous agent blocks. We can allow for features such as ex-ante heterogeneity in discount rates to match the asset distribution and use the resulting Jacobians to get IRFs from SSJ. Hence, analysing monetary and fiscal policy responses becomes more accurate!
-#
-# But you don't have to stop there. HARK allows you to introduce the kind of heterogeneity you need. Therefore, you are not restricted by the tools in answering your questions, but can start with the questions and define your tools around them!

-# %% [markdown]
-# # References
-# Carroll, C., Slacalek, J., Tokuoka, K., & White, M. N. (2017). The distribution of wealth and the marginal propensity to consume. Quantitative Economics, 8(3), 977-1020.
-#
-# Cocco, J. F., Gomes, F. J., & Maenhout, P. J. (2005). Consumption and portfolio choice over the life cycle. The Review of Financial Studies, 18(2), 491-533.
-#
-# Krusell, P., & Smith, Jr, A. A. (1998). Income and wealth heterogeneity in the macroeconomy. Journal of Political Economy, 106(5), 867-896.
diff --git a/examples/Journeys/Journeys-into-HARK.py b/examples/Journeys/Journeys-into-HARK.py
deleted file mode 100644
index b84de0f8d..000000000
--- a/examples/Journeys/Journeys-into-HARK.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---

-# %% [markdown]
-# Journeys into HARK
-# ======
-#
-# HARK is a tool designed for many types of users. Consequently, the ways in which it can be learned differ depending on your knowledge of structural modeling in economics, object-oriented programming, and economic modeling in general. Thus, here we propose a few "journeys" which are intended to match your experience:
-

-# %% [markdown]
-# ## [1st year PhD student's track](../notebooks/Journey_1_PhD.ipynb)
-# You have some knowledge of economic theory and structural modeling, but you are not an expert in programming, in particular in projects built in the object-oriented programming paradigm. Therefore, this journey puts special effort into discussing how to create, solve, and simulate HARK objects, without requiring special programming skills.
-#
-

-# %% [markdown]
-# ## [Engineer's track](Journey-Engineering-Background.ipynb)
-#
-# You are familiar with numerical simulations as well as object-oriented programming and Python. However, you do not work (much) with economic problems. Thus, we propose a quick tutorial to get used to the models and the basic classes of HARK.
-

-# %% [markdown]
-# ## Lecturer's track
-# You want to use HARK during your classes. We propose a quick tutorial to get used to the models and the basic classes of HARK.

-# %% [markdown]
-# ## [Policymaker track](Journey-Policymaker.ipynb)
-#
-# You have some knowledge of economic theory and would like to use HARK to solve general equilibrium HANK models. This journey introduces HARK and shows how to combine it with the [sequence space jacobian toolbox](https://github.com/shade-econ/sequence-jacobian) in order to create a HANK model.
diff --git a/examples/Journeys/Quickstart_tutorial/Jounery_1_param.py b/examples/Journeys/Quickstart_tutorial/Jounery_1_param.py
deleted file mode 100644
index d6005d285..000000000
--- a/examples/Journeys/Quickstart_tutorial/Jounery_1_param.py
+++ /dev/null
@@ -1,157 +0,0 @@
-'''
-Set of parameters for the first journey
-'''
-from copy import copy
-import numpy as np
-
-# -----------------------------------------------------------------------------
-# --- Define all of the parameters for the perfect foresight model ------------
-# -----------------------------------------------------------------------------
-
-CRRA = 2.0           # Coefficient of relative risk aversion
-Rfree = 1.03         # Interest factor on assets
-DiscFac = 0.96       # Intertemporal discount factor
-LivPrb = [1.0]       # Survival probability
-PermGroFac = [1.0]   # Permanent income growth factor
-AgentCount = 10000   # Number of agents of this type (only matters for simulation)
-aNrmInitMean = 0.0   # Mean of log initial assets (only matters for simulation)
-aNrmInitStd = 1.0    # Standard deviation of log initial assets (only for simulation)
-pLvlInitMean = 0.0   # Mean of log initial permanent income (only matters for simulation)
-pLvlInitStd = 0.0    # Standard deviation of log initial permanent income (only matters for simulation)
-PermGroFacAgg = 1.0  # Aggregate permanent income growth factor (only matters for simulation)
-T_age = None         # Age after which simulated agents are automatically killed
-T_cycle = 1          # Number of periods in the cycle for this agent type
-
-# Make a dictionary to specify a perfect foresight consumer type
-init_perfect_foresight = {
-    'CRRA': CRRA,
-    'Rfree': Rfree,
-    'DiscFac': DiscFac,
-    'LivPrb': LivPrb,
-    'PermGroFac': PermGroFac,
-    'AgentCount': AgentCount,
-    'aNrmInitMean': aNrmInitMean,
-    'aNrmInitStd': aNrmInitStd,
-    'pLvlInitMean': pLvlInitMean,
-    'pLvlInitStd': pLvlInitStd,
-    'PermGroFacAgg': PermGroFacAgg,
-    'T_age': T_age,
-    'T_cycle': T_cycle,
-}
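-# Usage sketch (illustrative, not part of this parameter file): these dictionaries
-# are meant to be unpacked into an agent's constructor, e.g.
-#
-#     from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType
-#     agent = PerfForesightConsumerType(**init_perfect_foresight)
-#     agent.solve()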
minimum" - -# Parameters describing the income process -PermShkCount = 7 # Number of points in discrete approximation to permanent income shocks -TranShkCount = 7 # Number of points in discrete approximation to transitory income shocks -PermShkStd = [0.1] # Standard deviation of log permanent income shocks -TranShkStd = [0.2] # Standard deviation of log transitory income shocks -UnempPrb = 0.005 # Probability of unemployment while working -UnempPrbRet = 0.005 # Probability of "unemployment" while retired -IncUnemp = 0.3 # Unemployment benefits replacement rate -IncUnempRet = 0.0 # "Unemployment" benefits when retired -tax_rate = 0.0 # Flat income tax rate -T_retire = 0 # Period of retirement (0 --> no retirement) - -# A few other parameters -BoroCnstArt = 0.0 # Artificial borrowing constraint; imposed minimum level of end-of period assets -CubicBool = True # Use cubic spline interpolation when True, linear interpolation when False -vFuncBool = False # Whether to calculate the value function during solution - -# Make a dictionary to specify an idiosyncratic income shocks consumer -init_idiosyncratic_shocks = { 'CRRA': CRRA, - 'Rfree': Rfree, - 'DiscFac': DiscFac, - 'LivPrb': LivPrb, - 'PermGroFac': PermGroFac, - 'AgentCount': AgentCount, - 'aXtraMin': aXtraMin, - 'aXtraMax': aXtraMax, - 'aXtraNestFac':aXtraNestFac, - 'aXtraCount': aXtraCount, - 'aXtraExtra': [aXtraExtra], - 'PermShkStd': PermShkStd, - 'PermShkCount': PermShkCount, - 'TranShkStd': TranShkStd, - 'TranShkCount': TranShkCount, - 'UnempPrb': UnempPrb, - 'UnempPrbRet': UnempPrbRet, - 'IncUnemp': IncUnemp, - 'IncUnempRet': IncUnempRet, - 'BoroCnstArt': BoroCnstArt, - 'tax_rate':0.0, - 'vFuncBool':vFuncBool, - 'CubicBool':CubicBool, - 'T_retire':T_retire, - 'aNrmInitMean' : aNrmInitMean, - 'aNrmInitStd' : aNrmInitStd, - 'pLvlInitMean' : pLvlInitMean, - 'pLvlInitStd' : pLvlInitStd, - 'PermGroFacAgg' : PermGroFacAgg, - 'T_age' : T_age, - 'T_cycle' : T_cycle - } - -# Make a dictionary to specify a lifecycle consumer with a finite horizon - -# ----------------------------------------------------------------------------- -# ----- Define additional parameters for the aggregate shocks model ----------- -# ----------------------------------------------------------------------------- -MgridBase = np.array([0.1,0.3,0.6,0.8,0.9,0.98,1.0,1.02,1.1,1.2,1.6,2.0,3.0]) # Grid of capital-to-labor-ratios (factors) - -# Parameters for a Cobb-Douglas economy -PermGroFacAgg = 1.00 # Aggregate permanent income growth factor -PermShkAggCount = 1 # Number of points in discrete approximation to aggregate permanent shock dist -TranShkAggCount = 1 # Number of points in discrete approximation to aggregate transitory shock dist -PermShkAggStd = 0.00 # Standard deviation of log aggregate permanent shocks -TranShkAggStd = 0.00 # Standard deviation of log aggregate transitory shocks -DeprFac = 0.025 # Capital depreciation rate -CapShare = 0.36 # Capital's share of income -DiscFacPF = DiscFac # Discount factor of perfect foresight calibration -CRRAPF = CRRA # Coefficient of relative risk aversion of perfect foresight calibration -intercept_prev = 0.0 # Intercept of aggregate savings function -slope_prev = 1.0 # Slope of aggregate savings function -verbose_cobb_douglas = True # Whether to print solution progress to screen while solving -T_discard = 200 # Number of simulated "burn in" periods to discard when updating AFunc -DampingFac = 0.5 # Damping factor when updating AFunc; puts DampingFac weight on old params, rest on new -max_loops = 20 # Maximum number of 
-
-# Make a dictionary to specify an aggregate shocks consumer
-init_agg_shocks = copy(init_idiosyncratic_shocks)
-del init_agg_shocks['Rfree']      # Interest factor is endogenous in the agg shocks model
-del init_agg_shocks['CubicBool']  # Not supported yet for the agg shocks model
-del init_agg_shocks['vFuncBool']  # Not supported yet for the agg shocks model
-init_agg_shocks['PermGroFac'] = [1.0]
-init_agg_shocks['MgridBase'] = MgridBase
-init_agg_shocks['aXtraCount'] = 24
-init_agg_shocks['aNrmInitStd'] = 0.0
-init_agg_shocks['LivPrb'] = LivPrb
-
-
-# Make a dictionary to specify a Cobb-Douglas economy
-init_cobb_douglas = {
-    'PermShkAggCount': PermShkAggCount,
-    'TranShkAggCount': TranShkAggCount,
-    'PermShkAggStd': PermShkAggStd,
-    'TranShkAggStd': TranShkAggStd,
-    'DeprFac': DeprFac,
-    'CapShare': CapShare,
-    'DiscFac': DiscFacPF,
-    'CRRA': CRRAPF,
-    'PermGroFacAgg': PermGroFacAgg,
-    'AggregateL': 1.0,
-    'act_T': 1200,
-    'intercept_prev': intercept_prev,
-    'slope_prev': slope_prev,
-    'verbose': verbose_cobb_douglas,
-    'T_discard': T_discard,
-    'DampingFac': DampingFac,
-    'max_loops': max_loops,
-}
diff --git a/examples/Journeys/Quickstart_tutorial/Quick_start_with_solution.py b/examples/Journeys/Quickstart_tutorial/Quick_start_with_solution.py
deleted file mode 100644
index 42e209614..000000000
--- a/examples/Journeys/Quickstart_tutorial/Quick_start_with_solution.py
+++ /dev/null
@@ -1,799 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     formats: ipynb,py:percent
-#     notebook_metadata_filter: all
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-#   language_info:
-#     codemirror_mode:
-#       name: ipython
-#       version: 3
-#     file_extension: .py
-#     mimetype: text/x-python
-#     name: python
-#     nbconvert_exporter: python
-#     pygments_lexer: ipython3
-#     version: 3.10.8
-# ---

-# %% [markdown] solution="shown"
-# Quickstart tutorial
-# ======
-#
-# ## Summary
-#
-# This notebook provides the basics of the microeconomic agent-type class, which is fundamental to HARK.
-#
-# ____
-# ### Structure:
-#
-# - **Part 1**: basics of the perfect-foresight agent model
-# - **Part 2**: more advanced methods for the perfect-foresight agent model
-#
-# ### Learning outcomes:
-# - **Part 1**:
-#     - Learn how to declare basic agent-type objects
-#     - Learn solution methods for the agent-type objects
-#     - Plot the value function and consumption function
-#     - Learn how to simulate the agent-type objects
-# - **Part 2**:
-#     - Learn how to build life-cycle models
-#     - Learn more advanced simulation techniques
-#     - Learn advanced plots
-# ____
-# ## Introduction to the consumer problem
-#
-# HARK AgentType classes were designed to solve the consumer problem.
-#
-# In the most basic formulation, the consumer problem is given as follows. The consumer lives T+1 periods (T $\leq \infty$) and during her lifetime receives the same income $Y$. In each period t (0 $\leq$ t $\leq$ T) she can spend it on consumption $C_t$ or invest in an asset $A_t$ with risk-free interest rate R. She maximizes lifetime utility by solving the following Bellman equation, defined on the "cash-in-hand" state space $M_t = C_t + A_t$:
-#
-# For $0 \leq t \leq T$:
-#
-# \begin{eqnarray*}
-# V_t(M_t) &=& \max_{C_t} U(C_t) + \beta V_{t+1}(M_{t+1}), \\
-# & s.t. & \\
-# A_t &=& M_t - C_t, \\
-# M_{t+1} &=& R A_t + Y, \\
-# \end{eqnarray*}
-#
-# with $V_{T+1} = 0$.
-#
-# Obviously, HARK was designed to solve much more complicated consumer problems. However, it is written in the object-oriented programming (OOP) paradigm.
-# Thus, the class designed to solve such a basic problem, $\texttt{PerfForesightConsumerType}$, is a foundation (a parent class, in OOP language) for the more advanced classes with heterogeneous agents. In the diagram you can observe the inheritance between some of the HARK agent-type classes:
-#
-#
-# As you can observe, the $\texttt{AgentType}$ superclass is the most general type of framework for the microeconomic models implemented in HARK. A child/subclass of $\texttt{AgentType}$ is $\texttt{PerfForesightConsumerType}$. There, you will need to define parameters, or **attributes** in OOP (such as $T$, $\beta$, and so on). Next, there are classes which inherit those attributes and further incorporate the heterogeneity of agents.
-#
-# In these classes, you will need to *additionally* define the parameters of the heterogeneity you wish to model (idiosyncratic shocks to income and aggregate productivity shocks are two common examples). Moreover, the **methods** (which define how the object is created, how the solution is presented, etc.) of the subclasses are the same as, or modified versions of, the methods of the parent class.
-#
-# Therefore, to master the basics of the HARK micro classes you will first need to learn the $\texttt{PerfForesightConsumerType}$ class. Consequently, this tutorial aims to teach you this. However, the majority of the presented methods are general for HARK agent-type objects (though they may involve assigning more parameters).
-#
-# In the next notebooks, the class $\texttt{IndShockConsumerType}$ with idiosyncratic income shocks is a more specific example of using the HARK micro classes.
-#

-# %% [markdown]
-# # Part I: Basics of the perfect foresight model
-#
-# In this part, you learn the basics of the perfect foresight model. We will solve the example of the consumer problem presented in the introduction.

-# %% [markdown]
-# ## Getting started
-# First, you need to import HARK and a few additional libraries. Importantly, to use $\texttt{PerfForesightConsumerType}$ you also need to import the HARK.ConsumptionSaving.ConsIndShockModel submodule.

-# %%
-
-# import sys
-# import os
-# sys.path.insert(0, os.path.abspath('../../../.'))
-import matplotlib.pyplot as plt
-import numpy as np
-import HARK
-
-from copy import deepcopy
-from HARK.ConsumptionSaving.ConsIndShockModel import *
-from HARK.utilities import plot_funcs_der, plot_funcs
-

-# %% [markdown]
-# ## Agent-type object creation
-# The most basic way of creating a HARK object is to call its constructor (in OOP, the method which creates the object, called by the class name).
-#
-# For $\texttt{PerfForesightConsumerType}$ we need to set:
-# - $T+1$: the consumer's lifespan, called $\texttt{cycles}$ in the code; if $T = \infty$, set $\texttt{cycles}$=0.
-# - $R$: the risk-free interest rate, called $\texttt{Rfree}$ in the code.
-# - $\beta$: the discount factor, $\texttt{DiscFac}$ in the code.
-# - $\rho$: the CRRA utility function parameter, $\texttt{CRRA}$ in the code.
-#
-# Additionally, you need to define two parameters which do not occur in the presented example, but nevertheless can be useful:
-#
-# - The probability of surviving to the next period, called $\texttt{LivPrb}$ in the code.
-# - The income $Y$ growth factor, $\texttt{PermGroFac}$ in the code.
-#
-# We call our first HARK object **Example_agent_1** and set example values for the parameters.
-

-# %%
-Example_agent_1 = PerfForesightConsumerType(
-    cycles=0, CRRA=2.0, Rfree=1.03, DiscFac=0.99, LivPrb=1.0, PermGroFac=1.0
-)
-

-# %% [markdown]
-# Because we did not assume growth in $Y$ or survival uncertainty, we set these values to 1.

-# %% [markdown]
-# The second method involves creating a **dictionary**: a list of parameter names and values. Here we define the dictionary with the same values as in the first example.

-# %%
-First_dictionary = {
-    "CRRA": 2.0,
-    "DiscFac": 0.99,
-    "Rfree": 1.03,
-    "cycles": 0,
-    "LivPrb": [1.00],
-    "PermGroFac": [1.00],
-}

-# %% [markdown]
-# To create an object with a dictionary, use the constructor with the previously defined dictionary as an argument:
-#

-# %%
-Example_agent_2 = PerfForesightConsumerType(**First_dictionary)

-# %% [markdown]
-# Although the first method is easier, we recommend defining a dictionary whenever you create a HARK object. First, it makes your code cleaner. Second, it enables you to create multiple objects with the same dictionary (the importance of which will become apparent as we move on to creating macro classes).
-#

-# %% [markdown]
-# The methods presented here also work for the more sophisticated HARK objects (however, you will need to specify more parameters).

-# %% [markdown]
-# ### Creating an agent-type object by copy
-#
-# Once you have created an agent-type object, you can use its set of parameters to create another one. To do so, use the **deepcopy** method from the copy package.

-# %%
-Example_agent_3 = deepcopy(Example_agent_2)

-# %% [markdown]
-# Note: **Do not** use only the assignment operator (=), because it does not create a new object. For example, a command such as:

-# %%
-Example_agent_4 = Example_agent_2

-# %% [markdown]
-# does not create a new object. It only gives a new name to the object Example_agent_2 (so a single agent object has both names, Example_agent_2 and Example_agent_4).

-# %% [markdown]
-# ### Modifying parameter values
-#
-# You can easily change a parameter value of the object with the "." operator.
-#
-# For example, to change the discount factor value of the object created in the previous subsection:
-#

-# %%
-Example_agent_3.DiscFac = 0.95

-# %% [markdown]
-# ## Solving agent-type problems
-#
-# To solve agent-type problems such as the one presented in the example, you need to find a **value function** from the Bellman equations and the **policy functions**. In our case, the only policy function is the consumption function: a function that, for each age $t$ and cash-in-hand $M_t$, specifies the optimal consumption level $c_t(M_t)$.
-#
-# To solve a model in HARK, you need to use the $\texttt{solve}$ method. For example, if we want to solve the model with the parameters of the object Example_agent_2:

-# %%
-Example_agent_2.solve()

-# %% [markdown]
-# ### Solution elements
-#
-# The `solve` method finds the value function and consumption function for each period t of the consumer's life (in the case of infinite T, it specifies only one set of functions; because all the parameters are stable and the lifespan is always infinite, the functions are the same for each $t$).
-#
-# Besides the consumption and value functions, the `solve` method also creates a few attributes; the most important is the minimal cash-in-hand value for which the problem has a solution.
-#
-# The exact names of these attributes in HARK are:
-#
-# - vFunc: value function
-# - cFunc: consumption function
-# - mNrmMin: minimum value of $M_t$ such that cFunc and vFunc are defined
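-# %% [markdown]
-# For instance, once the model is solved, these functions are callable objects and can be evaluated directly (a small sketch; the evaluation point 2.0 is an arbitrary choice):

-# %%
-print("c(2.0) =", Example_agent_2.solution[0].cFunc(2.0))
-print("v(2.0) =", Example_agent_2.solution[0].vFunc(2.0))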
-#
-# To get access to the value/consumption function, you need to specify the period t and the object name, using the "." operator twice. So, to get access to the value function, consumption function, and mNrmMin for the solved example:
-#
-

-# %%
-Example_agent_2.solution[0].vFunc
-Example_agent_2.solution[0].cFunc
-Example_agent_2.solution[0].mNrmMin
-

-# %% [markdown]
-# As you can see, only mNrmMin can be printed as a value. However, the value and consumption functions can be plotted.
-#

-# %% [markdown]
-# ### Plotting the solution
-#
-# After the $\texttt{solve}$ method is used, the value and consumption functions can be plotted. HARK's dedicated function for doing so is `plot_funcs`. As arguments, you need to give a function from the solution (possibly a few functions) and the limits of the interval over which you want to plot.
-#
-# For example, we can plot the consumption and value functions on the interval from mNrmMin to -mNrmMin.
-#

-# %%
-min_v = Example_agent_2.solution[0].mNrmMin
-max_v = -Example_agent_2.solution[0].mNrmMin
-print("Consumption function")
-plot_funcs([Example_agent_2.solution[0].cFunc], min_v, max_v)
-print("Value function")
-plot_funcs([Example_agent_2.solution[0].vFunc], min_v, max_v)

-# %% [markdown]
-# ## Simulation
-#
-# The next step is to simulate the agents' behavior. To do so, you first need to set a few parameters for the sake of the simulation:
-#
-# - $\texttt{AgentCount}$: number of simulated agents
-# - $\texttt{T_cycle}$: number of periods in the agent's cycle
-# - $\texttt{T_sim}$: number of simulation periods
-# - $\texttt{T_age}$: age after which simulated agents die with certainty
-#
-# Moreover, HARK enables simulation of the model with log-normal distributions of the initial assets and incomes. You need to set the parameters:
-#
-# - $\texttt{aNrmInitMean}$: mean of log initial assets
-# - $\texttt{aNrmInitStd}$: standard deviation of log initial assets
-# - $\texttt{pLvlInitMean}$: mean of log initial permanent income
-# - $\texttt{pLvlInitStd}$: standard deviation of log initial permanent income
-#
-# Lastly, using the HARK agent-type class, you can also set aggregate income growth (so that the rate of income growth is common to all agents). You may then set the parameter:
-#
-# - $\texttt{PermGroFacAgg}$: aggregate permanent income growth factor
-#
-# In our example, we simulate 1 agent, as it is a representative-agent model. Time flow is chronological and there is no initial heterogeneity. Thus, the standard deviations of the initial asset and income distributions are set to 0. The initial assets and income are set to 1.0. There is no aggregate income growth, so we set the growth factor to 1. We simulate 1000 periods and assume an infinitely lived agent.
-#
-# To declare the values of these parameters, we create a new dictionary:

-# %%
-Simulation_dictionary = {
-    "AgentCount": 1,
-    "aNrmInitMean": 0.0,
-    "aNrmInitStd": 0.0,
-    "pLvlInitMean": 0.0,
-    "pLvlInitStd": 0.0,
-    "PermGroFacAgg": 1.0,
-    "T_cycle": 1,
-    "T_sim": 1000,
-    "T_age": None,
-}
-

-# %% [markdown]
-# Next, you need to update the object. To do so, we use the **setattr** function, which adds the parameter values to the defined agent object.

-# %%
-for key, value in Simulation_dictionary.items():
-    setattr(Example_agent_2, key, value)
-# %% [markdown]
-# Finally, you can start the simulation. First, you need to decide which variables you want to track; we choose the asset level and the consumption level, which in the code are called $\texttt{aNrm}$ and $\texttt{cNrm}$. Next, you need to initialize the simulation with the $\texttt{initialize_sim}$ method. Lastly, run the simulation with the $\texttt{simulate}$ method.

-# %%
-Example_agent_2.track_vars = [
-    "aNrm",
-    "cNrm",
-]  # should these be 'aLvl, cLvl' since the normalized versions of these variables isn't introduced until the next section?
-Example_agent_2.initialize_sim()
-Example_agent_2.simulate()

-# %% [markdown]
-# ## Plotting the simulation
-#
-# Plotting the simulation is a little more complicated than plotting the solution, as you cannot use a dedicated function. Instead, we will use the **matplotlib** library in the following way.
-#
-# To see the consumption and asset history, we can use objects created by the simulation which contain the history of every agent in each of the simulation periods. These histories are stored in the $\texttt{history}$ dictionary, keyed by the tracked variable names. Thus, from the previous example, the histories of assets and consumption are $\texttt{history["aNrm"]}$ and $\texttt{history["cNrm"]}$.
-#
-# Let's make a plot of the asset and consumption levels during the simulated periods. First, define the vectors of mean assets and consumption. Here, there is only one consumer, so we do not need to use a mean function (although it is done so here). However, if you want to plot the mean asset/consumption level for many agents, you will need to use this method.
-#

-# %%
-periods = np.linspace(0, 1000, 1000)
-asset_level = np.mean(Example_agent_2.history["aNrm"][0:1000], axis=1)
-cons_level = np.mean(Example_agent_2.history["cNrm"][0:1000], axis=1)
-
-plt.figure(figsize=(5, 5))
-plt.plot(periods, asset_level, label="Assets level")
-plt.plot(periods, cons_level, label="Consumption level")
-plt.legend(loc=2)
-plt.show()

-# %% [markdown]
-# Now, let's plot the mean asset and consumption growth:

-# %%
-increase_assets = asset_level[1:1000] / asset_level[0:999]
-increase_cons = cons_level[1:1000] / cons_level[0:999]
-plt.figure(figsize=(5, 5))
-plt.plot(periods[1:1000], increase_assets, label="Assets increase")
-plt.plot(periods[1:1000], increase_cons, label="Consumption increase")
-plt.legend(loc=2)
-plt.show()

-# %% [markdown]
-# ## Exercise
-#
-# Congratulations! You've just learned the basics of the agent-type class in HARK.
-# It is time for some exercises:
-#
-

-# %% [markdown]
-# ### Exercise 1: Create the agent-type object
-#
-# Define a dictionary and then use it to create the agent-type object with the parameters:
-#
-# - $\beta = 0.96$
-# - $\rho = 2.0$
-# - $T = \infty$
-# - Risk-free interest rate $R = 1.05$
-#
-# Assume no survival uncertainty and an income growth factor of 1.01.
-#

-# %%
-# Write your solution here

-# fill the dictionary and then use it to create the object

-# First_dictionary = {
-#     'CRRA' : ,
-#     'DiscFac' : ,
-#     'Rfree' : ,
-#     'cycles' : ,
-#     'LivPrb' : [],
-#     'PermGroFac' : [],
-# }
-# Exercise_agent =

-# %% [markdown] solution="hidden" solution_first=true
-# **Solution**: click on the box on the left to expand the solution

-# %% solution="hidden"
-# Solution
-First_dictionary = {
-    "CRRA": 2.0,
-    "DiscFac": 0.96,
-    "Rfree": 1.05,
-    "cycles": 0,
-    "LivPrb": [1.0],
-    "PermGroFac": [1.01],
-}
-Exercise_agent = PerfForesightConsumerType(**First_dictionary)

-# %% [markdown]
-# ### Exercise 2: Solve the model and plot the value function
-#
-

-# %%
-# Write your solution here, use methods from the "solving the model" subsection

-# %% [markdown] solution="hidden" solution_first=true
-# **Solution**: click on the box on the left to expand the solution

-# %% solution="hidden"
-# Solution
-Exercise_agent.solve()
-
-min_v = Exercise_agent.solution[0].mNrmMin
-max_v = -Exercise_agent.solution[0].mNrmMin
-print("Value function")
-plot_funcs([Exercise_agent.solution[0].vFunc], min_v, max_v)

-# %% [markdown]
-# ### Exercise 3: Prepare the simulation
-#
-# Next, prepare the simulation. Assume that **there exists initial asset and income heterogeneity**. Assume the initial income and asset distributions are log-normal with mean 1 and std 1. Simulate 1000 agents for 1000 periods.
-#
-# Add the new parameters to the object:

-# %%
-# Write your solution here.

-# Fill the dictionary
-# Simulation_dictionary = {
-#     'AgentCount': ,
-#     'aNrmInitMean' : ,
-#     'aNrmInitStd' : ,
-#     'pLvlInitMean' : ,
-#     'pLvlInitStd' : ,
-#     'PermGroFacAgg' : 1.0,  # assume no aggregate income growth
-#     'T_cycle' : 1,  # assume forward time flow
-#     'T_sim' : ,
-#     'T_age' : None  # assume immortal agents
-# }

-# for key,value in Simulation_dictionary.items():
-#     setattr(Exercise_agent,key,value)
-

-# %% [markdown] solution="hidden" solution_first=true
-# **Solution**: click on the box on the left to expand the solution

-# %% solution="hidden"
-# Solution
-Simulation_dictionary = {
-    "AgentCount": 1000,
-    "aNrmInitMean": 1.0,
-    "aNrmInitStd": 1.0,
-    "pLvlInitMean": 1.0,
-    "pLvlInitStd": 1.0,
-    "PermGroFacAgg": 1.0,
-    "T_cycle": 1,
-    "T_sim": 1000,
-    "T_age": None,
-}
-
-for key, value in Simulation_dictionary.items():
-    setattr(Exercise_agent, key, value)

-# %% [markdown]
-# ### Exercise 4: Simulate
-#
-

-# %%
-# Write your solution here. Use the commands from the "simulation" subsection; track consumption values
-

-# %% [markdown] solution="hidden" solution_first=true
-# **Solution**: click on the box on the left to expand the solution

-# %% solution="hidden"
-# Solution
-Exercise_agent.track_vars = ["aNrm", "cNrm"]
-Exercise_agent.initialize_sim()
-Exercise_agent.simulate()

-# %% [markdown] solution="hidden"
-# ### Exercise 5: Plot the simulations
-#
-# Plot the mean consumption level and consumption growth:

-# %%
-# Write your solution here.
-
-# First, prepare the vectors which you would like to plot:
-# periods = np.linspace(0,1000,1000)
-# cons_level = np.mean(Exercise_agent.history["cNrm"][0:1000], axis=1)
-# increase_cons = cons_level[1:1000]/cons_level[0:999]

-# next plot your solution
-

-# %% [markdown] solution="hidden" solution_first=true
-# **Solution**: click on the box on the left to expand the solution

-# %% solution="hidden"
-# Solution
-periods = np.linspace(0, 1000, 1000)
-cons_level = np.mean(Exercise_agent.history["cNrm"][0:1000], axis=1)
-increase_cons = cons_level[1:1000] / cons_level[0:999]
-
-plt.figure(figsize=(5, 5))
-plt.plot(periods, cons_level, label="Consumption level")
-plt.legend(loc=2)
-plt.show()
-
-plt.figure(figsize=(5, 5))
-plt.plot(periods[1:1000], increase_cons, label="Consumption increase")
-plt.legend(loc=2)
-plt.show()
-

-# %% [markdown]
-# # PART II: Advanced methods for the perfect foresight agent
-#
-# In this part we focus on more complicated cases of the deterministic agent model.
-#
-# In the previous example, the survival probability (**LivPrb** in the code) and the income growth factor (**PermGroFac** in the code) were constant and set to 1. However, if you want to build a deterministic life-cycle model, you need to add an age-dependent survival probability or income growth.
-#
-# The consumer problem in this setting is:
-#
-# \begin{eqnarray*}
-# V_t(M_t,Y_t) &=& \max_{C_t}~U(C_t) + \beta \pi_t V_{t+1}(M_{t+1},Y_{t+1}), \\
-# & s.t. & \\
-# %A_t &=& M_t - C_t, \\
-# M_{t+1} &=& R (M_{t}-C_{t}) + Y_{t+1}, \\
-# Y_{t+1} &=& \Gamma_{t+1} Y_t, \\
-# \end{eqnarray*}
-#
-# where $Y_t$ is age-dependent income, $\pi_t$ is a survival probability, and $\Gamma_{t+1}$ is the income growth rate; also $\pi_{T+1} = 0$.
-#
-# While it does not reduce the computational complexity of the problem (as permanent income is deterministic, given its initial condition $Y_0$), HARK represents this problem with normalized variables (in lower case), dividing all real variables by permanent income $Y_t$ and utility levels by $Y_t^{1-\rho}$. The Bellman form of the model thus reduces to:
-#
-# \begin{eqnarray*}
-# v_t(m_t) &=& \max_{c_t}~U(c_t) ~+ \beta \pi_{t+1} \Gamma_{t+1}^{1-\rho} v_{t+1}(m_{t+1}), \\
-# & s.t. & \\
-# a_t &=& m_t - c_t, \\
-# m_{t+1} &=& R / \Gamma_{t+1} a_t + 1.
-# \end{eqnarray*}
-#
-# To solve this problem we need to study the **cycles** parameter more carefully. There is a notebook dedicated to solving and simulating life-cycle models, which can be found here: [Cycles_tutorial](https://github.com/econ-ark/HARK/blob/master/examples/LifecycleModel/Cycles_tutorial.ipynb).
-#

-# %% [markdown]
-# ### Methods of plotting the solution
-#
-# $\texttt{plot_funcs()}$ enables you to plot many functions on the same graph. You need to pass them as a vector of functions.
-#
-# To see this, just follow the example. We plot the consumption functions for each age $t$ of the consumer.
-#
-# To get better access to the consumption functions, you can use the $\texttt{unpack('cFunc')}$ method, which creates the attribute $\texttt{cFunc}$ of the object (so you do not have to access it as a solution attribute).
-#
-# We illustrate this with the solution for Example_agent_3. Recall that this agent was given a different time-preference value ($\beta = 0.95$). Here, we also changed the length of the life-cycle of this agent to $10$ periods.
-
-# %%
-Example_agent_3.cycles = 1
-Example_agent_3.LivPrb = [0.99, 0.98, 0.97, 0.96, 0.95, 0.94, 0.93, 0.92, 0.91, 0.90]
-Example_agent_3.PermGroFac = [1.01, 1.01, 1.01, 1.02, 1.00, 0.99, 0.5, 1.0, 1.0, 1.0]
-
-
-Example_agent_3.solve()
-Example_agent_3.unpack("cFunc")
-
-# %% [markdown]
-# Next, we set the minimal value of the grid such that at least one of the consumption functions is defined.
-
-# %%
-min_v = min(Example_agent_3.solution[t].mNrmMin for t in range(11))
-max_v = -min_v
-print("Consumption functions")
-plot_funcs(Example_agent_3.cFunc[:], min_v, max_v)
-
-# %% [markdown]
-# If you want to compare only a few functions (e.g. value functions), you can also construct the list yourself, for example:
-
-# %%
-print("Value functions")
-plot_funcs(
-    [
-        Example_agent_3.solution[0].vFunc,
-        Example_agent_3.solution[5].vFunc,
-        Example_agent_3.solution[9].vFunc,
-    ],
-    min_v,
-    max_v,
-)
-
-
-# %% [markdown]
-# ## Advanced simulation techniques
-#
-# Here we present more advanced simulation techniques, with mortal agents and income dynamics. We also show how to plot the distribution of assets among the agents.
-#
-# First, as in Part I of the tutorial, you need to define the simulation dictionary. However, be careful with the T_age parameter: because the maximal lifespan is 11 periods (T = 10), T_age is set to 10 to ensure that all agents die after this age.
-#
-# For the rest of the parameters, we set the number of consumers alive in each period to 1000. The initial asset level is near 0 (its log is -10). The initial income level is log-normal with mean 0 and standard deviation 1 (in logs). The rest of the parameters are set as in the previous example.
-
-# %%
-Simulation_dictionary = {
-    "AgentCount": 1000,
-    "aNrmInitMean": -10.0,
-    "aNrmInitStd": 0.0,
-    "pLvlInitMean": 0.0,
-    "pLvlInitStd": 1.0,
-    "PermGroFacAgg": 1.0,
-    "T_cycle": 1,
-    "T_sim": 200,
-    "T_age": 10,
-}
-
-for key, value in Simulation_dictionary.items():
-    setattr(Example_agent_3, key, value)
-
-# %% [markdown]
-# Next, we simulate the economy and plot the mean asset level. However, be careful! $\texttt{aNrm}$ gives asset levels normalized by income. To get the asset level itself we need to use $\texttt{aLvl}$ (unfortunately, cLvl is not implemented).
-
-# %%
-Example_agent_3.track_vars = ["aNrm", "cNrm", "aLvl"]
-Example_agent_3.initialize_sim()
-Example_agent_3.simulate()
-
-
-periods = np.linspace(0, 200, 200)
-assets_level = np.mean(Example_agent_3.history["aLvl"][0:200], axis=1)
-
-plt.figure(figsize=(5, 5))
-plt.plot(periods, assets_level, label="assets level")
-plt.legend(loc=2)
-plt.show()
-
-# %% [markdown]
-# As you can see, for the first 10 periods the asset level fluctuates much more. This is because in the first periods the agents born in period 0 dominate the population (as only a small fraction die in the first periods of life).
-#
-# You can simply cut the first observations to get asset levels for a more balanced population.
-
-# %%
-after_burnout = np.mean(Example_agent_3.history["aLvl"][10:200], axis=1)
-
-plt.figure(figsize=(5, 5))
-plt.plot(periods[10:200], after_burnout, label="assets level")
-plt.legend(loc=2)
-plt.show()
-
-# %% [markdown]
-# ### Plotting the distribution of assets
-#
-# In simulations like this, the main interest is often not the exact asset/consumption levels along the way, but rather the overall distribution of assets. In our case, we plot the asset distribution.
-#
-# First, get one vector of the asset levels:
-
-# %%
-sim_wealth = np.reshape(Example_agent_3.history["aLvl"], -1)
-
-# %% [markdown]
-# Next, we plot a simple histogram of asset levels using the standard **hist** function from the matplotlib library.
-
-# %%
-print("Wealth distribution histogram")
-n, bins, patches = plt.hist(sim_wealth, 100, density=True, range=[0.0, 10.0])
-
-# %% [markdown]
-# With HARK, you can also easily plot the Lorenz curve. To do so, import some HARK utilities that help us plot it:
-
-# %%
-from HARK.utilities import get_lorenz_shares, get_percentiles
-
-
-# %% [markdown]
-# Then, use $\texttt{get_lorenz_shares}$ to plot the Lorenz curve.
-
-# %%
-pctiles = np.linspace(0.001, 0.999, 15)
-# SCF_Lorenz_points = get_lorenz_shares(SCF_wealth,weights=SCF_weights,percentiles=pctiles)
-sim_Lorenz_points = get_lorenz_shares(sim_wealth, percentiles=pctiles)
-
-plt.figure(figsize=(5, 5))
-plt.title("Lorenz curve")
-plt.plot(pctiles, sim_Lorenz_points, "-b", label="Lorenz curve")
-plt.plot(pctiles, pctiles, "g-.", label="45 Degree")
-plt.xlabel("Percentile of net worth")
-plt.ylabel("Cumulative share of wealth")
-plt.legend(loc=2)
-plt.ylim([0, 1])
-plt.show()
-
-# %% [markdown]
-# ## Exercise
-#
-# Let's build a model with slightly more realistic assumptions.
-#
-# In the file 'life_table.csv' you will find the death probabilities for Americans aged 25-105 in 2017, from the Human Mortality Database. The age-dependent income profile for American males in the file 'productivity_profile.csv' is derived from Heathcote et al. (2010). Try to build a model with these data, additionally assuming a CRRA parameter of 2.0, a discount factor of 0.99, and an interest rate of 1.05. Moreover, assume that initial income is log-normal with mean 0 and standard deviation 0.05 (in logs), and that initial assets are near 0 for all agents (recall that "near 0" can be encoded by setting the mean of log initial assets very low, e.g. -10, as in the earlier simulation).
-#
-# Do the following tasks:
-# - Build a dictionary and create an object with the given data and parameters
-# - Solve the model and plot the consumption functions for each age
-# - Simulate 1000 agents for 2000 periods
-# - Plot a histogram of the asset distribution and the Lorenz curve
-
-# %% solution="hidden"
-# Write your solution here
-
-# First import the data; you can use this snippet (there are other ways to do this)
-import sys
-import os
-
-sys.path.insert(0, os.path.abspath(".."))
-
-
-prob_dead = np.genfromtxt("life_table.csv", delimiter=",", skip_header=1)
-prob_surv = 1 - prob_dead
-
-# The HARK argument needs to be a list, so convert it from a numpy array
-prob_surv_list = np.ndarray.tolist(prob_surv[:80])
-
-income_profile = np.genfromtxt("productivity_profile.csv", delimiter=",", skip_header=1)
-income_profile_list = np.ndarray.tolist(income_profile[:80])
-
-# Continue your solution
-
-
-# %% [markdown] solution="hidden" solution_first=true
-# **Solution**: click on the box on the left to expand the solution
-
-# %% solution="hidden"
-import sys
-import os
-
-sys.path.insert(0, os.path.abspath(".."))
-
-
-prob_dead = np.genfromtxt("life_table.csv", delimiter=",", skip_header=1)
-prob_surv = 1 - prob_dead
-prob_surv_list = np.ndarray.tolist(prob_surv[:80])
-
-income_profile = np.genfromtxt("productivity_profile.csv", delimiter=",", skip_header=1)
-income_profile_list = np.ndarray.tolist(income_profile[:80])
-
-Ex_dictionary = {
-    "CRRA": 2.0,
-    "Rfree": 1.05,
-    "DiscFac": 0.99,
-    "LivPrb": prob_surv_list,
-    "PermGroFac": income_profile_list,
-    "cycles": 1,
-    "T_cycle": 1,
-}
-
-Ex_agent = PerfForesightConsumerType(**Ex_dictionary)
-Ex_agent.solve()
-
-Ex_agent.unpack("cFunc")
-
-# loop over every period's solution (80 non-terminal periods plus the terminal one)
-min_v = min(Ex_agent.solution[t].mNrmMin for t in range(len(Ex_agent.solution)))
-max_v = -min_v
-print("Consumption functions")
-plot_funcs(Ex_agent.cFunc[:], min_v, max_v)
-
-
-Simulation_dictionary = {
-    "AgentCount": 1000,
-    "aNrmInitMean": -10.0,
-    "aNrmInitStd": 0.0,
-    "pLvlInitMean": 0.0,
-    "pLvlInitStd": 0.05,
-    "PermGroFacAgg": 1.0,
-    "T_cycle": 1,
-    "T_sim": 2000,
-    "T_age": 80,
-    "BoroCnstArt": 0.0,
-}
-
-for key, value in Simulation_dictionary.items():
-    setattr(Ex_agent, key, value)
-
-Ex_agent.track_vars = ["aNrm", "cNrm", "aLvl"]
-Ex_agent.initialize_sim()
-Ex_agent.simulate()
-
-
-sim_wealth = np.reshape(Ex_agent.history["aLvl"], -1)
-print("Wealth distribution histogram")
-n, bins, patches = plt.hist(sim_wealth, 50, density=True, range=[-1.0, 2.0])
-
-pctiles = np.linspace(0.001, 0.999, 15)
-# SCF_Lorenz_points = get_lorenz_shares(SCF_wealth,weights=SCF_weights,percentiles=pctiles)
-sim_Lorenz_points = get_lorenz_shares(sim_wealth, percentiles=pctiles)
-
-plt.figure(figsize=(5, 5))
-plt.title("Lorenz curve")
-plt.plot(pctiles, sim_Lorenz_points, "-b", label="Lorenz curve")
-plt.plot(pctiles, pctiles, "g-.", label="45 Degree")
-plt.xlabel("Percentile of net worth")
-plt.ylabel("Cumulative share of wealth")
-plt.legend(loc=2)
-plt.ylim([0, 1])
-plt.show()
diff --git a/examples/LabeledModels/LabeledModels.py b/examples/LabeledModels/LabeledModels.py
deleted file mode 100644
index e9125555b..000000000
--- a/examples/LabeledModels/LabeledModels.py
+++ /dev/null
@@ -1,644 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %% [markdown]
-# # Using xarray to solve Heterogeneous Agent Models
-
-# %% [markdown]
-# Import required libraries.
-
-# %%
-from types import SimpleNamespace
-
-import estimagic as em
-import matplotlib.pyplot as plt
-import numpy as np
-import xarray as xr
-from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType
-from HARK.rewards import UtilityFuncCRRA
-from HARK.utilities import plot_funcs
-
-from xarray import DataArray, Dataset
-
-# %% [markdown]
-# Here are some basic parameters that we'll use to construct the model. `CRRA` is the coefficient of relative risk aversion, `DiscFac` is the intertemporal discount factor, and `Rfree` is the interest rate on savings.
-
-# %%
-epsilon = 1e-6  # lower bound for cash-on-hand
-CRRA = 2.0  # coefficient of relative risk aversion
-DiscFac = 0.96  # discount factor
-Rfree = 1.03  # risk-free interest rate
-params = SimpleNamespace(CRRA=CRRA, DiscFac=DiscFac, Rfree=Rfree)
-
-
-util = UtilityFuncCRRA(CRRA)
-
-# %% [markdown]
-# ### The Problem
-
-# %% [markdown]
-# First, we explore the structure of a perfect-foresight consumption-savings model. The agent's problem is to maximize their present discounted utility of consumption subject to a budget constraint. The recursive problem is given by
-#
-# $$
-# v_t(m_t) = \max_{c_t} u(c_t) + \beta v_{t+1}(m_{t+1}) \\
-# s.t. \\
-# a_t = m_t - c_t \\
-# m_{t+1} = R a_t + 1
-# $$
-
-# %% [markdown]
-# This problem can be dissected into two stages and two transitions:
-#
-# First, the agent chooses consumption $c_t$ to maximize their utility given their current cash-on-hand $m_t$, and is left with liquid assets $a_t$. This choice must obey the budget constraint, such that assets equal cash-on-hand minus consumption.
-#
-# $$
-# v_t(m_t) = \max_{c_t} u(c_t) + \beta w_{t}(a_{t}) \\
-# s.t. \\
-# a_t = m_t - c_t \\
-# $$
-#
-# Second, the agent receives a constant income and the liquid assets accrue interest, which results in next period's cash-on-hand $m_{t+1}$.
-#
-# $$
-# w_t(a_t) = v_{t+1}(m_{t+1}) \\
-# s.t. \\
-# m_{t+1} = R a_t + 1
-# $$
-#
-# Although this is very simple, it will become apparent later why this separation is useful.
-
-# %% [markdown]
-# ### Defining the state space
-
-# %% [markdown]
-# We can define the state space in two ways: as a numpy grid, or as an xarray.DataArray.
-
-# %%
-mVec = np.geomspace(epsilon, 20, 100)  # grid for market resources
-
-# %% [markdown]
-# The xr.DataArray will be useful for representing the state space in a more general way. We can define the state space as a 1-dimensional array of cash-on-hand values. For our simple example, we use a one-variable xr.Dataset to represent the state space.
-
-# %%
-mNrm = DataArray(
-    mVec,
-    name="mNrm",
-    dims=("mNrm"),
-    attrs={"long_name": "Normalized Market Resources"},
-)
-state = Dataset({"mNrm": mNrm})  # only one state var in this model
-
-# %% [markdown]
-# Notice the structure of an xr.Dataset, which includes `mNrm` as a dimension.
-
-# %%
-state
-
-# %% [markdown]
-# We can do the same for the liquid assets, which we can refer to as the post-decision state (post-state for short) of the first stage of the problem, or the state of the second stage of the problem.
-
-# %%
-aNrm = DataArray(
-    mVec,
-    name="aNrm",
-    dims=("aNrm"),
-    attrs={"long_name": "Normalized Liquid Assets"},
-)
-post_state = Dataset({"aNrm": aNrm})
-
-print(post_state)
-
-# %% [markdown]
-# We can now define functions over the state space. In this basic model, we need an action/policy/decision to represent consumption.
-# Starting from the last period, we know that the solution is for the agent to consume all of its resources `mNrm`, which induces a linear function. Defining it as a function of the state space is easy; notice in the expression below that the dimension for `cNrm` is `mNrm`.
-
-# %%
-# optimal decision is to consume everything in the last period
-cNrm = DataArray(
-    mVec,
-    name="cNrm",
-    dims=state.dims,
-    coords=state.coords,
-    attrs={"long_name": "Consumption"},
-)
-actions = Dataset({"cNrm": cNrm})
-cNrm
-
-# %% [markdown]
-# ### The Value and Marginal Value functions
-
-# %% [markdown]
-# To define the value and marginal value functions in the last period, we can use the utility and marginal utility functions.
-
-# %%
-v = util(cNrm)
-v.name = "v"
-v.attrs = {"long_name": "Value Function"}
-
-v_der = util.der(cNrm)
-v_der.name = "v_der"
-v_der.attrs = {"long_name": "Marginal Value Function"}
-
-# %% [markdown]
-# It will also be useful to define the inverse value and inverse marginal value functions.
-
-# %%
-v_inv = cNrm.copy()
-v_inv.name = "v_inv"
-v_inv.attrs = {"long_name": "Inverse Value Function"}
-
-v_der_inv = cNrm.copy()
-v_der_inv.name = "v_der_inv"
-v_der_inv.attrs = {"long_name": "Inverse Marginal Value Function"}
-
-# %% [markdown]
-# We can now create an xr.Dataset to store all of the variables/functions we have created. Datasets are useful containers for variables that are defined over the same dimensions, or in our case states. As we can see, every variable in the dataset shares the same dimension of `mNrm`.
-
-# %%
-dataset = Dataset(
-    {
-        "cNrm": cNrm,
-        "v": v,
-        "v_der": v_der,
-        "v_inv": v_inv,
-        "v_der_inv": v_der_inv,
-    }
-)
-dataset
-
-# %% [markdown]
-# We can also create separate datasets for the value function variables and the policy function variable.
-
-# %%
-value_function = Dataset(
-    {"v": v, "v_der": v_der, "v_inv": v_inv, "v_der_inv": v_der_inv}
-)
-policy_function = Dataset({"cNrm": cNrm})
-
-# %% [markdown]
-# Up to now I've used the word "function" for the variables stored as datasets. This is because, using the `interp` method, we can interpolate the values of the variables at any point in the state space. So, if we have enough points, we can approximate the true functions numerically. This is useful for solving the model, as we will see later.
-
-# %%
-dataset.interp({"mNrm": np.sort(np.random.uniform(epsilon, 20, 10))})
-
-
-# %% [markdown]
-# Because of the curvature of the value and marginal value functions, it'll be useful to interpolate the inverse value and inverse marginal value functions instead and then re-curve them. For this, I create a new class `ValueFunctionCRRA` that returns the appropriate value and marginal value functions.
-
-
-# %%
-class ValueFunctionCRRA(object):
-    def __init__(self, dataset: xr.Dataset, CRRA: float):
-        self.dataset = dataset
-        self.CRRA = CRRA
-        self.u = UtilityFuncCRRA(CRRA)
-
-    def __call__(self, state):
-        """
-        Interpolate the inverse value function, then re-curve to get the value function at the given state.
-        """
-
-        result = self.u(
-            self.dataset["v_inv"].interp(
-                state,
-                assume_sorted=True,
-                kwargs={"fill_value": "extrapolate"},
-            )
-        )
-
-        result.name = "v"
-        result.attrs = self.dataset["v"].attrs
-
-        return result
-
-    def derivative(self, state):
-        """
-        Interpolate the inverse marginal value function, then re-curve to get the marginal value function at the given state.
- """ - result = self.u.der( - self.dataset["v_der_inv"].interp( - state, - assume_sorted=True, - kwargs={"fill_value": "extrapolate"}, - ) - ) - - result.name = "v_der" - result.attrs = self.dataset["v"].attrs - - return result - - def evaluate(self, state): - """ - Interpolate all data variables in the dataset. - """ - - result = self.dataset.interp(state, kwargs={"fill_value": "extrapolate"}) - result.attrs = self.dataset["v"].attrs - - return result - - -# %% [markdown] -# Now we can create a `ValueFuncCRRA` that will appropriately recurve the value and marginal value functions. - -# %% -vfunc = ValueFunctionCRRA(value_function, CRRA) - -# %% [markdown] -# For an example of how this is useful, we can create a random grid of states and compare the differences in the 2 approaches. - -# %% -rand_states = np.sort(np.random.uniform(mVec[1], mVec[-1], 100)) -rand_states - -# %% [markdown] -# If we simply linearly interpolate the value and marginal value functions using `xarray` interpolation, we get the following results. - -# %% -rand_ds = vfunc.evaluate({"mNrm": rand_states}) -rand_ds - -# %% [markdown] -# However, if we use the inverse value and marginal value functions to interpolate and then re-curve, the results are slightly different. - -# %% -rand_v = vfunc({"mNrm": rand_states}) -np.max(np.abs(rand_v - rand_ds["v"])) - -# %% [markdown] -# The correct answer is of course, the re-curving one using `ValueFunctionCRRA`, as evidenced by the following check. As a reminder, the value function at this stage is the utility of consumption, which in the last period is the utility of the cash-on-hand. - -# %% -rand_v - util(rand_states) - - -# %% [markdown] -# ### Transitions -# -# Another useful feature of `xarray` is that we can easily define the state transitions. Using labels, we can define expresive equations that are easy to read and understand. -# - - -# %% -def state_transition(state=None, action=None, params=None): - """ - state to post_state transition - """ - post_state = {} # pytree - post_state["aNrm"] = state["mNrm"] - action["cNrm"] - return post_state - - -def post_state_transition(post_state=None, params=None): - """ - post_state to next_state transition - """ - next_state = {} # pytree - next_state["mNrm"] = params.Rfree * post_state["aNrm"] + 1 - return next_state - - -# %% [markdown] -# This makes it very easy to define simulations of the model given initial states and optimal actions. - -# %% -Dataset(state_transition(state, policy_function, params)) - -# %% -Dataset(post_state_transition(post_state, params)) - -# %% [markdown] -# These transitions can also be composed. - -# %% -Dataset(post_state_transition(state_transition(state, dataset, params), params)) - - -# %% [markdown] -# We can even define more complex transitions where several variables are created along the way. In the example below, we define the value of an action given some initial state and continuation function, which is the value of having taken that action. -# -# The continuation value function is then the value of some initial post-decision state, which is the value of having taken that action and ending up with next period's state. 
-
-
-# %%
-def value_transition(action=None, state=None, continuation=None, params=None):
-    """
-    value of action given state and continuation
-    """
-    variables = {}  # pytree
-    post_state = state_transition(state, action, params)
-    variables.update(post_state)
-
-    variables["reward"] = util(action["cNrm"])
-    variables["v"] = variables["reward"] + params.DiscFac * continuation(post_state)
-    variables["v_inv"] = util.inv(variables["v"])
-
-    variables["marginal_reward"] = util.der(action["cNrm"])
-    variables["v_der"] = variables["marginal_reward"]  # envelope condition
-    variables["v_der_inv"] = util.derinv(variables["v_der"])
-
-    # for estimagic purposes
-    variables["contributions"] = variables["v_inv"]
-    variables["value"] = np.sum(variables["v_inv"])
-
-    return variables
-
-
-def continuation_transition(post_state=None, value_next=None, params=None):
-    """
-    continuation value function of post_states
-    """
-    variables = {}  # pytree
-    next_state = post_state_transition(post_state, params)
-    variables.update(next_state)
-
-    variables["v"] = value_next(next_state)
-    variables["v_inv"] = util.inv(variables["v"])
-
-    variables["v_der"] = params.Rfree * value_next.derivative(next_state)
-    variables["v_der_inv"] = util.derinv(variables["v_der"])
-
-    # for estimagic purposes
-    variables["contributions"] = variables["v_inv"]
-    variables["value"] = np.sum(variables["v_inv"])
-
-    return variables
-
-
-# %% [markdown]
-# From these transitions, we can easily calculate the continuation value function as follows.
-
-# %%
-v_end = Dataset(continuation_transition(post_state, vfunc, params))
-v_end = v_end.drop(["mNrm"])  # next period's mNrm is not needed
-v_end
-
-# %%
-wfunc = ValueFunctionCRRA(v_end, CRRA)
-
-# %% [markdown]
-# As an example, we can calculate the value of taking, in the second-to-last period, the same action as in the last period. As a reminder, that action is consuming everything and saving 0.
-
-# %%
-Dataset(value_transition(policy_function, state, wfunc, params))
-
-# %% [markdown]
-# ## Solving the Model
-#
-# It should be obvious, however, that this is not the optimal action. The optimal action will consist of consuming some of the resources and saving the rest, but exactly how much to save is not straightforward. For this, we can use the numerical optimizer `estimagic` to find the optimal action.
-
-# %%
-res = em.maximize(
-    value_transition,
-    params={"cNrm": mVec / 2},
-    algorithm="scipy_lbfgsb",
-    criterion_kwargs={"state": state, "continuation": wfunc, "params": params},
-    lower_bounds={"cNrm": np.zeros_like(mVec)},
-    upper_bounds={"cNrm": state["mNrm"].data},
-)
-
-c_opt = DataArray(
-    res.params["cNrm"],
-    name="cNrm",
-    dims=state.dims,
-    coords=state.coords,
-    attrs={"long_name": "consumption"},
-)
-optimal_actions = Dataset({"cNrm": c_opt})
-optimal_value = Dataset(value_transition(optimal_actions, state, wfunc, params))
-grid_search = xr.merge([optimal_actions, optimal_value])
-grid_search
-
-# %% [markdown]
-# As we can see by looking at the `value` variable, the optimization method provides a higher value than the naive strategy of consuming everything. We can also easily plot what this maximization looks like.
-
-# %%
-grid_search["cNrm"].plot()
-
-# %% [markdown]
-# For comparison, we can also check these results against `HARK`'s traditional model solution.
-
-# %%
-hark_agent = PerfForesightConsumerType(
-    CRRA=params.CRRA,
-    DiscFac=params.DiscFac,
-    Rfree=params.Rfree,
-    LivPrb=[1.0],
-    PermGroFac=[1.0],
-    BoroCnstArt=0.0,
-)
-hark_agent.solve()
-
-np.max(np.abs(hark_agent.solution[0].cFunc(mVec) - grid_search["cNrm"]))
-
-
-# %% [markdown]
-# ## Endogenous Grid Method
-#
-# As we can see above, the differences are very small. This is because `HARK` uses the endogenous grid method, instead of a grid-search method, to find an optimal solution. To see the endogenous grid method in action, we can instead do the following.
-#
-# The endogenous grid method consists of starting from the post-decision state and deriving the optimal action that rationalizes ending up at that state.
-#
-# To do this, the endogenous grid method uses the first order condition of the problem, as can be seen in the `egm_transition` function. Concretely, the first order condition is $u'(c_t) = \beta w_t'(a_t)$, so $c_t = u'^{-1}\!\left(\beta w_t'(a_t)\right)$, where $w_t'$ already incorporates the interest factor $R$. Having obtained the optimal consumption from a given post-decision state, we can then back out the starting cash-on-hand that would have induced that consumption.
-
-
-# %%
-def reverse_transition(post_state=None, action=None, params=None):
-    states = {}  # pytree
-    states["mNrm"] = post_state["aNrm"] + action["cNrm"]
-
-    return states
-
-
-def egm_transition(post_state=None, continuation=None, params=None):
-    """actions from post_states"""
-
-    actions = {}  # pytree
-    actions["cNrm"] = util.derinv(params.DiscFac * continuation.derivative(post_state))
-
-    return actions
-
-
-# %%
-acted = egm_transition(post_state, wfunc, params)
-states = reverse_transition(post_state, acted, params)
-
-actions = Dataset(acted).swap_dims({"aNrm": "mNrm"})  # EGM requires swapping dimensions
-states = Dataset(states).swap_dims({"aNrm": "mNrm"})
-
-egm_dataset = xr.merge([actions, states])
-
-values = value_transition(actions, states, wfunc, params)
-egm_dataset.update(values)
-
-# %% [markdown]
-# Because we have imposed an artificial borrowing constraint of 0, we cannot optimize our problem at `aNrm` = 0 using the first order condition. Instead, we have to plug in these values.
-
-# %%
-borocnst = Dataset(
-    coords={
-        "mNrm": 0.0,
-        "aNrm": 0.0,
-    },
-    data_vars={
-        "cNrm": 0.0,
-        "v": -np.inf,
-        "v_inv": 0.0,
-        "reward": -np.inf,
-        "marginal_reward": np.inf,
-        "v_der": np.inf,
-        "v_der_inv": 0.0,
-    },
-)
-
-egm = xr.concat([borocnst, egm_dataset], dim="mNrm", combine_attrs="no_conflicts")
-egm
-
-# %% [markdown]
-# Now, we can compare the endogenous grid method approach with `HARK`'s solution, and see that the difference is now much smaller and numerically trivial.
-
-# %%
-np.max(np.abs(egm["cNrm"].interp({"mNrm": mVec}) - hark_agent.solution[0].cFunc(mVec)))
-
-# %% [markdown]
-# ## `ConsLabeledModel`
-#
-# The `ConsLabeledModel` module provides a number of models that are defined using the `xarray` framework. Below we show some simple examples of how to use these models.
-
-# %% [markdown]
-# ### `PerfForesightLabeledType`
-#
-# The `PerfForesightLabeledType` is a perfect foresight model with a constant interest rate and a constant income, so the agent experiences no uncertainty.
-
-# %%
-from HARK.ConsumptionSaving.ConsLabeledModel import (
-    PerfForesightLabeledType,
-)
-
-agent = PerfForesightLabeledType(cycles=0, BoroCnstArt=-1.0)
-agent.solve()
-
-# %%
-agent.solution[0].policy["cNrm"].plot()
-
-# %% [markdown]
-# The model is equivalent to `PerfForesightConsumerType`, presented below.
-
-# %%
-hark_agent = PerfForesightConsumerType(cycles=0, BoroCnstArt=-1.0)
-hark_agent.solve()
-
-# %%
-plot_funcs(hark_agent.solution[0].cFunc, hark_agent.solution[0].mNrmMin - 1, 25)
-
-# %% [markdown]
-# The difference between the two models is small.
-
-# %%
-np.max(
-    np.abs(
-        hark_agent.solution[0].cFunc(mVec)
-        - agent.solution[0].policy["cNrm"].interp({"mNrm": mVec})
-    )
-)
-
-# %% [markdown]
-# ### `IndShockLabeledType`
-#
-# The `IndShockLabeledType` is a model with idiosyncratic shocks to income. The model is equivalent to `IndShockConsumerType`, presented below.
-
-# %%
-from HARK.ConsumptionSaving.ConsLabeledModel import IndShockLabeledType
-
-agent = IndShockLabeledType(cycles=0)
-agent.solve()
-
-# %%
-agent.solution[0].policy["cNrm"].plot()
-
-# %%
-from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
-
-hark_agent = IndShockConsumerType(cycles=0, BoroCnstArt=None)
-hark_agent.solve()
-
-plot_funcs(hark_agent.solution[0].cFunc, hark_agent.solution[0].mNrmMin - 1, 21)
-
-# %% [markdown]
-# The difference between the two models is small.
-
-# %%
-mgrid = np.linspace(hark_agent.solution[0].mNrmMin, 20)
-np.max(
-    np.abs(
-        hark_agent.solution[0].cFunc(mgrid)
-        - agent.solution[0].policy["cNrm"].interp({"mNrm": mgrid})
-    ),
-)
-
-# %% [markdown]
-# ### `RiskyAssetLabeledType`
-#
-# The `RiskyAssetLabeledType` is a model with idiosyncratic shocks to income and a risky asset. The model is equivalent to `RiskyAssetConsumerType`.
-
-# %%
-from HARK.ConsumptionSaving.ConsLabeledModel import RiskyAssetLabeledType
-
-agent = RiskyAssetLabeledType(cycles=0)
-agent.solve()
-
-# %%
-agent.solution[0].policy["cNrm"].plot()
-agent.solution[0].value.dataset["v_inv"].plot()
-agent.solution[0].value.dataset["v_der_inv"].plot()
-
-# %% [markdown]
-# ### `PortfolioLabeledType`
-#
-# The `PortfolioLabeledType` is a model with idiosyncratic shocks to income, a risky asset, and a portfolio choice. The model is equivalent to `PortfolioConsumerType`. First we see the consumption function.
-
-# %%
-from HARK.ConsumptionSaving.ConsLabeledModel import PortfolioLabeledType
-
-agent = PortfolioLabeledType(cycles=0)
-agent.solve()
-agent.solution[0].policy["cNrm"].plot()
-
-# %% [markdown]
-# Now we can plot the optimal risky share of the portfolio, conditional on the initial state of market resources.
-
-# %%
-agent.solution[0].continuation.dataset["stigma"].plot()
diff --git a/examples/LifecycleModel/Cycles_tutorial.py b/examples/LifecycleModel/Cycles_tutorial.py
deleted file mode 100644
index e6584e41b..000000000
--- a/examples/LifecycleModel/Cycles_tutorial.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     formats: ipynb,py:percent
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.4
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-# ---
-
-# %% [markdown]
-# # A number of Life-Cycle Examples in HARK
-#
-# This notebook offers a number of examples of solving and simulating life-cycle models using the HARK toolkit. Although it is named a tutorial, this note is better used as a reference for models that the user would like to build using the toolkit (for example, if you are interested in modeling agents who experience seasonal shocks over an infinite horizon, you should adapt the final exercise to your own setting).

-# %%
-# Combined imports for the examples in this notebook
-import matplotlib.pyplot as plt
-import numpy as np
-import HARK
-
-from copy import deepcopy
-from HARK.ConsumptionSaving.ConsIndShockModel import *
-from HARK.utilities import plot_funcs_der, plot_funcs
-
-mystr = lambda number: "{:.4f}".format(number)
-
-# %% [markdown]
-# ## 1. The model with age-dependent parameters
-#
-# Recall the deterministic, life-cycle version of the perfect foresight model, which accommodates age-dependent survival probabilities and/or income growth.
-#
-# \begin{eqnarray*}
-# V_t(M_t,Y_t) &=& \max_{C_t}~U(C_t) + \beta \pi_t V_{t+1}(M_{t+1},Y_{t+1}), \\
-# & s.t. & \\
-# %A_t &=& M_t - C_t, \\
-# M_{t+1} &=& R (M_{t}-C_{t}) + Y_{t+1}, \\
-# Y_{t+1} &=& \Gamma_{t+1} Y_t, \\
-# \end{eqnarray*}
-#
-# where $Y_t$ is age-dependent income, $\pi_t$ is a survival probability, and $\Gamma_{t+1}$ is an income growth factor. Also, $\pi_{T+1} = 0$.
-
-# %% [markdown]
-# ## 2. Introducing the `cycles` attribute
-#
-# The `cycles` parameter has a more general use than was presented in the [Quickstart](https://github.com/econ-ark/HARK/blob/master/examples/Journeys/Quickstart_tutorial/Quick_start_with_solution.ipynb) notebook. In general, it tells HARK **how many times the one-period consumer's problem needs to be solved with the given set of time-changing parameters.** In our microeconomic case, these parameters are LivPrb and PermGroFac (as the discount factor, the CRRA parameter, and the risk-free interest rate are assumed to be constant).
-#
-# For both the finite and infinite-horizon settings, **the survival probabilities and income growth rates are different in each period**, and the consumer **never faces the same parameter combination twice.**
-#
-# So, after specifying the lists of survival probabilities and income growth factors for each period, one only needs to set the cycles attribute to match the time horizon of interest (finite or infinite).
-#
-# The cycles parameter can be used by itself in the solution of the one-period consumer's problem with time-changing parameters, or it can be used in conjunction with the `T_cycle` attribute if you are interested in simulating the model. As you will see in the examples towards the end of this notebook, the use of the T_cycle parameter is more straightforward.
-
-# %% [markdown]
-# ## 3. Examples (solution only)
-#
-# ### 3.1 Finite horizon, life-cycle problem
-#
-# First, consider a finite-horizon life-cycle example. We create an agent-type object with maximal lifespan set to 11, with decreasing survival probabilities and inverse-u-shaped income dynamics up to period 6 (when the consumer retires). Notice that this is achieved by the lists of parameters in the code block below.
-
-# %%
-LifeCycle_dictionary = {
-    "CRRA": 2.0,
-    "Rfree": 1.04,
-    "DiscFac": 0.98,
-    "LivPrb": [0.99, 0.98, 0.97, 0.96, 0.95, 0.94, 0.93, 0.92, 0.91, 0.90],
-    "PermGroFac": [1.01, 1.01, 1.01, 1.02, 1.00, 0.99, 0.5, 1.0, 1.0, 1.0],
-    "cycles": 1,
-}
-
-LC_agent = PerfForesightConsumerType(**LifeCycle_dictionary)
-
-# %% [markdown]
-# To solve the model, we use the $\texttt{solve}$ method, which finds the value and consumption functions. When $\Gamma_t \neq 1.0$, these functions are defined on the space of cash-on-hand **normalized by $Y_t$**; it is important to remember this when you plot them.
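-
-# %% [markdown]
-# (A quick worked illustration of the normalization, with made-up numbers: if cash-on-hand is $M_t = 10$ and current income is $Y_t = 2$, the argument of the normalized functions is $m_t = M_t / Y_t = 5$, and consumption in levels is recovered as $C_t = Y_t \, c_t(m_t)$.)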
-
-# %%
-LC_agent.solve()
-
-# %%
-LC_agent.unpack("cFunc")
-
-min_v = min(LC_agent.solution[t].mNrmMin for t in range(11))
-max_v = -min_v
-print("Consumption functions")
-plot_funcs(LC_agent.cFunc[:], min_v, max_v)
-
-# %% [markdown]
-# ### 3.2 Infinite horizon, cyclical problem
-#
-# Next, consider an infinite horizon, cyclical problem in the following setting: the consumer lives with certainty, but her income follows a cyclical pattern. During each cycle she experiences two periods of income increases and two of decreases.
-#
-# We assume that within each cycle the income growth factors are [1.05, 1.1, 0.95, 0.92]. In each period, we also assume that the survival probability is 1. The rest of the parameters are the same as in the previous example.
-#
-# Below, we create a HARK agent-type object with the income cycles described above and solve the model. Notice that the cycles parameter is set to $0$ in this case.
-
-# %%
-Cyc_dictionary = {
-    "CRRA": 2.0,
-    "Rfree": 1.03,
-    "DiscFac": 0.96,
-    "LivPrb": 4 * [1.0],  # the consumer survives each period with certainty
-    "PermGroFac": [1.05, 1.1, 0.95, 0.92],  # the cyclical income growth factors
-    "cycles": 0,
-}
-
-Cyc_agent = PerfForesightConsumerType(**Cyc_dictionary)
-Cyc_agent.solve()
-
-# %% [markdown]
-# ## 4. Examples (simulation and solution)
-#
-# ### 4.1 An aside on the `T_cycle` attribute
-#
-# When moving on to the simulation step of the model, the `T_cycle` parameter will need to be defined appropriately. The best way to understand and implement this, again, is to adapt the following examples to your own needs.
-#
-# However, as a general rule of thumb: *the value of the `T_cycle` parameter should match the number of periods to be solved in the agent's life-cycle problem.* (In the finite-horizon example below, $\texttt{T_cycle}$ is set to 10; in the cyclical example, to 4.)
-
-# %% [markdown]
-# ### 4.2 Finite horizon, life-cycle problem
-#
-# Here, we consider a more advanced example using the IndShockConsumerType class of the toolkit. Suppose again that we want to represent consumers of this class with a *lifecycle* -- parameter values that differ by age, with a finite end point beyond which the individual cannot survive. This can be done very easily by simply specifying the time-varying attributes $\texttt{PermGroFac}$, $\texttt{LivPrb}$, $\texttt{PermShkStd}$, and $\texttt{TranShkStd}$ as Python *lists* specifying the sequence of periods these agents will experience, from beginning to end.
-#
-# In the cell below, we define a parameter dictionary for a rather short ten period lifecycle, with arbitrarily chosen parameters. For a more realistically calibrated (and much longer) lifecycle model, see the [SolvingMicroDSOPs REMARK](https://github.com/econ-ark/REMARK/blob/master/REMARKs/SolvingMicroDSOPs.md).
-
-# %%
-LifecycleDict = {  # Click arrow to expand this fairly large parameter dictionary
-    # Parameters shared with the perfect foresight model
-    "CRRA": 2.0,  # Coefficient of relative risk aversion
-    "Rfree": 1.03,  # Interest factor on assets
-    "DiscFac": 0.96,  # Intertemporal discount factor
-    "LivPrb": [0.99, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
-    "PermGroFac": [1.01, 1.01, 1.01, 1.02, 1.02, 1.02, 0.7, 1.0, 1.0, 1.0],
-    # Parameters that specify the income distribution over the lifecycle
-    "PermShkStd": [0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0, 0, 0],
-    "PermShkCount": 7,  # Number of points in discrete approximation to permanent income shocks
-    "TranShkStd": [0.3, 0.2, 0.1, 0.3, 0.2, 0.1, 0.3, 0, 0, 0],
-    "TranShkCount": 7,  # Number of points in discrete approximation to transitory income shocks
-    "UnempPrb": 0.05,  # Probability of unemployment while working
-    "IncUnemp": 0.3,  # Unemployment benefits replacement rate
-    "UnempPrbRet": 0.0005,  # Probability of "unemployment" while retired
-    "IncUnempRet": 0.0,  # "Unemployment" benefits when retired
-    "T_retire": 7,  # Period of retirement (0 --> no retirement)
-    "tax_rate": 0.0,  # Flat income tax rate (legacy parameter, will be removed in future)
-    # Parameters for constructing the "assets above minimum" grid
-    "aXtraMin": 0.001,  # Minimum end-of-period "assets above minimum" value
-    "aXtraMax": 20,  # Maximum end-of-period "assets above minimum" value
-    "aXtraCount": 48,  # Number of points in the base grid of "assets above minimum"
-    "aXtraNestFac": 3,  # Exponential nesting factor when constructing "assets above minimum" grid
-    "aXtraExtra": [None],  # Additional values to add to aXtraGrid
-    # A few other parameters
-    "BoroCnstArt": 0.0,  # Artificial borrowing constraint; imposed minimum level of end-of-period assets
-    "vFuncBool": True,  # Whether to calculate the value function during solution
-    "CubicBool": False,  # Preference shocks currently only compatible with linear cFunc
-    "T_cycle": 10,  # Number of periods in the cycle for this agent type
-    # Parameters only used in simulation
-    "AgentCount": 10000,  # Number of agents of this type
-    "T_sim": 120,  # Number of periods to simulate
-    "aNrmInitMean": -6.0,  # Mean of log initial assets
-    "aNrmInitStd": 1.0,  # Standard deviation of log initial assets
-    "pLvlInitMean": 0.0,  # Mean of log initial permanent income
-    "pLvlInitStd": 0.0,  # Standard deviation of log initial permanent income
-    "PermGroFacAgg": 1.0,  # Aggregate permanent income growth factor
-    "T_age": 11,  # Age after which simulated agents are automatically killed
-}
-
-# %% [markdown]
-# In this case, we have specified a ten period model in which retirement happens in period $t=7$. Agents in this model are more likely to die as they age, and their permanent income drops by 30\% at retirement. Let's make and solve this lifecycle example, then look at the $\texttt{solution}$ attribute.
-
-# %%
-LifecycleExample = IndShockConsumerType(**LifecycleDict)
-LifecycleExample.cycles = (
-    1  # Make this consumer live a sequence of periods -- a lifetime -- exactly once
-)
-LifecycleExample.solve()
-print("First element of solution is", LifecycleExample.solution[0])
-print("Solution has", len(LifecycleExample.solution), "elements.")
-
-# %% [markdown]
-# This was supposed to be a *ten* period lifecycle model -- why does our consumer type have *eleven* elements in its $\texttt{solution}$? It would be more precise to say that this specification has ten *non-terminal* periods.
-# The solution to the 11th and final period in the model would be the same for every set of parameters: consume $c_t = m_t$, because there is no future. In a lifecycle model, the terminal period is assumed to exist; the $\texttt{LivPrb}$ parameter does not need to end with a $0.0$ in order to guarantee that survivors die.
-#
-# We can quickly plot the consumption functions in each period of the model:
-
-# %%
-print("Consumption functions across the lifecycle:")
-mMin = np.min(
-    [LifecycleExample.solution[t].mNrmMin for t in range(LifecycleExample.T_cycle)]
-)
-LifecycleExample.unpack(
-    "cFunc"
-)  # This makes all of the cFuncs accessible in the attribute cFunc
-plot_funcs(LifecycleExample.cFunc, mMin, 5)
-
-# %% [markdown]
-# ### 4.3 Infinite horizon, cyclical problem
-#
-# As a final example, we model consumers who face an infinite horizon, but who do *not* face the same problem in every period. Consider someone who works as a ski instructor: they make most of their income for the year in the winter, and make very little money in the other three seasons.
-#
-# We can represent this type of individual as a four period, infinite horizon model in which expected "permanent" income growth varies greatly across seasons.
-
-# %%
-CyclicalDict = {  # Click the arrow to expand this parameter dictionary
-    # Parameters shared with the perfect foresight model
-    "CRRA": 2.0,  # Coefficient of relative risk aversion
-    "Rfree": 1.03,  # Interest factor on assets
-    "DiscFac": 0.96,  # Intertemporal discount factor
-    "LivPrb": 4 * [0.98],  # Survival probability
-    "PermGroFac": [1.082251, 2.8, 0.3, 1.1],
-    # Parameters that specify the income distribution over the lifecycle
-    "PermShkStd": [0.1, 0.1, 0.1, 0.1],
-    "PermShkCount": 7,  # Number of points in discrete approximation to permanent income shocks
-    "TranShkStd": [0.2, 0.2, 0.2, 0.2],
-    "TranShkCount": 7,  # Number of points in discrete approximation to transitory income shocks
-    "UnempPrb": 0.05,  # Probability of unemployment while working
-    "IncUnemp": 0.3,  # Unemployment benefits replacement rate
-    "UnempPrbRet": 0.0005,  # Probability of "unemployment" while retired
-    "IncUnempRet": 0.0,  # "Unemployment" benefits when retired
-    "T_retire": 0,  # Period of retirement (0 --> no retirement)
-    "tax_rate": 0.0,  # Flat income tax rate (legacy parameter, will be removed in future)
-    # Parameters for constructing the "assets above minimum" grid
-    "aXtraMin": 0.001,  # Minimum end-of-period "assets above minimum" value
-    "aXtraMax": 20,  # Maximum end-of-period "assets above minimum" value
-    "aXtraCount": 48,  # Number of points in the base grid of "assets above minimum"
-    "aXtraNestFac": 3,  # Exponential nesting factor when constructing "assets above minimum" grid
-    "aXtraExtra": [None],  # Additional values to add to aXtraGrid
-    # A few other parameters
-    "BoroCnstArt": 0.0,  # Artificial borrowing constraint; imposed minimum level of end-of-period assets
-    "vFuncBool": True,  # Whether to calculate the value function during solution
-    "CubicBool": False,  # Preference shocks currently only compatible with linear cFunc
-    "T_cycle": 4,  # Number of periods in the cycle for this agent type
-    # Parameters only used in simulation
-    "AgentCount": 10000,  # Number of agents of this type
-    "T_sim": 120,  # Number of periods to simulate
-    "aNrmInitMean": -6.0,  # Mean of log initial assets
-    "aNrmInitStd": 1.0,  # Standard deviation of log initial assets
-    "pLvlInitMean": 0.0,  # Mean of log initial permanent income
-    "pLvlInitStd": 0.0,  # Standard deviation of log initial permanent income
-    "PermGroFacAgg": 1.0,  # Aggregate permanent income growth factor
-    "T_age": None,  # Age after which simulated agents are automatically killed
-}
-
-# %% [markdown]
-# This consumer type's parameter dictionary is nearly identical to the original infinite horizon type we made, except that each of the time-varying parameters now has *four* values, rather than just one. Most of these have the same value in each period *except* for $\texttt{PermGroFac}$, which varies greatly over the four seasons. Note that the product of the four "permanent" income growth factors is almost exactly 1.0 -- this type's income does not grow on average in the long run!
-#
-# Let's make and solve this consumer type, then plot his quarterly consumption functions:
-
-# %%
-CyclicalExample = IndShockConsumerType(**CyclicalDict)
-CyclicalExample.cycles = 0  # Make this consumer type have an infinite horizon
-CyclicalExample.solve()
-
-CyclicalExample.unpack("cFunc")
-print("Quarterly consumption functions:")
-mMin = min([X.mNrmMin for X in CyclicalExample.solution])
-plot_funcs(CyclicalExample.cFunc, mMin, 5)
-
-# %% [markdown]
-# The very low orange consumption function corresponds to the quarter in which the ski instructors make most of their income. They know that they are about to experience a 70% drop in "permanent" income, so they do not consume much *relative to their income this quarter*. In the other three quarters, *normalized* consumption is much higher, as current "permanent" income is low relative to future expectations. In *level*, the consumption chosen in each quarter is much more similar.
diff --git a/examples/LifecycleModel/LifecycleModel.py b/examples/LifecycleModel/LifecycleModel.py
deleted file mode 100644
index ad244ea03..000000000
--- a/examples/LifecycleModel/LifecycleModel.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     cell_metadata_filter: collapsed,code_folding
-#     formats: ipynb,py:percent
-#     notebook_metadata_filter: all
-#     text_representation:
-#       extension: .py
-#       format_name: percent
-#       format_version: '1.3'
-#     jupytext_version: 1.14.5
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-#   language_info:
-#     codemirror_mode:
-#       name: ipython
-#       version: 3
-#     file_extension: .py
-#     mimetype: text/x-python
-#     name: python
-#     nbconvert_exporter: python
-#     pygments_lexer: ipython3
-#     version: 3.8.16
-# ---
-
-# %% [markdown]
-#
-# # A Lifecycle Model: The Distribution of Assets By Age
-#
-# National registry data on income and wealth from Scandinavian countries has recently become available (with a lot of security) to some (lucky!) researchers. These data offer a uniquely powerful tool for testing (and improving) our models of consumption and saving behavior over the life cycle.
-#
-# But as of this writing (in March of 2019), the data are so new that there do not seem to be any published attempts to compare the data to the implications of a standard life cycle model with income uncertainty, constraints, and other modern features.
-#
-# This notebook is an example of how one could construct a life cycle model with the HARK toolkit that would make predictions about the model analogues of the raw data statistics that are available.
-#
-# For example, the papers have presented information about the growth rate of assets at different ages over the life cycle.
-# Here, we show how (under a given parameterization) we could produce the life cycle model's prediction about the distribution of assets at age 65 and age 66, and the growth rate between 65 and 66.
-#
-# The parameters of the model have not been optimized to match features of the Norwegian data; a first step in "structural" estimation would be to calibrate the inputs to the model (like the profile of income over the life cycle, and the magnitude of income shocks), and then to find the values of parameters like the time preference rate that allow the model to fit the data best.
-#
-# An interesting question is whether this exercise will suggest that it is necessary to allow for _ex ante_ heterogeneity in such preference parameters.
-#
-# This seems likely; a paper by [Carroll et al (2017)](https://www.econ2.jhu.edu/people/ccarroll/papers/cstwMPC) (all of whose results were constructed using the HARK toolkit) finds that, if all other parameters (e.g., rates of return on savings) are the same, models of this kind require substantial heterogeneity in preferences to generate the degree of inequality in U.S. data.
-#
-# But in one of the many new and interesting findings from the Norwegian data, [Fagereng et al (2020)](https://onlinelibrary.wiley.com/doi/epdf/10.3982/ECTA14835) have shown that there is substantial heterogeneity in rates of return, even on wealth held in public markets.
-#
-# [Derin Aksit](https://github.com/econ-ark/cstwMPC-RHetero) has shown that the degree of time preference heterogeneity needed to match observed inequality is considerably less when rate-of-return heterogeneity is calibrated to match these data.
-
-# %% code_folding=[0]
-# Initial imports and notebook setup, click arrow to show
-
-import HARK.ConsumptionSaving.ConsIndShockModel as Model  # The consumption-saving micro model
-import EstimationParameters as Params  # Parameters for the consumer type and the estimation
-from HARK.utilities import plot_funcs_der, plot_funcs  # Some tools
-
-import numpy as np
-
-
-# %% code_folding=[0]
-# Set up default values for CRRA, DiscFac, and simulation variables in the dictionary
-Params.init_consumer_objects["CRRA"] = 2.00  # Default coefficient of relative risk aversion (rho)
-Params.init_consumer_objects["DiscFac"] = 0.97  # Default intertemporal discount factor (beta)
-Params.init_consumer_objects["PermGroFacAgg"] = 1.0  # Aggregate permanent income growth factor
-Params.init_consumer_objects["aNrmInitMean"] = -10.0  # Mean of log initial assets
-Params.init_consumer_objects["aNrmInitStd"] = 1.0  # Standard deviation of log initial assets
-Params.init_consumer_objects["pLvlInitMean"] = 0.0  # Mean of log initial permanent income
-Params.init_consumer_objects["pLvlInitStd"] = 0.0  # Standard deviation of log initial permanent income
-
-
-# %%
-# Make a lifecycle consumer to be used for estimation
-LifeCyclePop = Model.IndShockConsumerType(**Params.init_consumer_objects)
-
-
-# %% code_folding=[0]
-# Solve and simulate the model (ignore the "warning" message)
-LifeCyclePop.solve()  # Obtain consumption rules by age
-LifeCyclePop.unpack("cFunc")  # Expose the consumption rules
-
-# Which variables do we want to track
-LifeCyclePop.track_vars = ["aNrm", "pLvl", "mNrm", "cNrm", "TranShk"]
-
-LifeCyclePop.T_sim = 120  # Nobody lives to be older than 145 years (=25+120)
-LifeCyclePop.initialize_sim()  # Construct the age-25 distribution of income and assets
-LifeCyclePop.simulate()  # Simulate a population behaving according to this model
-
-
-# %% code_folding=[0]
-# Plot the consumption functions during working life
-
-print("Consumption as a function of market resources while working:")
-mMin = min([LifeCyclePop.solution[t].mNrmMin for t in range(LifeCyclePop.T_cycle)])
-plot_funcs(LifeCyclePop.cFunc[: LifeCyclePop.T_retire], mMin, 5)
-
-
-# %% code_folding=[0]
-# Define the saving rate function
-def savingRateFunc(SomeType, m):
-    """
-    Parameters:
-    -----------
-    SomeType:
-        Agent type that has been solved and simulated.
-    m:
-        Normalized market resources at which to evaluate the saving rate.
-
-    Returns:
-    --------
-    SavingRate: float
-        Fraction of current income that is saved.
-    """
-    inc = (SomeType.Rfree - 1.0) * (m - 1.0) + 1.0
-    cons = SomeType.solution[0].cFunc(m)
-    Saving = inc - cons
-    SavingRate = Saving / inc
-    return SavingRate
-
-
-# %% code_folding=[0]
-# Create a giant matrix gathering useful data:
-# 't_now', 'aNrmNow_hist', 'cNrmNow_hist', employment status in date t and in date t-1, aLvlGro_hist, saving rate
-
-w, h = 1, LifeCyclePop.T_cycle
-giant_list = [[0 for x in range(w)] for y in range(h)]
-SavingRate_list = []
-
-import warnings
-
-warnings.filterwarnings("ignore")  # Suppress some disturbing but harmless warnings
-
-for t in range(1, LifeCyclePop.T_cycle + 1):
-    # aLvlGro_hist[0] = 0 # set the first growth rate to 0, since there is no data for period 0
-    aLvlGroNow = np.log(
-        LifeCyclePop.history["aNrm"][t] / LifeCyclePop.history["aNrm"][t - 1]
-    )  # (10000,)
-
-    # Call the saving rate function at this period's simulated market resources
-    SavingRate = savingRateFunc(LifeCyclePop, LifeCyclePop.history["mNrm"][t])
-
-    SavingRate_list.append(SavingRate)
-
-    # Create elements of matrix list
-    matrix_list = [0 for number in range(7)]
-    matrix_list[0] = t
-    matrix_list[1] = LifeCyclePop.history["aNrm"][t]
-    matrix_list[2] = LifeCyclePop.history["cNrm"][t]
-    matrix_list[3] = LifeCyclePop.history["TranShk"][t]
-    matrix_list[4] = LifeCyclePop.history["TranShk"][t - 1]
-    matrix_list[5] = aLvlGroNow
-    matrix_list[6] = SavingRate
-
-    giant_list[t - 1] = matrix_list
-
-# Print command disabled to prevent giant print!
-# print(giant_list)
-
-
-# %% code_folding=[0]
-# Construct the level of assets A from a*p where a is the ratio to permanent income p
-LifeCyclePop.history["aLvl"] = (
-    LifeCyclePop.history["aNrm"] * LifeCyclePop.history["pLvl"]
-)
-aGro41 = LifeCyclePop.history["aLvl"][41] / LifeCyclePop.history["aLvl"][40]
-aGro41NoU = aGro41[aGro41[:] > 0.2]  # Throw out extreme outliers
-
-
-# %% code_folding=[0]
-# Plot the distribution of growth rates of wealth between age 65 and 66 (=25 + 41)
-from matplotlib import pyplot as plt
-
-n, bins, patches = plt.hist(aGro41NoU, 50, density=True)
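-
-# %%
-# Label the histogram for readability (a small optional addition; the axis labels
-# below are our own and were not part of the original figure)
-plt.xlabel("Growth rate of wealth between ages 65 and 66")
-plt.ylabel("Density")
-plt.show()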