diff --git a/HARK/ConsumptionSaving/ConsIndShockModel.py b/HARK/ConsumptionSaving/ConsIndShockModel.py
index 07997e0a3..c16555ce1 100644
--- a/HARK/ConsumptionSaving/ConsIndShockModel.py
+++ b/HARK/ConsumptionSaving/ConsIndShockModel.py
@@ -15,6 +15,7 @@
 from copy import copy, deepcopy
 
 import numpy as np
+import xarray as xr
 from scipy import sparse as sp
 from scipy.optimize import newton
 
@@ -3441,6 +3442,58 @@ def construct_lognormal_income_process_unemployment(self):
     )
     return IncShkDstn, PermShkDstn, TranShkDstn
 
+    def make_state_grid(
+        self,
+        PLvlGrid=None,
+        mNrmGrid=None,
+    ):
+        """
+        Create a mesh over the (PLvl, mNrm) state space and store it as an
+        xr.Dataset in self.state_grid. A trivial single-point grid is used
+        for any state variable whose grid is not provided.
+        """
+        if PLvlGrid is None:
+            PLvlGrid = np.array([1.0])
+        if mNrmGrid is None:
+            mNrmGrid = np.array([1.0])
+
+        # Create a mesh of state points and flatten it
+        points = np.meshgrid(PLvlGrid, mNrmGrid, indexing="ij")
+        points = np.stack([x.flatten() for x in points], axis=0)
+
+        self.state_grid = xr.Dataset(
+            data_vars={
+                "PLvl": ("mesh", points[0]),
+                "mNrm": ("mesh", points[1]),
+            },
+            coords={"mesh": np.arange(points.shape[1])},
+            attrs={
+                "grids": {
+                    "PLvl": PLvlGrid,
+                    "mNrm": mNrmGrid,
+                },
+                "mesh_order": ["PLvl", "mNrm"],
+            },
+        )
+
+    def state_to_state_trans(self, shocks_next, solution, state, PermGroFac, Rfree):
+        # Consumption
+        cNrm = solution.cFunc(state["mNrm"])
+        # Savings
+        aNrm = state["mNrm"] - cNrm
+
+        # Shock realizations
+        PermGroShk = shocks_next["PermShk"] * PermGroFac
+        PLvl_next = state["PLvl"] * PermGroShk
+        mNrm_next = aNrm * Rfree / PermGroShk + shocks_next["TranShk"]
+
+        return {
+            "PLvl": PLvl_next,
+            "mNrm": mNrm_next,
+        }
 
 
 class LognormPermIncShk(DiscreteDistribution):
 
diff --git a/HARK/ConsumptionSaving/ConsPortfolioModel.py b/HARK/ConsumptionSaving/ConsPortfolioModel.py
index 731f603de..b19802dcd 100644
--- a/HARK/ConsumptionSaving/ConsPortfolioModel.py
+++ b/HARK/ConsumptionSaving/ConsPortfolioModel.py
@@ -28,7 +28,13 @@
     MargValueFuncCRRA,
     ValueFuncCRRA,
 )
+from HARK.distribution import (
+    combine_indep_dstns,
+    DiscreteDistribution,
+    DiscreteDistributionLabeled,
+)
 from HARK.metric import MetricObject
+import xarray as xr
 
 
 # Define a class to represent the single period solution of the portfolio choice problem
@@ -145,6 +151,20 @@ def __init__(
         self.EndOfPrddvds_fxd = EndOfPrddvds_fxd
         self.AdjPrb = AdjPrb
 
+    def cFunc(self, mNrm, Share, Adjust):
+        # Dispatch to the adjusting or fixed-share consumption function,
+        # depending on whether the agent can rebalance the portfolio
+        cNrm = xr.full_like(mNrm, np.nan)
+        cNrm[Adjust] = self.cFuncAdj(mNrm[Adjust])
+        no_adj = ~Adjust
+        cNrm[no_adj] = self.cFuncFxd(mNrm[no_adj], Share[no_adj])
+        return cNrm
+
+    def ShareFunc(self, mNrm, Share, Adjust):
+        # Same dispatch logic for the risky share policy function
+        ShareNext = xr.full_like(mNrm, np.nan)
+        ShareNext[Adjust] = self.ShareFuncAdj(mNrm[Adjust])
+        no_adj = ~Adjust
+        ShareNext[no_adj] = self.ShareFuncFxd(mNrm[no_adj], Share[no_adj])
+        return ShareNext
+
 
 class PortfolioConsumerType(RiskyAssetConsumerType):
     """
@@ -316,6 +336,114 @@ def get_controls(self):
         self.controls["cNrm"] = cNrmNow
         self.controls["Share"] = ShareNow
 
+    def shock_dstn_engine(self, ShockDstn, AdjustPrb):
+        """
+        Creates a joint labeled distribution of all the shocks in the model.
+        """
+
+        # ShockDstn is created by RiskyAssetConsumerType and contains, in order:
+        # PermShk, TranShk, and Risky.
+
+        # Create a distribution for the Adjust shock.
+        # TODO: self.AdjustDstn already exists, but it is a FrozenDist type of object
+        # that does not work with combine_indep_dstns. This should be fixed.
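+        # For illustration (hypothetical values): with AdjustPrb = 0.9, the
+        # engine returns a DiscreteDistributionLabeled over the four variables
+        # PermShk, TranShk, Risky, and Adjust, whose atoms are the Cartesian
+        # product of the income/return atoms with Adjust in {True, False},
+        # e.g. an income/return atom (0.97, 1.02, 1.05) with probability p
+        # splits into (0.97, 1.02, 1.05, True) with probability p * 0.9 and
+        # (0.97, 1.02, 1.05, False) with probability p * 0.1.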
+        if AdjustPrb < 1.0:
+            AdjustDstn = DiscreteDistribution(
+                np.array([1.0 - AdjustPrb, AdjustPrb]), [False, True]
+            )
+        else:
+            AdjustDstn = DiscreteDistribution(np.array([1.0]), [True])
+
+        LabeledShkDstn = DiscreteDistributionLabeled.from_unlabeled(
+            dist=combine_indep_dstns(ShockDstn, AdjustDstn),
+            name="Full shock distribution",
+            var_names=["PermShk", "TranShk", "Risky", "Adjust"],
+        )
+
+        return LabeledShkDstn
+
+    def make_state_grid(
+        self,
+        PLvlGrid=None,
+        mNrmGrid=None,
+        ShareGrid=None,
+        AdjustGrid=None,
+    ):
+        """
+        Create a mesh over the (PLvl, mNrm, Share, Adjust) state space and
+        store it as an xr.Dataset in self.state_grid. A trivial single-point
+        grid is used for any state variable whose grid is not provided.
+        """
+        if PLvlGrid is None:
+            PLvlGrid = np.array([1.0])
+        if mNrmGrid is None:
+            mNrmGrid = np.array([1.0])
+        if ShareGrid is None:
+            ShareGrid = np.array([1.0])
+        if AdjustGrid is None:
+            AdjustGrid = np.array([True])
+
+        # Create a mesh of state points and flatten it
+        points = np.meshgrid(PLvlGrid, mNrmGrid, ShareGrid, AdjustGrid, indexing="ij")
+        points = np.stack([x.flatten() for x in points], axis=0)
+
+        self.state_grid = xr.Dataset(
+            data_vars={
+                "PLvl": ("mesh", points[0]),
+                "mNrm": ("mesh", points[1]),
+                "Share": ("mesh", points[2]),
+                "Adjust": ("mesh", points[3].astype(bool)),
+            },
+            coords={"mesh": np.arange(points.shape[1])},
+            attrs={
+                "grids": {
+                    "PLvl": PLvlGrid,
+                    "mNrm": mNrmGrid,
+                    "Share": ShareGrid,
+                    "Adjust": AdjustGrid,
+                },
+                "mesh_order": ["PLvl", "mNrm", "Share", "Adjust"],
+            },
+        )
+
+    def state_to_state_trans(self, shocks_next, solution, state, PermGroFac, Rfree):
+        # Consumption
+        cNrm = solution.cFunc(state["mNrm"], state["Share"], state["Adjust"])
+        # Savings
+        aNrm = state["mNrm"] - cNrm
+        # Risky share chosen this period
+        Share_next = solution.ShareFunc(state["mNrm"], state["Share"], state["Adjust"])
+        # Shock transition
+        state_next = post_state_transition(
+            shocks_next,
+            state["PLvl"],
+            aNrm,
+            Share_next,
+            PermGroFac,
+            Rfree,
+        )
+
+        return state_next
+
+
+def post_state_transition(shocks_next, PLvl, aNrm, Share_next, PermGroFac, Rfree):
+    PermGroShk = shocks_next["PermShk"] * PermGroFac
+    PLvl_next = PLvl * PermGroShk
+    # Realized portfolio return given the risky share
+    Rport = Rfree + Share_next * (shocks_next["Risky"] - Rfree)
+    mNrm_next = aNrm * Rport / PermGroShk + shocks_next["TranShk"]
+
+    # Broadcast to the full mesh-by-shock shape if needed
+    Share_next = Share_next * xr.ones_like(PLvl_next)
+    Adjust_next = shocks_next["Adjust"] * xr.ones_like(PLvl_next, dtype=bool)
+
+    return {
+        "PLvl": PLvl_next,
+        "mNrm": mNrm_next,
+        "Share": Share_next,
+        "Adjust": Adjust_next,
+    }
+
 
 class SequentialPortfolioConsumerType(PortfolioConsumerType):
     def __init__(self, verbose=False, quiet=False, **kwds):
diff --git a/HARK/ConsumptionSaving/tests/test_ConsPortfolioModel.py b/HARK/ConsumptionSaving/tests/test_ConsPortfolioModel.py
index 96b047a60..10dc99706 100644
--- a/HARK/ConsumptionSaving/tests/test_ConsPortfolioModel.py
+++ b/HARK/ConsumptionSaving/tests/test_ConsPortfolioModel.py
@@ -5,6 +5,8 @@
 import HARK.ConsumptionSaving.ConsPortfolioModel as cpm
 from HARK import make_one_period_oo_solver
 from HARK.tests import HARK_PRECISION
+from copy import copy
+from HARK.distribution import DiscreteDistributionLabeled
 
 
 class PortfolioConsumerTypeTestCase(unittest.TestCase):
@@ -304,3 +306,104 @@ def test_draws(self):
         # Adjust
         self.assertTrue(np.all(Adjust_draws[t_age == 1] == 0))
         self.assertTrue(np.all(Adjust_draws[t_age == 2] == 1))
+
+
+from HARK.ConsumptionSaving.ConsIndShockModel import init_lifecycle
+from HARK.ConsumptionSaving.ConsRiskyAssetModel import risky_asset_parms
+
+
+class test_transition_mat(unittest.TestCase):
+    def setUp(self):
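+        # The default newborn distribution is degenerate: a single atom with
+        # probability one, so every newborn starts with PLvl = 1, mNrm = 1,
+        # Share = 0.5, and Adjust = True.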
+        # Define a default newborn distribution over all states
+        self.newborn_dstn = DiscreteDistributionLabeled(
+            pmv=np.array([1.0]),
+            atoms=np.array([[1.0], [1.0], [0.5], [1.0]]),
+            var_names=["PLvl", "mNrm", "Share", "Adjust"],
+        )
+
+    def test_LC(self):
+        # Use a small number of grid points; otherwise RAM requirements are high
+        npoints = 50
+
+        # Create a life-cycle agent
+        lc_pars = copy(init_lifecycle)
+        lc_pars.update(risky_asset_parms)
+        lc_pars["DiscFac"] = 0.9
+        agent = cpm.PortfolioConsumerType(**lc_pars)
+
+        # Make shock distributions and the state grid
+        agent.make_shock_distributions()
+        agent.make_state_grid(
+            PLvlGrid=None,
+            mNrmGrid=np.linspace(0, 20, npoints),
+            ShareGrid=None,
+            AdjustGrid=None,
+        )
+        # Solve
+        agent.solve()
+        # Check that it is indeed a life-cycle model
+        assert len(agent.solution) > 10
+
+        # Get transition matrices
+        agent.find_transition_matrices(newborn_dstn=self.newborn_dstn)
+        assert len(agent.solution) - 1 == len(agent.trans_mat.living_transitions)
+
+        # Check the brute-force representation that treats age as a state.
+        full_mat = agent.trans_mat.get_full_tmat()
+        # Rows of a valid transition matrix sum to 1.0
+        self.assertTrue(np.allclose(np.sum(full_mat, 1), 1.0))
+
+        # Check iterating distributions forward
+
+        # Set an initial distribution where everyone starts at the youngest age,
+        # in the first gridpoint.
+        dstn = np.zeros((npoints, len(agent.trans_mat.living_transitions)))
+        dstn[0, 0] = 1.0
+        # Find the steady state (smoke test: only checks that it runs)
+        ss_dstn = agent.trans_mat.find_steady_state_dstn(dstn_init=dstn, max_iter=1e4)
+
+    def test_adjust(self):
+        # Create agent
+        npoints = 5
+        agent = cpm.PortfolioConsumerType(**cpm.init_portfolio)
+        agent.make_shock_distributions()
+        agent.make_state_grid(
+            PLvlGrid=None,
+            mNrmGrid=np.linspace(0, 10, npoints),
+            ShareGrid=None,
+            AdjustGrid=None,
+        )
+        agent.solve()
+        agent.find_transition_matrices(newborn_dstn=self.newborn_dstn)
+        self.assertTrue(
+            agent.trans_mat.living_transitions[0].size == np.power(npoints, 2)
+        )
+
+    def test_calvo(self):
+        # Create an agent that has some chance of not being able to adjust
+        params = copy(cpm.init_portfolio)
+        params["AdjustPrb"] = 0.5
+
+        agent = cpm.PortfolioConsumerType(**params)
+        agent.make_shock_distributions()
+        # Share and Adjust become states, so we need grids for them
+        agent.make_state_grid(
+            PLvlGrid=None,
+            mNrmGrid=np.linspace(0, 30, 50),
+            ShareGrid=np.linspace(0, 1, 10),
+            AdjustGrid=np.array([False, True]),
+        )
+        agent.solve()
+        agent.find_transition_matrices(newborn_dstn=self.newborn_dstn)
+        self.assertTrue(
+            agent.trans_mat.living_transitions[0].size == np.power(50 * 10 * 2, 2)
+        )
+
+        # Check that we can simulate it
+        dstn = np.zeros((len(agent.state_grid.coords["mesh"]), 1))
+        dstn[0, 0] = 1.0
+
+        for _ in range(1000):
+            dstn = agent.trans_mat.iterate_dstn_forward(dstn)
diff --git a/HARK/ConsumptionSaving/tests/test_IndShockConsumerType.py b/HARK/ConsumptionSaving/tests/test_IndShockConsumerType.py
index 92923bd1d..b73b3a7d3 100644
--- a/HARK/ConsumptionSaving/tests/test_IndShockConsumerType.py
+++ b/HARK/ConsumptionSaving/tests/test_IndShockConsumerType.py
@@ -3,6 +3,7 @@
 
 import numpy as np
 
+from HARK.distribution import DiscreteDistributionLabeled
 from HARK.ConsumptionSaving.ConsIndShockModel import (
     ConsIndShockSolverBasic,
     IndShockConsumerType,
@@ -922,3 +923,76 @@ def test_calc_jacobian(self):
         self.assertAlmostEqual(CJAC_Perm.T[30][29], -0.06120, places=HARK_PRECISION)
         self.assertAlmostEqual(CJAC_Perm.T[30][30], 0.05307, places=HARK_PRECISION)
         self.assertAlmostEqual(CJAC_Perm.T[30][31], 0.04674, places=HARK_PRECISION)
+
+
+# %% Compare with newer transition matrix methods
+
+
+class test_compare_trans_mats(unittest.TestCase):
+    def setUp(self):
+        # Grid
+        self.m_grid = np.linspace(0, 20, 10)
+
+        # Newborn distribution. Will's method assumes everyone starts
+        # at m = 1.
+        self.newborn_dstn = DiscreteDistributionLabeled(
+            pmv=np.array([1.0]),
+            atoms=np.array([[1.0], [1.0]]),
+            var_names=["PLvl", "mNrm"],
+        )
+
+    def test_compare_will_mateo(self):
+        # Create and solve agents
+        will = IndShockConsumerType(**dict_harmenberg)
+        mateo = IndShockConsumerType(**dict_harmenberg)
+        for agent in [will, mateo]:
+            agent.cycles = 0
+            agent.solve()
+            # Activate the Harmenberg neutral measure
+            agent.neutral_measure = True
+            agent.update_income_process()
+
+        # %% 1. Transition matrices
+
+        # Define grids (Will)
+        will.define_distribution_grid()
+        m_grid = will.dist_mGrid
+        # Define grids and shock distributions (Mateo); income shocks are the
+        # only shocks here, so they serve directly as the full distributions
+        mateo.make_state_grid(mNrmGrid=m_grid)
+        mateo.full_shock_dstns = mateo.IncShkDstn
+
+        # Calculate transition matrix (Will)
+        will.calc_transition_matrix()
+        tm_will = will.tran_matrix
+        # Calculate transition matrix (Mateo)
+        mateo.find_transition_matrices(newborn_dstn=self.newborn_dstn)
+        tm_mateo = mateo.trans_mat.get_full_tmat()
+        # Compare (the two methods use opposite orientation conventions,
+        # hence the transpose)
+        self.assertTrue(np.allclose(tm_will, tm_mateo.T, atol=1e-10))
+
+        # %% 2. Ergodic distributions
+
+        # Steady state distribution (Will)
+        will.calc_ergodic_dist()
+        will_ss = will.vec_erg_dstn
+        # Steady state distribution (Mateo)
+        mateo_ss = mateo.trans_mat.find_steady_state_dstn(tol=1e-14, max_iter=1e4)
+        # Compare
+        self.assertTrue(np.allclose(will_ss, mateo_ss, atol=1e-14))
+
+        # Outcome grids (Will)
+        a_points_will = will.aPol_Grid
+        c_points_will = will.cPol_Grid
+        # Outcome grids (Mateo)
+        out_fns = {
+            "c": lambda solution, mNrm: solution.cFunc(mNrm),
+            "a": lambda solution, mNrm: mNrm - solution.cFunc(mNrm),
+        }
+        points_mateo = mateo.eval_outcomes_on_mesh(out_fns)
+        # Compare
+        self.assertTrue(
+            np.allclose(a_points_will, points_mateo["a"].flatten(), atol=1e-14)
+        )
+        self.assertTrue(
+            np.allclose(c_points_will, points_mateo["c"].flatten(), atol=1e-14)
+        )
diff --git a/HARK/core.py b/HARK/core.py
index 3149ebf0d..2f43a2a7a 100644
--- a/HARK/core.py
+++ b/HARK/core.py
@@ -16,6 +16,7 @@
 import numpy as np
 import pandas as pd
 from xarray import DataArray
+from inspect import signature
 
 from HARK.distribution import (
     Distribution,
@@ -26,6 +27,8 @@
 from HARK.parallel import multi_thread_commands, multi_thread_commands_fake
 from HARK.utilities import NullFunc, get_arg_names
 
+from HARK.mat_methods import mass_to_grid, transition_mat
+
 
 class Model:
     """
@@ -850,6 +853,176 @@ def clear_history(self):
             self.history[var_name] = np.empty((self.T_sim, self.AgentCount))
             self.history[var_name].fill(np.nan)
 
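+    # make_shock_distributions and find_transition_matrices below mirror the
+    # looping logic of HARK's solver: they walk through the periods of a
+    # cycle, assembling the time-varying and time-invariant inputs that each
+    # period's engine needs.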
+    def make_shock_distributions(self):
+        # Calculate the number of periods per cycle; defaults to 1 if all
+        # parameters are time invariant
+        if len(self.time_vary) > 0:
+            T = len(self.__dict__[self.time_vary[0]])
+        else:
+            T = 1
+
+        dstn_dict = {parameter: self.__dict__[parameter] for parameter in self.time_inv}
+        dstn_dict.update({parameter: None for parameter in self.time_vary})
+
+        if hasattr(self.shock_dstn_engine, "dstn_args"):
+            these_args = self.shock_dstn_engine.dstn_args
+        else:
+            these_args = get_arg_names(self.shock_dstn_engine)
+
+        these_args = tuple(filter(lambda x: x != "self", these_args))
+
+        # Initialize the list of shock distributions for this cycle,
+        # then iterate on periods
+        shock_dstns = []
+
+        cycles_range = [0] + list(range(T - 1, 0, -1))
+        for k in range(T - 1, -1, -1) if self.cycles == 1 else cycles_range:
+            # Update time-varying single period inputs
+            for name in self.time_vary:
+                if name in these_args:
+                    dstn_dict[name] = self.__dict__[name][k]
+
+            # Make a temporary dictionary for this period
+            temp_dict = {name: dstn_dict[name] for name in these_args}
+
+            # Construct this period's shock distribution, add it to the
+            # list, and move on to the preceding period
+            dstn_t = self.shock_dstn_engine(**temp_dict)
+            shock_dstns.insert(0, dstn_t)
+
+        # Save the list of shock distributions
+        self.full_shock_dstns = shock_dstns
+
+    def eval_outcomes_on_mesh(self, outcomes):
+        # TODO: handle outcome functions that depend on time-varying parameters
+
+        T = len(self.solution)
+        M = len(self.state_grid.coords["mesh"])
+
+        outcome_mesh = {}
+
+        for out_name, out_fn in outcomes.items():
+            # Initialize
+            outcome_mesh[out_name] = np.zeros((M, T))
+
+            # Parse the outcome function's arguments: an optional leading
+            # "solution" argument, followed by state variable names
+            args = list(signature(out_fn).parameters)
+            if len(args) > 0 and args[0] == "solution":
+                uses_sol = True
+                state_args = args[1:]
+            else:
+                uses_sol = False
+                state_args = args
+
+            if uses_sol:
+                for t in range(T):
+                    outcome_mesh[out_name][:, t] = out_fn(
+                        *([self.solution[t]] + [self.state_grid[x] for x in state_args])
+                    )
+            else:
+                # TODO: this could change with time-varying parameters
+                for t in range(T):
+                    outcome_mesh[out_name][:, t] = out_fn(
+                        *[self.state_grid[x] for x in state_args]
+                    )
+
+        return outcome_mesh
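+
+    # Example usage (mirroring the tests above): evaluate the consumption and
+    # savings policies on the state mesh; each entry comes back as an (M, T)
+    # array of mesh points by periods.
+    #
+    #     out_fns = {
+    #         "c": lambda solution, mNrm: solution.cFunc(mNrm),
+    #         "a": lambda solution, mNrm: mNrm - solution.cFunc(mNrm),
+    #     }
+    #     points = agent.eval_outcomes_on_mesh(out_fns)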
+    def find_transition_matrices(self, newborn_dstn=None):
+        # Calculate the number of periods per cycle; defaults to 1 if all
+        # parameters are time invariant
+        if len(self.time_vary) > 0:
+            T = len(self.__dict__[self.time_vary[0]])
+        else:
+            T = 1
+
+        trans_dict = {
+            parameter: self.__dict__[parameter] for parameter in self.time_inv
+        }
+        trans_dict.update({parameter: None for parameter in self.time_vary})
+
+        if hasattr(self.state_to_state_trans, "trans_args"):
+            these_args = self.state_to_state_trans.trans_args
+        else:
+            these_args = get_arg_names(self.state_to_state_trans)
+
+        exclude_args = ["self", "shocks_next", "state", "solution"]
+        these_args = tuple(filter(lambda x: x not in exclude_args, these_args))
+
+        # Extract state grid data
+        grids = {}
+        for x in self.state_grid.attrs["mesh_order"]:
+            grids[x] = self.state_grid.attrs["grids"][x].astype(float)
+
+        # Find names and values of non-trivial grids
+        nt_states = [x for x, grid in grids.items() if grid.size > 1]
+        nt_grids = tuple(grids[x] for x in nt_states)
+
+        # Number of points in the full grid
+        mesh_size = self.state_grid.coords["mesh"].size
+
+        # Represent the newborn distribution as a mass vector on the grid
+        if newborn_dstn is not None:
+            nb_points = newborn_dstn.dataset[nt_states].to_array().values.T
+            nb_pmv = newborn_dstn.probability.values
+
+            newborn_mass = mass_to_grid(
+                points=nb_points,
+                mass=nb_pmv,
+                grids=nt_grids,
+            )
+        else:
+            newborn_mass = None
+
+        # Initialize the list of matrices conditional on survival
+        surv_mats = []
+
+        cycles_range = [0] + list(range(T - 1, 0, -1))
+        for k in range(T - 1, -1, -1) if self.cycles == 1 else cycles_range:
+            # Update time-varying single period inputs
+            for name in self.time_vary:
+                if name in these_args:
+                    trans_dict[name] = self.__dict__[name][k]
+
+            # Make a temporary dictionary for this period
+            temp_dict = {name: trans_dict[name] for name in these_args}
+
+            shock_dstn = self.full_shock_dstns[k]
+
+            # Defined inside the loop so that it captures this period's
+            # parameters in temp_dict
+            def trans_wrapper(shocks_next, solution, state_points):
+                return self.state_to_state_trans(
+                    shocks_next, solution, state_points, **temp_dict
+                )
+
+            state_dstn = shock_dstn.dist_of_func(
+                trans_wrapper, solution=self.solution[k], state_points=self.state_grid
+            )
+
+            state_points = state_dstn.dataset[nt_states].to_array().values
+            pmv = state_dstn.probability.values
+
+            # Construct the transition matrix from the distribution above:
+            # each row distributes one mesh point's mass over the grid
+            tmat = np.zeros((mesh_size, mesh_size))
+            for i in range(mesh_size):
+                tmat[i, :] = mass_to_grid(
+                    points=state_points[:, i, :].T,
+                    mass=pmv,
+                    grids=nt_grids,
+                )
+
+            # Prepend
+            surv_mats.insert(0, tmat)
+
+        # Save the matrices
+        self.trans_mat = transition_mat(
+            living_transitions=surv_mats,
+            surv_probs=self.LivPrb,
+            newborn_dstn=newborn_mass,
+            life_cycle=T > 1,
+        )
+
 
 def solve_agent(agent, verbose):
     """
diff --git a/HARK/mat_methods.py b/HARK/mat_methods.py
index abe2cb4fd..4e7983e85 100644
--- a/HARK/mat_methods.py
+++ b/HARK/mat_methods.py
@@ -254,3 +254,198 @@ def mass_to_grid(
     distr = sum_weights(weights, dims, add_inds)
 
     return distr
+
+
+class transition_mat:
+    def __init__(
+        self,
+        living_transitions: list,
+        surv_probs: list,
+        newborn_dstn: np.ndarray,
+        life_cycle: bool,
+    ) -> None:
+        self.living_transitions = living_transitions
+        self.surv_probs = surv_probs
+        self.newborn_dstn = newborn_dstn
+        self.life_cycle = life_cycle
+
+        if self.life_cycle:
+            assert len(self.living_transitions) == len(
+                self.surv_probs
+            ), "living_transitions must have the same length as surv_probs if life_cycle is True"
+        else:
+            assert (
+                len(self.living_transitions) == 1
+            ), "living_transitions must be a list of length 1 if life_cycle is False"
+            assert (
+                len(self.surv_probs) == 1
+            ), "surv_probs must be a list of length 1 if life_cycle is False"
+
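+        # T is the number of ages in the brute-force representation: one age
+        # per living transition, plus a terminal age whose members die with
+        # certainty and re-enter as newborns.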
+        self.T = len(self.living_transitions) + 1
+
+        self.grid_len = self.living_transitions[0].shape[0]
+
+    def get_full_tmat(self):
+        if self.life_cycle:
+            # Life cycle
+            dim = self.T * self.grid_len
+            full_mat = np.zeros((dim, dim))
+            for k in range(self.T - 1):
+                row_init = k * self.grid_len
+                row_end = row_init + self.grid_len
+                # Living-to-newborn
+                full_mat[row_init:row_end, : self.grid_len] += (
+                    1 - self.surv_probs[k]
+                ) * self.newborn_dstn[np.newaxis, :]
+                # Living-to-age+1
+                col_init = row_init + self.grid_len
+                col_end = col_init + self.grid_len
+                full_mat[row_init:row_end, col_init:col_end] += (
+                    self.surv_probs[k] * self.living_transitions[k]
+                )
+
+            # At the end of the last age, everyone turns into a newborn
+            full_mat[
+                (self.T - 1) * self.grid_len :, : self.grid_len
+            ] += self.newborn_dstn[np.newaxis, :]
+
+        else:
+            # Infinite horizon
+            full_mat = (
+                self.surv_probs[0] * self.living_transitions[0]
+                + (1 - self.surv_probs[0]) * self.newborn_dstn[np.newaxis, :]
+            )
+
+        return full_mat
+
+    def post_multiply(self, mat):
+        # Check dimension compatibility
+        n_rows, n_cols = mat.shape
+        if self.life_cycle:
+            ncols_fullmat = self.T * self.grid_len
+        else:
+            ncols_fullmat = self.grid_len
+
+        if n_rows != ncols_fullmat:
+            raise Exception(
+                "Matrix has {} rows, but should have {}".format(n_rows, ncols_fullmat)
+            )
+
+        if self.life_cycle:
+            full_mat_dim = self.T * self.grid_len
+            prod = np.zeros((full_mat_dim, n_cols))
+
+            for k in range(self.T):
+                row_init = k * self.grid_len
+                row_end = row_init + self.grid_len
+                if k < self.T - 1:
+                    sp = self.surv_probs[k]
+                else:
+                    sp = 0.0
+
+                for j in range(n_cols):
+                    # From the newborn distribution
+                    prod[row_init:row_end, j] += (1 - sp) * np.dot(
+                        self.newborn_dstn[np.newaxis, :], mat[: self.grid_len, j]
+                    )
+                    if k < self.T - 1:
+                        # From the living distribution
+                        prod[row_init:row_end, j] += sp * np.dot(
+                            self.living_transitions[k],
+                            mat[row_end : (row_end + self.grid_len), j],
+                        )
+
+            return prod
+
+        else:
+            # Infinite horizon
+            prod = self.surv_probs[0] * np.dot(self.living_transitions[0], mat)
+            prod += (1 - self.surv_probs[0]) * np.dot(
+                self.newborn_dstn[np.newaxis, :], mat
+            )
+            return prod
+
+    def pre_multiply(self, mat):
+        # Check dimension compatibility
+        n_rows, n_cols = mat.shape
+        if self.life_cycle:
+            nrows_fullmat = self.T * self.grid_len
+        else:
+            nrows_fullmat = self.grid_len
+
+        if n_cols != nrows_fullmat:
+            raise Exception(
+                "Matrix has {} cols, but should have {}".format(n_cols, nrows_fullmat)
+            )
+
+        if self.life_cycle:
+            full_mat_dim = self.T * self.grid_len
+            prod = np.zeros((n_rows, nrows_fullmat))
+
+            for k in range(self.T):
+                col_init = k * self.grid_len
+                col_end = col_init + self.grid_len
+                if k < self.T - 1:
+                    sp = self.surv_probs[k]
+                else:
+                    sp = 0.0
+
+                # Newborns contribute to the first block of columns
+                nb_mat = np.tile(self.newborn_dstn, (self.grid_len, 1))
+                prod[:, : self.grid_len] += (1 - sp) * np.dot(
+                    mat[:, col_init:col_end], nb_mat
+                )
+                # The living contribute to the next age's columns
+                if k < self.T - 1:
+                    prod[:, col_end : (col_end + self.grid_len)] += sp * np.dot(
+                        mat[:, col_init:col_end], self.living_transitions[k]
+                    )
+
+            return prod
+
+        else:
+            # Infinite horizon
+            prod = np.dot(
+                mat,
+                self.surv_probs[0] * self.living_transitions[0]
+                + (1 - self.surv_probs[0]) * self.newborn_dstn[np.newaxis, :],
+            )
+
+            return prod
+
+    def iterate_dstn_forward(self, dstn_init: np.ndarray) -> np.ndarray:
+        dstn_final = self.pre_multiply(dstn_init.T).T
+
+        return dstn_final
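+
+    # find_steady_state_dstn iterates the distribution forward until it stops
+    # changing. Renormalizing every few iterations guards against slow drift
+    # of total mass from accumulated floating point error.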
+    def find_steady_state_dstn(
+        self,
+        dstn_init=None,
+        tol=1e-10,
+        max_iter=1000,
+        check_every=10,
+        normalize_every=20,
+    ):
+        if dstn_init is None:
+            # Create an initial distribution that concentrates all mass
+            # on the first gridpoint of the first age
+            dstn_init = np.zeros(
+                (len(self.newborn_dstn), len(self.living_transitions))
+            )
+            dstn_init[0, 0] = 1.0
+
+        # Initialize
+        dstn = dstn_init
+        err = tol + 1
+        i = 0
+        while err > tol and i < max_iter:
+            dstn_new = self.iterate_dstn_forward(dstn)
+            if i % normalize_every == 0:
+                dstn_new /= np.sum(dstn_new)
+            if i % check_every == 0:
+                err = np.max(np.abs(dstn_new - dstn))
+            dstn = dstn_new
+            i += 1
+
+        return dstn
diff --git a/HARK/tests/test_mat_methods.py b/HARK/tests/test_mat_methods.py
index 7e5b84374..69b7a12f2 100644
--- a/HARK/tests/test_mat_methods.py
+++ b/HARK/tests/test_mat_methods.py
@@ -1,7 +1,7 @@
 import unittest
 import numpy as np
 from HARK.utilities import jump_to_grid_1D, jump_to_grid_2D
-from HARK.mat_methods import mass_to_grid
+from HARK.mat_methods import mass_to_grid, transition_mat
 
 
 # Compare general mass_to_grid with jump_to_grid_1D
@@ -85,3 +85,68 @@ def test_simple_3d(self):
         self.assertTrue(grid_mass[1, 0, 1] == 1 / 8)
         self.assertTrue(grid_mass[1, 1, 0] == 1 / 8)
         self.assertTrue(grid_mass[1, 1, 1] == (1 / 8 + 1.0))
+
+
+class TestTransMatMultiplication(unittest.TestCase):
+    def setUp(self):
+        # Create dummy 3-gridpoint problems
+
+        # A newborn "distribution"
+        self.newborn_dstn = np.array([0.5, 0.3, 0.2])
+
+        # Infinite horizon transition
+        inf_h_trans = np.array([[0.9, 0.1, 0.0], [0.1, 0.8, 0.1], [0.0, 0.1, 0.9]])
+        self.inf_horizon_mat = transition_mat(
+            living_transitions=[inf_h_trans],
+            surv_probs=[0.9],
+            newborn_dstn=self.newborn_dstn,
+            life_cycle=False,
+        )
+
+        # Life cycle transitions
+        lc_trans = [
+            np.array(
+                [
+                    [x, 1 - x, 0],
+                    [x / 2, 1 - x / 2, 0],
+                    [0, 1 - x, x],
+                ]
+            )
+            for x in [0.1, 0.2, 0.3, 0.4, 0.5]
+        ]
+        self.life_cycle_mat = transition_mat(
+            living_transitions=lc_trans,
+            surv_probs=[0.95, 0.93, 0.92, 0.91, 0.90],
+            newborn_dstn=self.newborn_dstn,
+            life_cycle=True,
+        )
+
+    def test_post_multiply(self):
+        # Infinite horizon
+        nrows = self.inf_horizon_mat.grid_len
+        mat = np.random.rand(nrows, 3)
+        res = self.inf_horizon_mat.post_multiply(mat)
+        res2 = np.dot(self.inf_horizon_mat.get_full_tmat(), mat)
+        self.assertTrue(np.allclose(res, res2))
+
+        # Life cycle
+        nrows = self.life_cycle_mat.grid_len * self.life_cycle_mat.T
+        mat = np.random.rand(nrows, 3)
+        res = self.life_cycle_mat.post_multiply(mat)
+        res2 = np.dot(self.life_cycle_mat.get_full_tmat(), mat)
+        self.assertTrue(np.allclose(res, res2))
+
+    def test_pre_multiply(self):
+        # Infinite horizon
+        ncols = self.inf_horizon_mat.grid_len
+        mat = np.random.rand(3, ncols)
+        res = self.inf_horizon_mat.pre_multiply(mat)
+        res2 = np.dot(mat, self.inf_horizon_mat.get_full_tmat())
+        self.assertTrue(np.allclose(res, res2))
+
+        # Life cycle
+        ncols = self.life_cycle_mat.grid_len * self.life_cycle_mat.T
+        mat = np.random.rand(3, ncols)
+        res = self.life_cycle_mat.pre_multiply(mat)
+        res2 = np.dot(mat, self.life_cycle_mat.get_full_tmat())
+        self.assertTrue(np.allclose(res, res2))
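
A minimal end-to-end usage sketch, assembled from the tests above (grid sizes and parameter choices are illustrative, not recommendations):

    import numpy as np
    import HARK.ConsumptionSaving.ConsPortfolioModel as cpm
    from HARK.distribution import DiscreteDistributionLabeled

    # Degenerate newborn distribution: all newborns start at the same point
    newborn_dstn = DiscreteDistributionLabeled(
        pmv=np.array([1.0]),
        atoms=np.array([[1.0], [1.0], [0.5], [1.0]]),
        var_names=["PLvl", "mNrm", "Share", "Adjust"],
    )

    agent = cpm.PortfolioConsumerType(**cpm.init_portfolio)
    agent.make_shock_distributions()  # one labeled shock distribution per period
    agent.make_state_grid(mNrmGrid=np.linspace(0, 10, 100))
    agent.solve()

    # Discretized state-to-state transition matrices
    agent.find_transition_matrices(newborn_dstn=newborn_dstn)
    full_mat = agent.trans_mat.get_full_tmat()  # rows sum to one
    ss_dstn = agent.trans_mat.find_steady_state_dstn(tol=1e-10, max_iter=1e4)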