diff --git a/Resources/LaTeXInputs/local-macros.tex b/Resources/LaTeXInputs/local-macros.tex deleted file mode 100644 index 4f8fb703e..000000000 --- a/Resources/LaTeXInputs/local-macros.tex +++ /dev/null @@ -1,251 +0,0 @@ -% Define macros for this paper -% Only works after \input{./.econtexRoot} -\ProvidesPackage{SolvingMicroDSOPs-private} - -\renewcommand{\econtexRoot}{.} -\renewcommand{\packages}{\econtexRoot/Resources/texmf-local/tex/latex} -\renewcommand{\econtex}{\packages/econtex} -\renewcommand{\econark}{\econtexRoot/Resources/texmf-local/tex/latex/econark} -\renewcommand{\econtexSetup}{\econtexRoot/Resources/texmf-local/tex/latex/econtexSetup} -\renewcommand{\econtexShortcuts}{\econtexRoot/Resources/texmf-local/tex/latex/econtexShortcuts} -\renewcommand{\econtexBibMake}{\econtexRoot/Resources/texmf-local/tex/latex/econtexBibMake} -\renewcommand{\econtexBibStyle}{\econtexRoot/Resources/texmf-local/bibtex/bst/econtex} -\renewcommand{\LaTeXInputs}{\econtexRoot/Resources/LaTeXInputs} -\renewcommand{\local}{\LaTeXInputs/local} - -\providecommand{\onlyinsubfile}{}\renewcommand{\onlyinsubfile}[1]{#1} -\providecommand{\notinsubfile}{}\renewcommand{\notinsubfile}[1]{} - -\providecommand{\aboveMin}{}\renewcommand{\aboveMin}{\blacktriangle} -\providecommand{\chiFunc}{}\providecommand{\chiFunc}{\pmb{\chi}} - -% version for urls - [userGitHubID] or econ-ark - -\newcommand{\Arrival}{Arrival}\newcommand{\arvl}{\leftarrow} -\newcommand{\Decision}{Decision}\newcommand{\dcsn}{\null}%{\sim} -\newcommand{\Continuation}{Continuation}\newcommand{\cntn}{\rightarrow}%{\rightharpoonup} - -% providecommand then renewcommand works to redefine it -% if it already exists - -\providecommand{\prdNxt}{}\renewcommand{\prdNxt}{\prdt+1} -\providecommand{\prdLst}{}\renewcommand{\prdLst}{\prdt-1} -\providecommand{\prdNxT}{}\renewcommand{\prdNxT}{\prdT+1} -\providecommand{\prdLsT}{}\renewcommand{\prdLsT}{\prdT-1} - -\providecommand{\BegMark}{}\renewcommand{\BegMark}{\arvl} -\providecommand{\BegStp}{}\renewcommand{\BegStp}{_\arvl} -\providecommand{\BegStpNxt}{}\renewcommand{\BegStpNxt}{_\arvl{(+)}} -\providecommand{\BegStpLst}{}\renewcommand{\BegStpLst}{_\arvl{(-)}} - -\providecommand{\BegPrd}{}\renewcommand{\BegPrd}{_\arvl{\prd}} -\providecommand{\BegPrdNxt}{}\renewcommand{\BegPrdNxt}{_\arvl{(\prd+1)}} -\providecommand{\BegPrdLst}{}\renewcommand{\BegPrdLst}{_\arvl\prd-1} - -\providecommand{\MidStp}{}\renewcommand{\MidStp}{\dcsn} -\providecommand{\MidPrd}{}\renewcommand{\MidPrd}{\prd} -\providecommand{\MidStpLst}{}\renewcommand{\MidStpLst}{{\prd-1}} -\providecommand{\MidStpLsT}{}\renewcommand{\MidStpLsT}{{\prdT-1}} -\providecommand{\MidStpNxt}{}\renewcommand{\MidStpNxt}{{\prd+1}} -\providecommand{\MidStpNxT}{}\renewcommand{\MidStpNxT}{{\prdT+1}} -\providecommand{\EndMark}{}\renewcommand{\EndMark}{\cntn} -\providecommand{\EndPrd}{}\renewcommand{\EndPrd}{\prd_\cntn} -\providecommand{\EndStg}{}\renewcommand{\EndStg}{_\cntn} -\providecommand{\EndPrdLst}{}\renewcommand{\EndPrdLst}{{(\prd-1)}_\cntn} -\providecommand{\EndPrdLsT}{}\renewcommand{\EndPrdLsT}{{(\prd-1)}_\cntn} -\providecommand{\MidPrdLst}{}\renewcommand{\MidPrdLst}{{(\prd-1)}} % \dcsn -\providecommand{\MidPrdLsT}{}\renewcommand{\MidPrdLsT}{{(\prd-1)}} % \dcsn -\providecommand{\BegPrdLst}{}\renewcommand{\BegPrdLst}{{_\arvl(\prd-1)}} % \dcsn -\providecommand{\BegPrdLsT}{}\renewcommand{\BegPrdLsT}{{_\arvl(\prd-1)}} % \dcsn -\providecommand{\MidLsT}{}\renewcommand{\EndPrdLsT}{{(\prd-1})_\cntn} -\providecommand{\EndPrdNxt}{}\renewcommand{\EndPrdNxt}{(\prd+1)_\cntn} 
-\providecommand{\EndPrdNxT}{}\renewcommand{\EndPrdNxT}{(\prdT+1)_\cntn} -\providecommand{\EndStp}{}\renewcommand{\EndStp}{_\cntn} -\providecommand{\EndStpLst}{}\renewcommand{\EndStpLst}{_\cntn} -\providecommand{\EndStpLsT}{}\renewcommand{\EndStpLsT}{_\cntn} -\providecommand{\EndStpNxt}{}\renewcommand{\EndStpNxt}{_\cntn} -\newcommand{\vBegStp}{\vFunc_{\BegStp}} -\newcommand{\vBegStpNxt}{\vFunc_{\BegStpNxt}} -\newcommand{\vBegPrd}{\vFunc_{\BegPrd}} -\newcommand{\vBegPrdNxt}{\vFunc_{\BegPrdNxt}} -\newcommand{\vBegStpLst}{\vFunc_{\BegStpLst}} -\newcommand{\vMidStp}{\vFunc_{\MidStp}} -\newcommand{\vMidStpNxt}{\vFunc_{\MidStpNxt}} -\newcommand{\vEnd}{\vFunc_{\EndStp}} -\newcommand{\vEndStp}{\vFunc_{\EndStp}} -\newcommand{\vEndStg}{\vFunc_{\EndStg}} -\newcommand{\vEndPrd}{\vFunc_{\EndPrd}} -\newcommand{\vEndStpLst}{\vFunc_{\EndStpLst}} -\newcommand{\vEndStpLsT}{\vFunc_{\EndStpLsT}} -\newcommand{\vEndPrdLst}{\vFunc_{\EndPrdLst}} -\newcommand{\vEndPrdLsT}{\vFunc_{\EndPrdLsT}} -\newcommand{\vMidPrdLst}{\vFunc_{\MidPrdLst}} -\newcommand{\vMidPrdLsT}{\vFunc_{\MidPrdLsT}} -\newcommand{\cEndStp}{\cFunc_{\EndStp}} -\newcommand{\vPEndStp}{\vFunc^{a}_{\EndStp}} -\newcommand{\vPMidStp}{\vFunc^{m}_{\MidStp}} -\newcommand{\vPBegStp}{\vFunc^{k}_{\BegStp}} -\newcommand{\vPEndStpNxt}{\vFunc^{a}_{\EndStpNxt}} -\newcommand{\vPMidStpNxt}{\vFunc^{m}_{\MidStpNxt}} -\newcommand{\vPBegStpNxt}{\vFunc^{k}_{\BegStpNxt}} -\renewcommand{\vPBegStpNxt}{\vFunc^{k}_{\BegStpNxt}} - -\providecommand{\step}{}\renewcommand{\step}{s} - -\providecommand{\PDVCoverc}{\mathbb{C}} - -% \newcommand{\Mma}{\textit{Mathematica}} -\providecommand{\step}{}\renewcommand{\step}{t} -\providecommand{\stp}{}\renewcommand{\stp}{t} -\providecommand{\Stp}{}\renewcommand{\Stp}{T} -% \providecommand{step}{}\renewcommand{step}{step} -\providecommand{step}{}\renewcommand{step}{toe} -\providecommand{Step}{}\renewcommand{Step}{Toe} -% \providecommand{steps}{}\renewcommand{steps}{steps} -\providecommand{steps}{}\renewcommand{steps}{toes} -\providecommand{Steps}{}\renewcommand{Steps}{Toes} -\providecommand{period}{}\renewcommand{period}{tic} -\providecommand{Period}{}\renewcommand{Period}{Tic} -\providecommand{periods}{}\renewcommand{periods}{tics} -\providecommand{Periods}{}\renewcommand{Periods}{Tics} - -\providecommand{\trnsn}{}\renewcommand{\trnsn}{transition} -\providecommand{\Trnsn}{}\renewcommand{\Trnsn}{Transition} -\providecommand{transitions}{}\renewcommand{transitions}{transitions} -\providecommand{Transitions}{}\renewcommand{Transitions}{Transitions} - -\providecommand{\evltn}{}\renewcommand{\evltn}{evolution} -\providecommand{\Evltn}{}\renewcommand{\Evltn}{Evolution} -\providecommand{evolutions}{}\renewcommand{evolutions}{evolutions} -\providecommand{Evolutions}{}\renewcommand{Evolutions}{Evolutions} - - -\providecommand{stage}{}\renewcommand{stage}{tac} -\providecommand{Stage}{}\renewcommand{Stage}{Tac} -\providecommand{stages}{}\renewcommand{stages}{tacs} -\providecommand{Stages}{}\renewcommand{Stages}{Tacs} - -\newcommand{\ExEndStp}{\Ex_{\EndStp}} -\newcommand{\ExBegStp}{\Ex_{\BegStp}} -\newcommand{\ExMidStp}{\Ex_{\MidStp}} -\newcommand{\ExMidPrd}{\Ex_{\MidPrd}} - -\newcommand{\ExEndStpLst}{\Ex_{\EndStpLst}} -\newcommand{\ExEndStpLsT}{\Ex_{\EndStpLsT}} -\newcommand{\ExBegStpLst}{\Ex_{\BegStpLst}} -\newcommand{\ExBegStpLsT}{\Ex_{\BegStpLsT}} -\newcommand{\ExMidStpLst}{\Ex_{\MidStpLst}} -\newcommand{\ExMidStpLsT}{\Ex_{\MidStpLsT}} - -\providecommand{\stigma}{}\renewcommand{\stigma}{\varsigma} -\providecommand{\Shr}{}\renewcommand{\Shr}{\stigma} 
-\providecommand{\Aprx}{}\renewcommand{\Aprx}{\Alt} - -%\newcommand{\lqd}{\ell} -\newcommand{\wlthAftr}{\acute{w}} -\newcommand{\wlthBefr}{w} - -\newcommand{\optml}[1]{\breve{#1}} - -\providecommand{\prd}{}\renewcommand{\prd}{t} -\providecommand{\prdm}{}\renewcommand{\prdm}{({\prd-1})} - - -\newcommand{\code}[1]{\ensuremath{\mathtt{#1}}} % code should be in tt font -%\newcommand{\code}{\mathtt} - -% These allow turning on or off the growth factor for permanent income -% (preserves symbolic derivations in LaTeX but makes them disappear) -% value function growth normed by G^{1-\CRRA} -\newcommand{\PermGroFacAdjV}{\PermGroFac_{\prd}^{1-\CRRA}} -\newcommand{\PermGroFacAdjVNxt}{\PermGroFac_{\prd+1}^{1-\CRRA}} -\renewcommand{\PermGroFacAdjV}{\null} -\newcommand{\AdjV}{_{\prd}^{1-\CRRA}} -\newcommand{\AdjVNxt}{_{\prd+1}^{1-\CRRA}} -%\providecommand{\PermGroFacAdjV}{}\renewcommand{\PermGroFacAdjV}{.} % turn off -% marginal utility growth normed by G^{-\CRRA} -\newcommand{\PermGroFacAdjMu}{\PermGroFac_{\prd}^{-\CRRA}} -\newcommand{\PermGroFacAdjMuNxt}{\PermGroFac_{\prd+1}^{-\CRRA}} -\newcommand{\AdjMu}{_{\prd}^{-\CRRA}} -\newcommand{\AdjMuNxt}{_{\prd+1}^{-\CRRA}} -\providecommand{\PermGroFacAdjMu}{}\renewcommand{\PermGroFacAdjMu}{\null} % turn off - -% To compile the Habits subfile, go to the Subfiles directory - -\newcommand{\trmT}{T} - -\providecommand{\VInv}{}\providecommand{\VInv}{}\renewcommand{\VInv}{{\Lambda}} -\providecommand{\vInv}{}\providecommand{\vInv}{}\renewcommand{\vInv}{{\scriptstyle \VInv \displaystyle}} - -\providecommand{\NatBoroCnstra}{}\providecommand{\NatBoroCnstra}{}\renewcommand{\NatBoroCnstra}{\underline{a}} - -\providecommand{\DiscAlt}{}\renewcommand{\DiscAlt}{\beta} % Erase the distinction between the alternative and the standard discount factor - -\providecommand{\aVecCode}{}\renewcommand{\aVecCode}{\code{aVec}} -\providecommand{\mVecCode}{}\renewcommand{\mVecCode}{\texttt{mVec}} -\providecommand{\kVecCode}{}\renewcommand{\kVecCode}{\texttt{kVec}} -\providecommand{\cVecCode}{}\renewcommand{\cVecCode}{\texttt{cVec}} - -\newcommand{\vctr}{\pmb} -\newcommand{\mVec}{\texttt{mVec}} -\newcommand{\aVec}{\texttt{aVec}} -\newcommand{\vVec}{\texttt{vVec}} - -\newcommand{\arvlstepShr}{_\arvl\step[\Shr]} -\newcommand{\stepShr}{\step[\Shr]} - -\newcommand{\vBegStpShr}{\vFunc_{_arvl[\Shr]}} -\newcommand{\vMidStpShr}{\vFunc_{\stp[\Shr]}} -\newcommand{\vEndStpShr}{\vFunc_{\stp[\Shr]_\cntn}} -\newcommand{\dvdShrBeg}{\vFunc^{\Shr}_{_\arvl\prd[\Shr]}} -\newcommand{\vShrMid}{\vFunc^{\Shr}_{\prd[\Shr]}} -\newcommand{\vShrEnd}{\vFunc^{\Shr}_{\prd[\Shr]_\cntn}} -\newcommand{\vVecShr}{\vctr{v}_{_\arvl\prd[\Shr]}} - -%\newcommand{\prd}{t} -\newcommand{\prdt}{t} -\newcommand{\prdT}{t} - -\providecommand{\hEndMin}{\underline{\hNrm}}\renewcommand{\hEndMin}{\underline{\hNrm}} -\newcommand{\TranShkEmpDummy}{\vartheta} - -\newcommand{\aVecMin}{{a}_{1}} -\newcommand{\cMin}{{c}_{1}} -\newcommand{\vctratm}{\vctr{a}_{\prd-1}}\renewcommand{\vctratm}{\vctr{a}} -\providecommand{\TranShkEmpMin}{}\renewcommand{\TranShkEmpMin}{\underline{\TranShkEmp}} - -\newcommand{\cLst}{{c}_{\prdLst}} -\newcommand{\cLsT}{{c}_{\prdLsT}} -\newcommand{\mLst}{{m}_{\prdLst}} -\newcommand{\mLsT}{{m}_{\prdLsT}} - -% Alternative definitions of these would restore the time subscript for c -% in the max problem statements -\newcommand{\cLvlMaxPrdT}{\cLvl} -\newcommand{\cLvlMaxPrdt}{\cLvl} -\newcommand{\cNrmMaxPrdT}{\cNrm} -\newcommand{\cNrmMaxPrdt}{\cNrm} -% \newcommand{\cLvlMaxPrd}{\cLvl_{\prd}} - - -\renewcommand{\std}{\sigma} 
-\newcommand{\vrnc}{\std^{2}} - -\newcommand{\cStg}{\cFunc} -\newcommand{\cStgSub}{[\cStg]} -\newcommand{\cStgSup}{\cStg} - -\newcommand{\cStgBeg}{\arvlc} -\newcommand{\cStgMid}{[c]} -\newcommand{\cStgEnd}{{c}\cntn} - -\newcommand{\vFuncBeg}{\nu} -\newcommand{\vFuncMid}{\vFunc} -\newcommand{\vFuncEnd}{\mathfrak{v}} - -\renewcommand{\vBegStpNxt}{\vFunc^{\cStgBeg}_{\prd+1}} -\newcommand{\cCtrl}{\cFunc} -\newcommand{\mStte}{\mNrm} % m as a state variable - -\newcommand{\aStte}{\aNrm} -\newcommand{\cStte}{\cNrm} diff --git a/Resources/LaTeXInputs/local-packages.tex b/Resources/LaTeXInputs/local-packages.tex deleted file mode 100644 index 8c48e5f87..000000000 --- a/Resources/LaTeXInputs/local-packages.tex +++ /dev/null @@ -1,106 +0,0 @@ -% Define a few objects that are unique to this paper -% Allow different actions depending on whether document is being processed as -% subfile or being process as standalone - -% \usepackage{import} % used in slides and cctwMoM -\usepackage{\LaTeXInputs/pdfsuppressruntime} % prevent timestamp - -% Get fonts so pdflatex can handle unicode characters -\usepackage[utf8]{inputenc} -\usepackage[T1]{fontenc} - -\usepackage[authoryear]{natbib} -\usepackage{listings} -\usepackage{mathtools} -\usepackage{accents,xr-hyper} -%\usepackage{\econark} -\usepackage{dcolumn} % seems to need to come after hyperref -\usepackage{moreverb} % Used in slides -%\usepackage{\econtexSetup} % Gets, configures often-used packages - -\provideboolean{Web} -\setboolean{Web}{false} % reset to true if running via dvi; search for \ifdvi below - -\makeatletter -\newif\ifdvi\dvitrue -\@ifundefined{pdfoutput}{}{\ifnum\pdfoutput>0 \dvifalse\fi} -\makeatother - - - -\ifdvi - \setboolean{Web}{true} - \RequirePackage{tex4ht} - \RequirePackage{graphicx} - \RequirePackage[tex4ht]{hyperref} - \provideboolean{bigPrint}\setboolean{bigPrint}{true} % HTM output looks better in a larger font size - \DeclareGraphicsExtensions{.png} - \providecommand{\textSizeDefault}{\large} - \providecommand{\titlepagefinish}{\newpage\textSizeDefault} - \providecommand{\abstractSizeDefault}{\large} - \let\footnoterule\relax - \makeatletter - \renewenvironment{abstract}{% - \begin{spacing}{0.9} - \noindent {\tiny \phantom{.}} \\ % Trick to get proper spacing in html - \noindent \hrule height 0.4pt depth 0.0pt width \textwidth \relax - \vspace*{5mm} - \noindent \textbf{Abstract}\\ - \indent \abstractSizeDefault - }{% - \noindent {\tiny \phantom{.}} \\ % Trick to get proper spacing in html -% \noindent \hrule height 0.4pt depth 0.0pt width \textwidth \relax - \vspace*{3mm} - \ifthenelse{ \isundefined\@keywords }{ - \ClassWarningNoLine{bejournal}{No keywords specified. - Please use the command \protect\keywords} - }{ - } - \end{spacing} - \begin{quote} - \begin{Description} - \item[\textbf{~~~~~~~~~~~~Keywords~}] \@keywords - \ifthenelse{ \isundefined\@jelclass }{ - \ClassWarningNoLine{bejournal}{No JEL classification specified. 
- Please use the command \protect\jelclass} - }{ - \item[\textbf{~~~~~~~~~~~~JEL codes~}] \@jelclass - \end{Description} - } - \end{quote} - \makeatother - } -\else - \RequirePackage{graphicx} % requiring [pdftex] seems to interfere with Pandemic build - \RequirePackage{hyperref} % plainpages seems to mess up BST - \DeclareGraphicsExtensions{.pdf} -\fi - - -% \usepackage{econtexSetup} sets boolean Web=true if compilation type is dvi -% also includes hyperref -\provideboolean{showPageHead}{} -\ifthenelse{\boolean{Web}}{ - \setboolean{showPageHead}{false} -}{ % {pdf} - \setboolean{showPageHead}{true} - \usepackage{scrlayer-scrpage} % Package for page headers if PDF - \usepackage{caption} % allow suppression of appendix figures in NoAppendix PDF -} - -%\usepackage{\econtexShortcuts} -\usepackage{subfiles} - -\newcommand{\urlPDF}{\texttt{\href{https://github.com/\owner/SolvingMicroDSOPs/blob/master/SolvingMicroDSOPs.pdf}{https://github.com/\owner/SolvingMicroDSOPs/blob/master/SolvingMicroDSOPs.pdf}}} -\newcommand{\urlSlides}{\texttt{\href{https://github.com/\owner/SolvingMicroDSOPs/blob/master/SolvingMicroDSOPs-Slides.pdf}{https://github.com/\owner/SolvingMicroDSOPs/blob/master/SolvingMicroDSOPs-Slides.pdf}}} -\newcommand{\urlHTML}{\texttt{\href{https://\owner.github.io/SolvingMicroDSOPs}{https://\owner.github.io/SolvingMicroDSOPs}}} -\newcommand{\urlCode}{\texttt{\href{https://github.com/\owner/SolvingMicroDSOPs/tree/master/Code}{https://github.com/\owner/SolvingMicroDSOPs/tree/master/Code}}} -\newcommand{\urlRepo}{\texttt{\href{https://github.com/\owner/SolvingMicroDSOPs}{https://github.com/\owner/SolvingMicroDSOPs}}} - - -\newcommand{\SMDSOPrepo}{\texttt{\href{https://github.com/\owner/SolvingMicroDSOPs}{SolvingMicroDSOPs}}} -\newcommand{\EMDSOPrepo}{\texttt{\href{https://github.com/\owner/EstimatingMicroDSOPs}{EstimatingMicroDSOPs repo}}} -\newcommand{\HARKrepo}{\texttt{\href{https://github.com/econ-ark/HARK}{HARK}}} -\newcommand{\HARKdocs}{\texttt{\href{https://docs.econ-ark.org}{documentation}}} - -% \newcommand{\ARKurl}{\texttt{\href{https://econ-ark.org}{Econ-ARK}}} diff --git a/Resources/LaTeXInputs/unicode-subs-declare.sty b/Resources/LaTeXInputs/unicode-subs-declare.sty deleted file mode 100644 index d6d41541f..000000000 --- a/Resources/LaTeXInputs/unicode-subs-declare.sty +++ /dev/null @@ -1,17 +0,0 @@ -% If you encounter the unicode character, substitute the LaTeX on compile -\DeclareUnicodeCharacter{1D53C}{\mathbb{E}} % 𝔼 -\DeclareUnicodeCharacter{1D41A}{\mathbf{a}} % 𝐚 -\DeclareUnicodeCharacter{1D41B}{\mathbf{b}} % 𝐛 -\DeclareUnicodeCharacter{1D41C}{\mathbf{c}} % 𝐜 -\DeclareUnicodeCharacter{1D429}{\mathbf{p}} % 𝐩 -\DeclareUnicodeCharacter{1D424}{\mathbf{k}} % 𝐤 -\DeclareUnicodeCharacter{1D426}{\mathbf{m}} % 𝐦 -\DeclareUnicodeCharacter{1D42F}{\mathbf{v}} % 𝐯 -\DeclareUnicodeCharacter{1D432}{\mathbf{y}} % 𝐲 -\DeclareUnicodeCharacter{1D4A2}{\mathcal{G}} % 𝒢 -\DeclareUnicodeCharacter{211B}{\mathcal{R}} % ℛ -\DeclareUnicodeCharacter{1D69E}{\mathrm{u}} % 𝚞 -\DeclareUnicodeCharacter{1D69F}{\mathrm{v}} % 𝚟 -\DeclareUnicodeCharacter{03B2}{\beta} % β -\DeclareUnicodeCharacter{03C3}{\sigma} % σ -\DeclareUnicodeCharacter{03C1}{\rho} % ρ diff --git a/SolvingMicroDSOPs-clean-clean.dep b/SolvingMicroDSOPs-clean-clean.dep deleted file mode 100644 index c2f7ec491..000000000 --- a/SolvingMicroDSOPs-clean-clean.dep +++ /dev/null @@ -1,169 +0,0 @@ -\RequireVersions{ - *{application}{TeX} {1990/03/25 v3.x} - *{format} {LaTeX2e} {2022-11-01 v2.e} - *{class}
{Resources/texmf-local/tex/latex/econtex}{2017/08/01 v0.0} - *{package}{snapshot} {2002/03/05 v1.14} - *{package}{ifthen} {2022/04/13 v1.1d} - *{package}{changepage} {2009/10/20 v1.0c} - *{package}{currfile} {2022/10/10 v0.8} - *{package}{kvoptions} {2022-06-15 v3.15} - *{package}{keyval} {2022/05/29 v1.15} - *{package}{ltxcmds} {2020-05-10 v1.25} - *{package}{kvsetkeys} {2022-10-05 v1.19} - *{package}{filehook} {2022/10/25 v0.8b} - *{package}{filehook-2020}{2022/10/25 v0.8b} - *{package}{setspace} {2022/12/04 v6.7b} - *{class} {scrartcl} {2022/10/12 v3.38} - *{package}{scrkbase} {2022/10/12 v3.38} - *{package}{scrbase} {2022/10/12 v3.38} - *{package}{scrlfile} {2022/10/12 v3.38} - *{package}{scrlfile-hook}{2022/10/12 v3.38} - *{package}{scrlogo} {2022/10/12 v3.38} - *{package}{tocbasic} {2022/10/12 v3.38} - *{file} {scrsize12pt.clo}{2022/10/12 v3.38} - *{package}{typearea} {2022/10/12 v3.38} - *{package}{fontenc} {0000/00/00 v0.0} - *{package}{babel} {2023/02/13 v3.86} - *{file} {english.ldf} {2017/06/06 v3.3r} - *{file} {babel-english.tex}{0000/00/00 v0.0} - *{package}{calc} {2017/05/25 v4.3} - *{package}{cancel} {2013/04/12 v2.2} - *{package}{verbatim} {2022-07-02 v1.5u} - *{package}{amsmath} {2022/04/08 v2.17n} - *{package}{amstext} {2021/08/26 v2.01} - *{package}{amsgen} {1999/11/30 v2.0} - *{package}{amsbsy} {1999/11/29 v1.2d} - *{package}{amsopn} {2022/04/08 v2.04} - *{package}{amssymb} {2013/01/14 v3.01} - *{package}{amsfonts} {2013/01/14 v3.01} - *{package}{amsthm} {2020/05/29 v2.20.6} - *{package}{xpatch} {2020/03/25 v0.3a} - *{package}{expl3} {2023-02-22 v3} - *{file} {l3backend-pdftex.def}{2023-01-16 v3} - *{package}{xparse} {2023-02-02 v3} - *{package}{etoolbox} {2020/10/05 v2.5k} - *{package}{threeparttable}{2003/06/13 v3.0} - *{package}{dcolumn} {2014/10/28 v1.06} - *{package}{array} {2022/09/04 v2.5g} - *{package}{multicol} {2021/11/30 v1.9d} - *{package}{multirow} {2021/03/15 v2.8} - *{package}{booktabs} {2020/01/12 v1.61803398} - *{package}{latexsym} {1998/08/17 v2.2e} - *{package}{afterpage} {2014/10/28 v1.08} - *{package}{enotez} {2022/01/04 v0.10d} - *{package}{l3keys2e} {2023-02-02 v2e} - *{package}{xtemplate} {2023-02-02 v3} - *{package}{translations}{2022/02/05 v1.12} - *{package}{pdftexcmds} {2020-06-27 v0.33} - *{package}{infwarerr} {2019/12/03 v1.5} - *{package}{iftex} {2022/02/03 v1.0f} - *{package}{moreverb} {2008/06/03 v2.3a} - *{package}{hhline} {2020/01/04 v2.04} - *{package}{xcolor} {2022/06/12 v2.14} - *{file} {color.cfg} {2016/01/02 v1.6} - *{file} {pdftex.def} {2022/09/22 v1.2b} - *{file} {mathcolor.ltx}{0000/00/00 v0.0} - *{package}{accents} {2006/05/12 v1.4} - *{package}{appendix} {2020/02/08 v1.2c} - *{package}{eucal} {2009/06/22 v3.00} - *{package}{ulem} {2019/11/18 v0.0} - *{package}{bm} {2022/01/05 v1.2f} - *{package}{bbm} {1999/03/15 v1.2} - *{package}{url} {2013/09/16 v3.4} - *{package}{optional} {2005/01/26 v2.2b;} - *{package}{natbib} {2010/09/13 v8.31b} - *{package}{footmisc} {2022/03/08 v6.0d} - *{package}{manyfoot} {2019/08/03 v1.11} - *{package}{nccfoots} {2005/02/03 v1.2} - *{package}{perpage} {2014/10/25 v2.0} - *{package}{./.econtexRoot}{0000/00/00 v0.0} - *{file} {./Resources/econtexPaths.tex}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/local-macros}{0000/00/00 v0.0} - *{package}{./Resources/texmf-local/tex/latex/econark}{0000/00/00 v0.0} - *{package}{subfiles} {2020/11/14 v2.2} - *{package}{import} {2020/04/01 v6.2} - *{package}{xmpincl} {2021/09/22 v2.4} - *{package}{ifpdf} {2019/10/25 v3.4} - 
*{package}{./Resources/texmf-local/tex/latex/llorracc-handouts}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/owner}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/local-packages}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/pdfsuppressruntime}{0000/00/00 v0.0} - *{package}{inputenc} {2021/02/14 v1.3d} - *{package}{fontenc} {0000/00/00 v0.0} - *{package}{listings} {2023/02/27 v1.9} - *{package}{lstmisc} {2023/02/27 v1.9} - *{file} {listings.cfg}{2023/02/27 v1.9} - *{package}{mathtools} {2022/06/29 v1.29} - *{package}{mhsetup} {2021/03/18 v1.4} - *{package}{xr-hyper} {2023-02-07 v7.00v} - *{package}{graphicx} {2021/09/16 v1.2d} - *{package}{graphics} {2022/03/10 v1.4e} - *{package}{trig} {2021/08/11 v1.11} - *{file} {graphics.cfg}{2016/06/04 v1.11} - *{package}{hyperref} {2023-02-07 v7.00v} - *{package}{kvdefinekeys}{2019-12-19 v1.6} - *{package}{pdfescape} {2019/12/09 v1.15} - *{package}{hycolor} {2020-01-27 v1.10} - *{package}{letltxmacro} {2019/12/03 v1.6} - *{package}{auxhook} {2019-12-17 v1.6} - *{package}{nameref} {2022-05-17 v2.50} - *{package}{refcount} {2019/12/15 v3.6} - *{package}{gettitlestring}{2019/12/15 v1.6} - *{file} {pd1enc.def} {2023-02-07 v7.00v} - *{package}{intcalc} {2019/12/15 v1.3} - *{package}{etexcmds} {2019/12/15 v1.7} - *{file} {puenc.def} {2023-02-07 v7.00v} - *{package}{bitset} {2019/12/09 v1.3} - *{package}{bigintcalc} {2019/12/15 v1.5} - *{package}{atbegshi-ltx}{2021/01/10 v1.0c} - *{file} {hpdftex.def} {2023-02-07 v7.00v} - *{package}{atveryend-ltx}{2020/08/19 v1.0a} - *{package}{rerunfilecheck}{2022-07-10 v1.10} - *{package}{uniquecounter}{2019/12/15 v1.4} - *{package}{scrlayer-scrpage}{2022/10/12 v3.38} - *{package}{scrlayer} {2022/10/12 v3.38} - *{package}{caption} {2023/03/12 v3.6j} - *{package}{caption3} {2023/03/12 v2.4} - *{file} {caption-koma.sto}{2022/12/27 v2.0c} - *{package}{showlabels} {2022/07/18 v1.9.2} - *{package}{./Resources/LaTeXInputs/cctwMoM}{0000/00/00 v0.0} - *{file} {translations-basic-dictionary-english.trsl}{(english v0.0} - *{file} {supp-pdf.mkii}{0000/00/00 v0.0} - *{package}{epstopdf-base}{2020-01-24 v2.11} - *{file} {epstopdf-sys.cfg}{2010/07/13 v1.3} - *{file} {SolvingMicroDSOPs-clean-clean.out}{0000/00/00 v0.0} - *{file} {SolvingMicroDSOPs-clean-clean.out}{0000/00/00 v0.0} - *{package}{bookmark} {2020-11-06 v1.29} - *{file} {bkm-pdftex.def}{2020-11-06 v1.29} - *{file} {umsa.fd} {2013/01/14 v3.01} - *{file} {umsb.fd} {2013/01/14 v3.01} - *{file} {ulasy.fd} {1998/08/17 v2.2e} - *{file} {t1cmtt.fd} {2022/07/10 v2.5l} - *{file} {t1cmss.fd} {2022/07/10 v2.5l} - *{package}{lstlang1} {2023/02/27 v1.9} - *{file} {./Code/Python/snippets/rawsolution.py}{0000/00/00 v0.0} - *{file} {./Figures/discreteApprox.pdf}{0000/00/00 v0.0} - *{file} {./Code/Python/snippets/equiprobable-make.py}{0000/00/00 v0.0} - *{file} {./Code/Python/snippets/equiprobable-max-using.py}{0000/00/00 v0.0} - *{file} {./Figures/PlotcTm1Simple.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotVTm1Simple.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotOTm1RawVSInt.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotComparecTm1AB.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotuPrimeVSOPrime.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotOPRawVSFOC.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotcTm1ABC.pdf}{0000/00/00 v0.0} - *{file} {./Figures/GothVInvVSGothC.pdf}{0000/00/00 v0.0} - *{file} {ueuf.fd} {2013/01/14 v3.01} - *{file} {./Figures/GothVVSGothCInv.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotComparecTm1AD.pdf}{0000/00/00 v0.0} - *{file} 
{./Figures/GothVInvVSGothCEEE.pdf}{0000/00/00 v0.0} - *{file} {./Figures/GothVVSGothCInvEEE.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotCFuncsConverge.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotctMultContr.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotRiskySharetOfat.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotTimeVaryingParam.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotMeanMedianSCFcollegeGrads.pdf}{0000/00/00 v0.0} - *{file} {./Figures/Sensitivity.pdf}{0000/00/00 v0.0} - *{file} {./Figures/SMMcontour.pdf}{0000/00/00 v0.0} - *{file} {SolvingMicroDSOPs-clean-clean.bbl}{0000/00/00 v0.0} -} diff --git a/SolvingMicroDSOPs-clean-clean.pdf b/SolvingMicroDSOPs-clean-clean.pdf deleted file mode 100644 index b671a1c4c..000000000 Binary files a/SolvingMicroDSOPs-clean-clean.pdf and /dev/null differ diff --git a/SolvingMicroDSOPs-clean-clean.tex b/SolvingMicroDSOPs-clean-clean.tex index c18dc0ffd..7e51ad361 100644 --- a/SolvingMicroDSOPs-clean-clean.tex +++ b/SolvingMicroDSOPs-clean-clean.tex @@ -334,9 +334,7 @@ \subsection{The Decision Problem in the New Notation}\label{subsec:decision-prob Having defined these notational conventions, we are now ready to move to substance. -\begin{comment} % - % \subsection{Implementation in Python} The code implementing the tasks outlined in each of the sections to come is available in the \texttt{\href{https://econ-ark.org/materials/SolvingMicroDSOPs}{SolvingMicroDSOPs}} Jupyter notebook, written in \href{https://python.org}{Python}. The notebook imports various modules, including the standard \texttt{numpy} and \texttt{scipy} modules used for numerical methods in Python, as well as some user-defined modules designed to provide numerical solutions to the consumer's problem from the previous section. Before delving into the computational exercise, it is essential to touch on the practicality of these custom modules. @@ -470,7 +468,6 @@ \subsection{The Approximate Consumption and Value Functions} \hypertarget{an-interpolated-consumption-function}{} \subsection{An Interpolated Consumption Function} \label{subsec:LinInterp} -We can now apply our solution to \eqref{eq:vEndTm1} to each of the values in $\vctr{m}$, generating a corresponding optimal $\vctr{c}$. This is called `sampling' the consumption function. Using the ordered pairs $\{\vctr{m},\vctr{c}\}$ we can create a piecewise linear `interpolating function' (a `spline') which when applied to any input $\vctr{m}[1] \leq m\leq \vctr{m}[-1]$ will yield the value of $c$ that corresponds to a linear `connect-the-dots' interpolation of the value of $c$ from the values of the two nearest computed $\{m,c\}$ points.\footnote{For a useful treatment of various kinds of interpolation appropriate for different questions, see } % This is accomplished in ``An Interpolated Consumption Function,'' which generates an interpolating function that we designate $\Aprx{\mathrm{c}}_{\MidPrdLsT}(m)$. %When called with an $\mNrm$ that is equal to one of the points in $\code{{{\mVec}\_int}}$, $\Aprx{\cFunc}_{\prdT-1}$ returns the associated value of $\vctr{c}_{\code{\prdT-1}}$, and when called with a value of $\mNrm$ that is not exactly equal to one of the \texttt{mVec\_int}, returns the value of $c$ that reflects a linear interpolation between the $\vctr{c}_{\code{\prdT-1}}$ points associated with the two \texttt{mVec\_int} points immediately above and below $\mNrm$.
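For concreteness, the `connect-the-dots' step described in the hunk above amounts to ordinary piecewise-linear interpolation over the sampled $\{m,c\}$ pairs. The sketch below is illustrative only, not code from this repository: the grid values and the names `mVec`, `cVec`, and `c_interp` are invented for the example.

```python
# Minimal sketch of piecewise-linear interpolation of a sampled
# consumption function; gridpoints and values here are made up.
import numpy as np

mVec = np.array([0.5, 1.0, 2.0, 4.0])      # illustrative m gridpoints
cVec = np.array([0.40, 0.70, 1.15, 1.90])  # optimal c computed at each gridpoint


def c_interp(m):
    # Evaluate the 'connect-the-dots' interpolant between the two
    # nearest computed (m, c) points.
    return np.interp(m, mVec, cVec)


print(c_interp(1.5))  # 0.925, halfway between c(1.0)=0.70 and c(2.0)=1.15
```

Note that `np.interp` holds the endpoint values constant outside the grid, so callers should respect the restriction $\vctr{m}[1] \leq m \leq \vctr{m}[-1]$ that the text imposes.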
@@ -564,7 +561,6 @@ \subsection{Value Function versus First Order Condition}\label{subsec:vVsuP} \label{fig:PlotuPrimeVSOPrime} \end{figure} -In the notebook, the ``Value Function versus the First Order Condition'' section completes the task of finding the values of consumption which satisfy the first order condition in \eqref{eq:FOCTm1} using the \href{https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brentq.html}{\texttt{brentq}} function from the \texttt{scipy} package.% The downward-sloping curve in Figure \ref{fig:PlotuPrimeVSOPrime} @@ -653,7 +649,6 @@ \subsection{Transformation}\label{subsec:transformation} In the case we are now considering, with no uncertainty and no liquidity constraints, the optimizing consumer does not care whether a unit of income is scheduled to be received in the future period $t$ or the current period $t-1$; there is perfect certainty that the income will be received, so the consumer treats its PDV as equivalent to a unit of current wealth. Total resources available at the point when the consumption decision is made are therefore comprised of two types: current market resources $m$ and `human wealth' (the PDV of future income) of $h_{t-1}=1$ (because it is the value of human wealth as of the end of the period, there is only one more period of income of 1 left). -The well-known optimal solution is to spend half of total lifetime resources in period $t-1$ and the remainder in period $t (=T)$. Since total resources are known with certainty to be $m+h_{t-1}= m+1$, and since $v_{\MidStg}^{m}(m) = u^{c}(c)$, this implies that \begin{equation} v^{m}_{\MidPrdLsT}(m) = \left(\frac{m+1}{2}\right)^{-\rho} \label{eq:vPLin}. \end{equation} diff --git a/SolvingMicroDSOPs-clean.dep b/SolvingMicroDSOPs-clean.dep deleted file mode 100644 index 9b6a058c0..000000000 --- a/SolvingMicroDSOPs-clean.dep +++ /dev/null @@ -1,168 +0,0 @@ -\RequireVersions{ - *{application}{TeX} {1990/03/25 v3.x} - *{format} {LaTeX2e} {2022-11-01 v2.e} - *{class} {Resources/texmf-local/tex/latex/econtex}{2017/08/01 v0.0} - *{package}{snapshot} {2002/03/05 v1.14} - *{package}{ifthen} {2022/04/13 v1.1d} - *{package}{changepage} {2009/10/20 v1.0c} - *{package}{currfile} {2022/10/10 v0.8} - *{package}{kvoptions} {2022-06-15 v3.15} - *{package}{keyval} {2022/05/29 v1.15} - *{package}{ltxcmds} {2020-05-10 v1.25} - *{package}{kvsetkeys} {2022-10-05 v1.19} - *{package}{filehook} {2022/10/25 v0.8b} - *{package}{filehook-2020}{2022/10/25 v0.8b} - *{package}{setspace} {2022/12/04 v6.7b} - *{class} {scrartcl} {2022/10/12 v3.38} - *{package}{scrkbase} {2022/10/12 v3.38} - *{package}{scrbase} {2022/10/12 v3.38} - *{package}{scrlfile} {2022/10/12 v3.38} - *{package}{scrlfile-hook}{2022/10/12 v3.38} - *{package}{scrlogo} {2022/10/12 v3.38} - *{package}{tocbasic} {2022/10/12 v3.38} - *{file} {scrsize12pt.clo}{2022/10/12 v3.38} - *{package}{typearea} {2022/10/12 v3.38} - *{package}{fontenc} {0000/00/00 v0.0} - *{package}{babel} {2023/02/13 v3.86} - *{file} {english.ldf} {2017/06/06 v3.3r} - *{file} {babel-english.tex}{0000/00/00 v0.0} - *{package}{calc} {2017/05/25 v4.3} - *{package}{cancel} {2013/04/12 v2.2} - *{package}{verbatim} {2022-07-02 v1.5u} - *{package}{amsmath} {2022/04/08 v2.17n} - *{package}{amstext} {2021/08/26 v2.01} - *{package}{amsgen} {1999/11/30 v2.0} - *{package}{amsbsy} {1999/11/29 v1.2d} - *{package}{amsopn} {2022/04/08 v2.04} - *{package}{amssymb} {2013/01/14 v3.01} - *{package}{amsfonts} {2013/01/14 v3.01} - *{package}{amsthm} {2020/05/29 v2.20.6} - *{package}{xpatch}
{2020/03/25 v0.3a} - *{package}{expl3} {2023-02-22 v3} - *{file} {l3backend-pdftex.def}{2023-01-16 v3} - *{package}{xparse} {2023-02-02 v3} - *{package}{etoolbox} {2020/10/05 v2.5k} - *{package}{threeparttable}{2003/06/13 v3.0} - *{package}{dcolumn} {2014/10/28 v1.06} - *{package}{array} {2022/09/04 v2.5g} - *{package}{multicol} {2021/11/30 v1.9d} - *{package}{multirow} {2021/03/15 v2.8} - *{package}{booktabs} {2020/01/12 v1.61803398} - *{package}{latexsym} {1998/08/17 v2.2e} - *{package}{afterpage} {2014/10/28 v1.08} - *{package}{enotez} {2022/01/04 v0.10d} - *{package}{l3keys2e} {2023-02-02 v2e} - *{package}{xtemplate} {2023-02-02 v3} - *{package}{translations}{2022/02/05 v1.12} - *{package}{pdftexcmds} {2020-06-27 v0.33} - *{package}{infwarerr} {2019/12/03 v1.5} - *{package}{iftex} {2022/02/03 v1.0f} - *{package}{moreverb} {2008/06/03 v2.3a} - *{package}{hhline} {2020/01/04 v2.04} - *{package}{xcolor} {2022/06/12 v2.14} - *{file} {color.cfg} {2016/01/02 v1.6} - *{file} {pdftex.def} {2022/09/22 v1.2b} - *{file} {mathcolor.ltx}{0000/00/00 v0.0} - *{package}{accents} {2006/05/12 v1.4} - *{package}{appendix} {2020/02/08 v1.2c} - *{package}{eucal} {2009/06/22 v3.00} - *{package}{ulem} {2019/11/18 v0.0} - *{package}{bm} {2022/01/05 v1.2f} - *{package}{bbm} {1999/03/15 v1.2} - *{package}{url} {2013/09/16 v3.4} - *{package}{optional} {2005/01/26 v2.2b;} - *{package}{natbib} {2010/09/13 v8.31b} - *{package}{footmisc} {2022/03/08 v6.0d} - *{package}{manyfoot} {2019/08/03 v1.11} - *{package}{nccfoots} {2005/02/03 v1.2} - *{package}{perpage} {2014/10/25 v2.0} - *{package}{./.econtexRoot}{0000/00/00 v0.0} - *{file} {./Resources/econtexPaths.tex}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/local-macros}{0000/00/00 v0.0} - *{package}{./Resources/texmf-local/tex/latex/econark}{0000/00/00 v0.0} - *{package}{subfiles} {2020/11/14 v2.2} - *{package}{import} {2020/04/01 v6.2} - *{package}{xmpincl} {2021/09/22 v2.4} - *{package}{ifpdf} {2019/10/25 v3.4} - *{package}{./Resources/texmf-local/tex/latex/llorracc-handouts}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/owner}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/local-packages}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/pdfsuppressruntime}{0000/00/00 v0.0} - *{package}{inputenc} {2021/02/14 v1.3d} - *{package}{fontenc} {0000/00/00 v0.0} - *{package}{listings} {2023/02/27 v1.9} - *{package}{lstmisc} {2023/02/27 v1.9} - *{file} {listings.cfg}{2023/02/27 v1.9} - *{package}{mathtools} {2022/06/29 v1.29} - *{package}{mhsetup} {2021/03/18 v1.4} - *{package}{xr-hyper} {2023-02-07 v7.00v} - *{package}{graphicx} {2021/09/16 v1.2d} - *{package}{graphics} {2022/03/10 v1.4e} - *{package}{trig} {2021/08/11 v1.11} - *{file} {graphics.cfg}{2016/06/04 v1.11} - *{package}{hyperref} {2023-02-07 v7.00v} - *{package}{kvdefinekeys}{2019-12-19 v1.6} - *{package}{pdfescape} {2019/12/09 v1.15} - *{package}{hycolor} {2020-01-27 v1.10} - *{package}{letltxmacro} {2019/12/03 v1.6} - *{package}{auxhook} {2019-12-17 v1.6} - *{package}{nameref} {2022-05-17 v2.50} - *{package}{refcount} {2019/12/15 v3.6} - *{package}{gettitlestring}{2019/12/15 v1.6} - *{file} {pd1enc.def} {2023-02-07 v7.00v} - *{package}{intcalc} {2019/12/15 v1.3} - *{package}{etexcmds} {2019/12/15 v1.7} - *{file} {puenc.def} {2023-02-07 v7.00v} - *{package}{bitset} {2019/12/09 v1.3} - *{package}{bigintcalc} {2019/12/15 v1.5} - *{package}{atbegshi-ltx}{2021/01/10 v1.0c} - *{file} {hpdftex.def} {2023-02-07 v7.00v} - *{package}{atveryend-ltx}{2020/08/19 v1.0a} - 
*{package}{rerunfilecheck}{2022-07-10 v1.10} - *{package}{uniquecounter}{2019/12/15 v1.4} - *{package}{scrlayer-scrpage}{2022/10/12 v3.38} - *{package}{scrlayer} {2022/10/12 v3.38} - *{package}{caption} {2023/03/12 v3.6j} - *{package}{caption3} {2023/03/12 v2.4} - *{file} {caption-koma.sto}{2022/12/27 v2.0c} - *{package}{showlabels} {2022/07/18 v1.9.2} - *{package}{./Resources/LaTeXInputs/cctwMoM}{0000/00/00 v0.0} - *{file} {translations-basic-dictionary-english.trsl}{(english v0.0} - *{file} {supp-pdf.mkii}{0000/00/00 v0.0} - *{package}{epstopdf-base}{2020-01-24 v2.11} - *{file} {epstopdf-sys.cfg}{2010/07/13 v1.3} - *{file} {SolvingMicroDSOPs-clean.out}{0000/00/00 v0.0} - *{file} {SolvingMicroDSOPs-clean.out}{0000/00/00 v0.0} - *{package}{bookmark} {2020-11-06 v1.29} - *{file} {bkm-pdftex.def}{2020-11-06 v1.29} - *{file} {umsa.fd} {2013/01/14 v3.01} - *{file} {umsb.fd} {2013/01/14 v3.01} - *{file} {ulasy.fd} {1998/08/17 v2.2e} - *{file} {t1cmtt.fd} {2022/07/10 v2.5l} - *{file} {t1cmss.fd} {2022/07/10 v2.5l} - *{package}{lstlang1} {2023/02/27 v1.9} - *{file} {./Code/Python/snippets/rawsolution.py}{0000/00/00 v0.0} - *{file} {./Figures/discreteApprox.pdf}{0000/00/00 v0.0} - *{file} {./Code/Python/snippets/equiprobable-make.py}{0000/00/00 v0.0} - *{file} {./Code/Python/snippets/equiprobable-max-using.py}{0000/00/00 v0.0} - *{file} {./Figures/PlotcTm1Simple.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotVTm1Simple.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotOTm1RawVSInt.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotComparecTm1AB.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotuPrimeVSOPrime.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotOPRawVSFOC.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotcTm1ABC.pdf}{0000/00/00 v0.0} - *{file} {./Figures/GothVInvVSGothC.pdf}{0000/00/00 v0.0} - *{file} {ueuf.fd} {2013/01/14 v3.01} - *{file} {./Figures/GothVVSGothCInv.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotComparecTm1AD.pdf}{0000/00/00 v0.0} - *{file} {./Figures/GothVInvVSGothCEEE.pdf}{0000/00/00 v0.0} - *{file} {./Figures/GothVVSGothCInvEEE.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotCFuncsConverge.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotctMultContr.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotRiskySharetOfat.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotTimeVaryingParam.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotMeanMedianSCFcollegeGrads.pdf}{0000/00/00 v0.0} - *{file} {./Figures/Sensitivity.pdf}{0000/00/00 v0.0} - *{file} {./Figures/SMMcontour.pdf}{0000/00/00 v0.0} -} diff --git a/SolvingMicroDSOPs-clean.pdf b/SolvingMicroDSOPs-clean.pdf deleted file mode 100644 index b5637356f..000000000 Binary files a/SolvingMicroDSOPs-clean.pdf and /dev/null differ diff --git a/SolvingMicroDSOPs-clean.tex b/SolvingMicroDSOPs-clean.tex index e57ab8478..9d9145e29 100644 --- a/SolvingMicroDSOPs-clean.tex +++ b/SolvingMicroDSOPs-clean.tex @@ -334,9 +334,7 @@ \subsection{The Decision Problem in the New Notation}\label{subsec:decision-prob Having defined these notational conventions, we are now ready to move to substance. -\begin{comment} % - % \subsection{Implementation in Python} The code implementing the tasks outlined in each of the sections to come is available in the \texttt{\href{https://econ-ark.org/materials/SolvingMicroDSOPs}{SolvingMicroDSOPs}} Jupyter notebook, written in \href{https://python.org}{Python}.
The notebook imports various modules, including the standard \texttt{numpy} and \texttt{scipy} modules used for numerical methods in Python, as well as some user-defined modules designed to provide numerical solutions to the consumer's problem from the previous section. Before delving into the computational exercise, it is essential to touch on the practicality of these custom modules. @@ -470,7 +468,6 @@ \subsection{The Approximate Consumption and Value Functions} \hypertarget{an-interpolated-consumption-function}{} \subsection{An Interpolated Consumption Function} \label{subsec:LinInterp} -We can now apply our solution to \eqref{eq:vEndTm1} to each of the values in $\vctr{m}$, generating a corresponding optimal $\vctr{c}$. This is called `sampling' the consumption function. Using the ordered pairs $\{\vctr{m},\vctr{c}\}$ we can create a piecewise linear `interpolating function' (a `spline') which when applied to any input $\vctr{m}[1] \leq m\leq \vctr{m}[-1]$ will yield the value of $c$ that corresponds to a linear `connect-the-dots' interpolation of the value of $c$ from the values of the two nearest computed $\{m,c\}$ points.\footnote{For a useful treatment of various kinds of interpolation appropriate for different questions, see } % This is accomplished in ``An Interpolated Consumption Function,'' which generates an interpolating function that we designate $\Aprx{\cFunc}_{\MidPrdLsT}(m)$. %When called with an $\mNrm$ that is equal to one of the points in $\code{{{\mVec}\_int}}$, $\Aprx{\cFunc}_{\prdT-1}$ returns the associated value of $\vctr{c}_{\code{\prdT-1}}$, and when called with a value of $\mNrm$ that is not exactly equal to one of the \texttt{mVec\_int}, returns the value of $c$ that reflects a linear interpolation between the $\vctr{c}_{\code{\prdT-1}}$ points associated with the two \texttt{mVec\_int} points immediately above and below $\mNrm$. @@ -564,7 +561,6 @@ \subsection{Value Function versus First Order Condition}\label{subsec:vVsuP} \label{fig:PlotuPrimeVSOPrime} \end{figure} -In the notebook, the ``Value Function versus the First Order Condition'' section completes the task of finding the values of consumption which satisfy the first order condition in \eqref{eq:FOCTm1} using the \href{https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brentq.html}{\texttt{brentq}} function from the \texttt{scipy} package.% The downward-sloping curve in Figure \ref{fig:PlotuPrimeVSOPrime} @@ -653,7 +649,6 @@ \subsection{Transformation}\label{subsec:transformation} In the case we are now considering, with no uncertainty and no liquidity constraints, the optimizing consumer does not care whether a unit of income is scheduled to be received in the future period $t$ or the current period $t-1$; there is perfect certainty that the income will be received, so the consumer treats its PDV as equivalent to a unit of current wealth. Total resources available at the point when the consumption decision is made are therefore comprised of two types: current market resources $m$ and `human wealth' (the PDV of future income) of $h_{t-1}=1$ (because it is the value of human wealth as of the end of the period, there is only one more period of income of 1 left). -The well-known optimal solution is to spend half of total lifetime resources in period $t-1$ and the remainder in period $t (=T)$. Since total resources are known with certainty to be $m+h_{t-1}= m+1$, and since $v_{\MidStg}^{m}(m) = u^{c}(c)$, this implies that \begin{equation} v^{m}_{\MidPrdLsT}(m) = \left(\frac{m+1}{2}\right)^{-\rho} \label{eq:vPLin}. \end{equation}
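To connect the two hunks above: the first-order condition that the notebook solves with \texttt{brentq} has a closed form in this perfect-foresight case, so the numerical root can be checked against eq:vPLin. The sketch below is illustrative, not repository code; it assumes CRRA utility, $\beta R = 1$, one remaining unit of income, and invented values for $\rho$ and $m$.

```python
# Sketch: solve the period-(T-1) first-order condition u'(c) = u'(c_T)
# with scipy's brentq, where c_T = (m - c) + 1 is what remains to be
# spent next period. All parameter values here are illustrative.
from scipy.optimize import brentq

rho = 2.0  # illustrative CRRA coefficient


def foc(c, m):
    # u'(c) - u'(m - c + 1); its root is the optimal consumption level
    return c ** (-rho) - (m - c + 1.0) ** (-rho)


m = 3.0
c_star = brentq(foc, 1e-9, m + 1.0 - 1e-9, args=(m,))
print(c_star)            # 2.0 = (m + 1)/2: half of total lifetime resources
print(c_star ** (-rho))  # 0.25 = ((m + 1)/2)**(-rho), matching eq:vPLin
```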
diff --git a/SolvingMicroDSOPs.bib b/SolvingMicroDSOPs.bib index 0e08ae5c8..139597f9c 100644 --- a/SolvingMicroDSOPs.bib +++ b/SolvingMicroDSOPs.bib @@ -1,285 +1,2 @@ -,-------------------. -| PREAMBLE | -`-------------------' - -@preamble{ "\providecommand{\noopsort}[1]{} - \providecommand{\singleletter}[1]{#1} " -} - -,-------------------. -| BIBTEX ENTRIES | -`-------------------' - -@article{gpLifecycle, - author = {Gourinchas, Pierre-Olivier and Parker, Jonathan}, - journal = {Econometrica}, - number = {1}, - pages = {47--89}, - title = {Consumption Over the Life Cycle}, - volume = {70}, - year = {2002}, -} - -@article{cagettiWprofiles, - author = {Marco Cagetti}, - journal = {Journal of Business and Economic Statistics}, - number = {3}, - pages = {339--353}, - title = {Wealth Accumulation Over the Life Cycle and - Precautionary Savings}, - volume = {21}, - year = {2003}, -} - -@article{carrollEGM, - author = {Carroll, Christopher D.}, - journal = {Economics Letters}, - month = {September}, - note = - {\url{https://www.econ2.jhu.edu/people/ccarroll/EndogenousGridpoints.pdf}}, - number = {3}, - pages = {312--320}, - title = {The {M}ethod of {E}ndogenous {G}ridpoints for - {S}olving {D}ynamic {S}tochastic {O}ptimization - {P}roblems}, - volume = {91}, - year = {2006}, - doi = {10.1016/j.econlet.2005.09.013}, - url = {https://www.econ2.jhu.edu/people/ccarroll/ - EndogenousArchive.zip}, -} - -@article{SolvingMicroDSOPs, - author = {Carroll, Christopher D.}, - journal = {Econ-ARK REMARK}, - title = {Solving Microeconomic Dynamic Stochastic Optimization - Problems}, - year = {2023}, - url = {https://llorracc.github.io/SolvingMicroDSOPs}, -} - -@article{merton:restat, - author = {Merton, Robert C.}, - journal = {Review of Economics and Statistics}, - pages = {247--257}, - title = {Lifetime Portfolio Selection under Uncertainty: The - Continuous Time Case}, - volume = {51}, - year = {1969}, -} - -@article{samuelson:portfolio, - author = {Samuelson, Paul A.}, - journal = {Review of Economics and Statistics}, - pages = {239--46}, - title = {Lifetime Portfolio Selection by Dynamic Stochastic - Programming}, - volume = {51}, - year = {1969}, -} - -@book{deatonUnderstandingC, - address = {New York}, - author = {Deaton, Angus S.}, - publisher = {Oxford University Press}, - title = {{U}nderstanding {C}onsumption}, - year = {1992}, -} - -@article{BufferStockTheory, - author = {Christopher D.
Carroll}, - journal = {Revise and Resubmit, Quantitative Economics}, - title = {Theoretical Foundations of Buffer Stock Saving}, - year = {2023}, - doi = {https://zenodo.org/badge/latestdoi/304124725}, - url = {https://econ-ark.github.io/BufferStockTheory}, -} - -@article{Tanaka2013-bc, - author = {Tanaka, Ken'ichiro and Toda, Alexis Akira}, - journal = {Economics letters}, - month = mar, - number = {3}, - pages = {445--450}, - publisher = {Elsevier}, - title = {{Discrete approximations of continuous distributions - by maximum entropy}}, - volume = {118}, - year = {2013}, - abstract = {PDF | On Mar 1, 2013, Ken'ichiro Tanaka and others - published Discrete approximations of continuous - distributions by maximum entropy | Find, read and - cite all the research you need on ResearchGate}, - doi = {10.1016/j.econlet.2012.12.020}, - issn = {0165-1765}, - url = {https://www.researchgate.net/publication/ - 256994855_Discrete_approximations_of_continuous_distributions_by_maximum_entropy}, -} - -@article{kopecky2010finite, - author = {Kopecky, Karen A. and Suen, Richard M.H.}, - journal = {Review of Economic Dynamics}, - note = {\url{http://www.karenkopecky.net/RouwenhorstPaper.pdf}}, - number = {3}, - pages = {701--714}, - publisher = {Elsevier}, - title = {Finite State Markov-Chain Approximations To Highly - Persistent Processes}, - volume = {13}, - year = {2010}, - url = {http://www.karenkopecky.net/RouwenhorstPaper.pdf}, -} - -@article{denHaanMarcet:parameterized, - author = {den Haan, Wouter J and Marcet, Albert}, - journal = {Journal of Business and Economic Statistics}, - month = {January}, - note = {Available at - {\url{http://ideas.repec.org/a/bes/jnlbes/v8y1990i1p31-34.html}}}, - number = {1}, - pages = {31--34}, - title = {Solving the Stochastic Growth Model by Parameterizing - Expectations}, - volume = {8}, - year = {1990}, -} - -@article{ckConcavity, - author = {Carroll, Christo{\-}pher D. 
and - Kim{\-}ball, Miles S.}, - journal = {Econometrica}, - note = - {\url{https://www.econ2.jhu.edu/people/ccarroll/concavity.pdf}}, - number = {4}, - pages = {981--992}, - title = {On the {C}on{\-}cav{\-}ity of the {C}onsumption - {F}unction}, - volume = {64}, - year = {1996}, - url = {https://www.econ2.jhu.edu/people/ccarroll/concavity.pdf}, -} - -@article{zeldesStochastic, - author = {Zeldes, Stephen P.}, - journal = {Quarterly Journal of Economics}, - month = {May}, - number = {2}, - pages = {275--298}, - title = {Optimal Consumption with Stochastic Income: - {D}eviations from Certainty Equivalence}, - volume = {104}, - year = {1989}, -} - -@article{deatonLiqConstr, - author = {Deaton, Angus S.}, - journal = {Econometrica}, - note = {\url{https://www.jstor.org/stable/2938366}}, - pages = {1221-1248}, - title = {Saving and Liquidity Constraints}, - volume = {59}, - year = {1991}, - url = {http://www.jstor.org/stable/2938366}, -} - -@article{carroll:brookings, - author = {Carroll, Christopher D.}, - journal = {Brookings Papers on Economic Activity}, - note = - {\url{https://www.econ2.jhu.edu/people/ccarroll/BufferStockBPEA.pdf}}, - number = {2}, - pages = {61--156}, - title = {The Buffer-Stock Theory of Saving: Some Macroeconomic - Evidence}, - volume = {1992}, - year = {1992}, - url = {https://www.econ2.jhu.edu/people/ccarroll/ - BufferStockBPEA.pdf}, -} - -@article{carrollBSLCPIH, - author = {Carroll, Christopher D.}, - journal = {Quarterly Journal of Economics}, - number = {1}, - pages = {1--56}, - title = {Buffer Stock Saving and the Life Cycle/Permanent - Income Hypothesis}, - volume = {CXII}, - year = {1997}, -} - -@incollection{carroll:richportfolios, - address = {Cambridge, MA}, - author = {Carroll, Christopher D.}, - booktitle = {Household Portfolios: Theory and Evidence}, - note = - {\url{https://www.econ2.jhu.edu/people/ccarroll/richportfolios.pdf}}, - publisher = {MIT Press}, - title = {Portfolios of the Rich}, - year = {2002}, - url = {https://www.econ2.jhu.edu/people/ccarroll/ - richportfolios.pdf}, -} - -@article{palumbo:medical, - author = {Palumbo, Michael G}, - journal = {Review of Economic Studies}, - note = {Available at - {\url{http://ideas.repec.org/a/bla/restud/v66y1999i2p395-421.html}}}, - number = {2}, - pages = {395--421}, - title = {Uncertain Medical Expenses and Precautionary Saving - Near the End of the Life Cycle}, - volume = {66}, - year = {1999}, -} - -@article{carroll&samwick:nature, - author = {Carroll, Christopher D. and Samwick, Andrew A.}, - journal = {Journal of Monetary Economics}, - number = {1}, - pages = {41--71}, - title = {The {N}ature of {P}recautionary {W}ealth}, - volume = {40}, - year = {1997}, - url = {https://www.econ2.jhu.edu/people/ccarroll/papers/ - nature.pdf}, -} - -@article{AttanasioBanksMeghirWeber, - author = {Attanasio, O.P. and Banks, J. and Meghir, C. and - Weber, G.}, - journal = {Journal of Business and Economic Statistics}, - number = {1}, - pages = {22--35}, - publisher = {JSTOR}, - title = {Humps and Bumps in Lifetime Consumption}, - volume = {17}, - year = {1999}, -} - -@inproceedings{horowitzBootstrap, - author = {Joel L. Horowitz}, - booktitle = {Handbook of Econometrics}, - editor = {James J. 
Heckman and Edward Leamer}, - publisher = {Elsevier/North Holland}, - title = {The Bootstrap}, - volume = {5}, - year = {2001}, -} - -@article{andrews2017measuring, - author = {Andrews, Isaiah and Gentzkow, Matthew and - Shapiro, Jesse M}, - journal = {The Quarterly Journal of Economics}, - number = {4}, - pages = {1553--1592}, - publisher = {Oxford University Press}, - title = {Measuring the sensitivity of parameter estimates to - estimation moments}, - volume = {132}, - year = {2017}, -} - diff --git a/SolvingMicroDSOPs.pdf b/SolvingMicroDSOPs.pdf deleted file mode 100644 index 68af7f026..000000000 Binary files a/SolvingMicroDSOPs.pdf and /dev/null differ diff --git a/docs/Resources/LaTeXInputs/bibliography_blend-clean.tex b/docs/Resources/LaTeXInputs/bibliography_blend-clean.tex deleted file mode 100644 index a07f1f2c5..000000000 --- a/docs/Resources/LaTeXInputs/bibliography_blend-clean.tex +++ /dev/null @@ -1,56 +0,0 @@ -% Allows two (optional) supplements to hard-wired \texname.bib bibfile: -% economics.bib is a default bibfile that supplies anything missing elsewhere -% Add-Refs.bib is an override bibfile that supplants anything in \texfile.bib or economics.bib -\provideboolean{AddRefsExists} -\provideboolean{economicsExists} -\provideboolean{BothExist} -\provideboolean{NeitherExists} -\setboolean{BothExist}{true} -\setboolean{NeitherExists}{true} - -\IfFileExists{\econtexRoot/\texname-Add-Refs.bib}{ - % then - \typeout{References in Add-Refs.bib will take precedence over those elsewhere} - \setboolean{AddRefsExists}{true} - \setboolean{NeitherExists}{false} % Default is true -}{ - % else - \setboolean{AddRefsExists}{false} % No added refs exist so defaults will be used - \setboolean{BothExist}{false} % Default is that Add-Refs and economics.bib both exist -} - -% Deal with case where economics.bib is found by kpsewhich -\IfFileExists{/usr/local/texlive/texmf-local/bibtex/bib/economics.bib}{ - % then - \typeout{References in default global economics.bib will be used for items not found elsewhere} - \setboolean{economicsExists}{true} - \setboolean{NeitherExists}{false} -}{ - % else - \typeout{Found no global database file} - \setboolean{economicsExists}{false} - \setboolean{BothExist}{false} -} - -\ifthenelse{\boolean{showPageHead}}{ %then - \clearpairofpagestyles % No header for references pages - }{} % No head has been set to clear - -\ifthenelse{\boolean{BothExist}}{ - % then use both - \typeout{bibliography{\econtexRoot/\texname-Add-Refs,\econtexRoot/\texname,economics}} - \bibliography{\econtexRoot/\texname-Add-Refs,\econtexRoot/\texname,economics} - % else both do not exist -}{ % maybe neither does? 
- \ifthenelse{\boolean{NeitherExists}}{ - \typeout{bibliography{\texname}} - \bibliography{\texname}}{ - % no -- at least one exists - \ifthenelse{\boolean{AddRefsExists}}{% yes - \typeout{\bibliography{\econtexRoot/\texname-Add-Refs,\econtexRoot/\texname}} - \bibliography{\econtexRoot/\texname-Add-Refs,\econtexRoot/\texname}} - {% else \texname-Add-Refs does not exist - \typeout{\bibliography{\econtexRoot/\texname,economics}} - \bibliography{ \econtexRoot/\texname,economics}} - } % end of picking the one that exists -} % end of testing whether neither exists diff --git a/docs/Resources/LaTeXInputs/local-macros.tex b/docs/Resources/LaTeXInputs/local-macros.tex deleted file mode 100644 index 4f8fb703e..000000000 --- a/docs/Resources/LaTeXInputs/local-macros.tex +++ /dev/null @@ -1,251 +0,0 @@ -% Define macros for this paper -% Only works after \input{./.econtexRoot} -\ProvidesPackage{SolvingMicroDSOPs-private} - -\renewcommand{\econtexRoot}{.} -\renewcommand{\packages}{\econtexRoot/Resources/texmf-local/tex/latex} -\renewcommand{\econtex}{\packages/econtex} -\renewcommand{\econark}{\econtexRoot/Resources/texmf-local/tex/latex/econark} -\renewcommand{\econtexSetup}{\econtexRoot/Resources/texmf-local/tex/latex/econtexSetup} -\renewcommand{\econtexShortcuts}{\econtexRoot/Resources/texmf-local/tex/latex/econtexShortcuts} -\renewcommand{\econtexBibMake}{\econtexRoot/Resources/texmf-local/tex/latex/econtexBibMake} -\renewcommand{\econtexBibStyle}{\econtexRoot/Resources/texmf-local/bibtex/bst/econtex} -\renewcommand{\LaTeXInputs}{\econtexRoot/Resources/LaTeXInputs} -\renewcommand{\local}{\LaTeXInputs/local} - -\providecommand{\onlyinsubfile}{}\renewcommand{\onlyinsubfile}[1]{#1} -\providecommand{\notinsubfile}{}\renewcommand{\notinsubfile}[1]{} - -\providecommand{\aboveMin}{}\renewcommand{\aboveMin}{\blacktriangle} -\providecommand{\chiFunc}{}\providecommand{\chiFunc}{\pmb{\chi}} - -% version for urls - [userGitHubID] or econ-ark - -\newcommand{\Arrival}{Arrival}\newcommand{\arvl}{\leftarrow} -\newcommand{\Decision}{Decision}\newcommand{\dcsn}{\null}%{\sim} -\newcommand{\Continuation}{Continuation}\newcommand{\cntn}{\rightarrow}%{\rightharpoonup} - -% providecommand then renewcommand works to redefine it -% if it already exists - -\providecommand{\prdNxt}{}\renewcommand{\prdNxt}{\prdt+1} -\providecommand{\prdLst}{}\renewcommand{\prdLst}{\prdt-1} -\providecommand{\prdNxT}{}\renewcommand{\prdNxT}{\prdT+1} -\providecommand{\prdLsT}{}\renewcommand{\prdLsT}{\prdT-1} - -\providecommand{\BegMark}{}\renewcommand{\BegMark}{\arvl} -\providecommand{\BegStp}{}\renewcommand{\BegStp}{_\arvl} -\providecommand{\BegStpNxt}{}\renewcommand{\BegStpNxt}{_\arvl{(+)}} -\providecommand{\BegStpLst}{}\renewcommand{\BegStpLst}{_\arvl{(-)}} - -\providecommand{\BegPrd}{}\renewcommand{\BegPrd}{_\arvl{\prd}} -\providecommand{\BegPrdNxt}{}\renewcommand{\BegPrdNxt}{_\arvl{(\prd+1)}} -\providecommand{\BegPrdLst}{}\renewcommand{\BegPrdLst}{_\arvl\prd-1} - -\providecommand{\MidStp}{}\renewcommand{\MidStp}{\dcsn} -\providecommand{\MidPrd}{}\renewcommand{\MidPrd}{\prd} -\providecommand{\MidStpLst}{}\renewcommand{\MidStpLst}{{\prd-1}} -\providecommand{\MidStpLsT}{}\renewcommand{\MidStpLsT}{{\prdT-1}} -\providecommand{\MidStpNxt}{}\renewcommand{\MidStpNxt}{{\prd+1}} -\providecommand{\MidStpNxT}{}\renewcommand{\MidStpNxT}{{\prdT+1}} -\providecommand{\EndMark}{}\renewcommand{\EndMark}{\cntn} -\providecommand{\EndPrd}{}\renewcommand{\EndPrd}{\prd_\cntn} -\providecommand{\EndStg}{}\renewcommand{\EndStg}{_\cntn} 
-\providecommand{\EndPrdLst}{}\renewcommand{\EndPrdLst}{{(\prd-1)}_\cntn} -\providecommand{\EndPrdLsT}{}\renewcommand{\EndPrdLsT}{{(\prd-1)}_\cntn} -\providecommand{\MidPrdLst}{}\renewcommand{\MidPrdLst}{{(\prd-1)}} % \dcsn -\providecommand{\MidPrdLsT}{}\renewcommand{\MidPrdLsT}{{(\prd-1)}} % \dcsn -\providecommand{\BegPrdLst}{}\renewcommand{\BegPrdLst}{{_\arvl(\prd-1)}} % \dcsn -\providecommand{\BegPrdLsT}{}\renewcommand{\BegPrdLsT}{{_\arvl(\prd-1)}} % \dcsn -\providecommand{\MidLsT}{}\renewcommand{\EndPrdLsT}{{(\prd-1})_\cntn} -\providecommand{\EndPrdNxt}{}\renewcommand{\EndPrdNxt}{(\prd+1)_\cntn} -\providecommand{\EndPrdNxT}{}\renewcommand{\EndPrdNxT}{(\prdT+1)_\cntn} -\providecommand{\EndStp}{}\renewcommand{\EndStp}{_\cntn} -\providecommand{\EndStpLst}{}\renewcommand{\EndStpLst}{_\cntn} -\providecommand{\EndStpLsT}{}\renewcommand{\EndStpLsT}{_\cntn} -\providecommand{\EndStpNxt}{}\renewcommand{\EndStpNxt}{_\cntn} -\newcommand{\vBegStp}{\vFunc_{\BegStp}} -\newcommand{\vBegStpNxt}{\vFunc_{\BegStpNxt}} -\newcommand{\vBegPrd}{\vFunc_{\BegPrd}} -\newcommand{\vBegPrdNxt}{\vFunc_{\BegPrdNxt}} -\newcommand{\vBegStpLst}{\vFunc_{\BegStpLst}} -\newcommand{\vMidStp}{\vFunc_{\MidStp}} -\newcommand{\vMidStpNxt}{\vFunc_{\MidStpNxt}} -\newcommand{\vEnd}{\vFunc_{\EndStp}} -\newcommand{\vEndStp}{\vFunc_{\EndStp}} -\newcommand{\vEndStg}{\vFunc_{\EndStg}} -\newcommand{\vEndPrd}{\vFunc_{\EndPrd}} -\newcommand{\vEndStpLst}{\vFunc_{\EndStpLst}} -\newcommand{\vEndStpLsT}{\vFunc_{\EndStpLsT}} -\newcommand{\vEndPrdLst}{\vFunc_{\EndPrdLst}} -\newcommand{\vEndPrdLsT}{\vFunc_{\EndPrdLsT}} -\newcommand{\vMidPrdLst}{\vFunc_{\MidPrdLst}} -\newcommand{\vMidPrdLsT}{\vFunc_{\MidPrdLsT}} -\newcommand{\cEndStp}{\cFunc_{\EndStp}} -\newcommand{\vPEndStp}{\vFunc^{a}_{\EndStp}} -\newcommand{\vPMidStp}{\vFunc^{m}_{\MidStp}} -\newcommand{\vPBegStp}{\vFunc^{k}_{\BegStp}} -\newcommand{\vPEndStpNxt}{\vFunc^{a}_{\EndStpNxt}} -\newcommand{\vPMidStpNxt}{\vFunc^{m}_{\MidStpNxt}} -\newcommand{\vPBegStpNxt}{\vFunc^{k}_{\BegStpNxt}} -\renewcommand{\vPBegStpNxt}{\vFunc^{k}_{\BegStpNxt}} - -\providecommand{\step}{}\renewcommand{\step}{s} - -\providecommand{\PDVCoverc}{\mathbb{C}} - -% \newcommand{\Mma}{\textit{Mathematica}} -\providecommand{\step}{}\renewcommand{\step}{t} -\providecommand{\stp}{}\renewcommand{\stp}{t} -\providecommand{\Stp}{}\renewcommand{\Stp}{T} -% \providecommand{step}{}\renewcommand{step}{step} -\providecommand{step}{}\renewcommand{step}{toe} -\providecommand{Step}{}\renewcommand{Step}{Toe} -% \providecommand{steps}{}\renewcommand{steps}{steps} -\providecommand{steps}{}\renewcommand{steps}{toes} -\providecommand{Steps}{}\renewcommand{Steps}{Toes} -\providecommand{period}{}\renewcommand{period}{tic} -\providecommand{Period}{}\renewcommand{Period}{Tic} -\providecommand{periods}{}\renewcommand{periods}{tics} -\providecommand{Periods}{}\renewcommand{Periods}{Tics} - -\providecommand{\trnsn}{}\renewcommand{\trnsn}{transition} -\providecommand{\Trnsn}{}\renewcommand{\Trnsn}{Transition} -\providecommand{transitions}{}\renewcommand{transitions}{transitions} -\providecommand{Transitions}{}\renewcommand{Transitions}{Transitions} - -\providecommand{\evltn}{}\renewcommand{\evltn}{evolution} -\providecommand{\Evltn}{}\renewcommand{\Evltn}{Evolution} -\providecommand{evolutions}{}\renewcommand{evolutions}{evolutions} -\providecommand{Evolutions}{}\renewcommand{Evolutions}{Evolutions} - - -\providecommand{stage}{}\renewcommand{stage}{tac} -\providecommand{Stage}{}\renewcommand{Stage}{Tac} 
-\providecommand{stages}{}\renewcommand{stages}{tacs} -\providecommand{Stages}{}\renewcommand{Stages}{Tacs} - -\newcommand{\ExEndStp}{\Ex_{\EndStp}} -\newcommand{\ExBegStp}{\Ex_{\BegStp}} -\newcommand{\ExMidStp}{\Ex_{\MidStp}} -\newcommand{\ExMidPrd}{\Ex_{\MidPrd}} - -\newcommand{\ExEndStpLst}{\Ex_{\EndStpLst}} -\newcommand{\ExEndStpLsT}{\Ex_{\EndStpLsT}} -\newcommand{\ExBegStpLst}{\Ex_{\BegStpLst}} -\newcommand{\ExBegStpLsT}{\Ex_{\BegStpLsT}} -\newcommand{\ExMidStpLst}{\Ex_{\MidStpLst}} -\newcommand{\ExMidStpLsT}{\Ex_{\MidStpLsT}} - -\providecommand{\stigma}{}\renewcommand{\stigma}{\varsigma} -\providecommand{\Shr}{}\renewcommand{\Shr}{\stigma} -\providecommand{\Aprx}{}\renewcommand{\Aprx}{\Alt} - -%\newcommand{\lqd}{\ell} -\newcommand{\wlthAftr}{\acute{w}} -\newcommand{\wlthBefr}{w} - -\newcommand{\optml}[1]{\breve{#1}} - -\providecommand{\prd}{}\renewcommand{\prd}{t} -\providecommand{\prdm}{}\renewcommand{\prdm}{({\prd-1})} - - -\newcommand{\code}[1]{\ensuremath{\mathtt{#1}}} % code should be in tt font -%\newcommand{\code}{\mathtt} - -% These allow turning on or off the growth factor for permanent income -% (preserves symbolic derivations in LaTeX but makes them disappear) -% value function growth normed by G^{1-\CRRA} -\newcommand{\PermGroFacAdjV}{\PermGroFac_{\prd}^{1-\CRRA}} -\newcommand{\PermGroFacAdjVNxt}{\PermGroFac_{\prd+1}^{1-\CRRA}} -\renewcommand{\PermGroFacAdjV}{\null} -\newcommand{\AdjV}{_{\prd}^{1-\CRRA}} -\newcommand{\AdjVNxt}{_{\prd+1}^{1-\CRRA}} -%\providecommand{\PermGroFacAdjV}{}\renewcommand{\PermGroFacAdjV}{.} % turn off -% marginal utility growth normed by G^{-\CRRA} -\newcommand{\PermGroFacAdjMu}{\PermGroFac_{\prd}^{-\CRRA}} -\newcommand{\PermGroFacAdjMuNxt}{\PermGroFac_{\prd+1}^{-\CRRA}} -\newcommand{\AdjMu}{_{\prd}^{-\CRRA}} -\newcommand{\AdjMuNxt}{_{\prd+1}^{-\CRRA}} -\providecommand{\PermGroFacAdjMu}{}\renewcommand{\PermGroFacAdjMu}{\null} % turn off - -% To compile the Habits subfile, go to the Subfiles directory - -\newcommand{\trmT}{T} - -\providecommand{\VInv}{}\providecommand{\VInv}{}\renewcommand{\VInv}{{\Lambda}} -\providecommand{\vInv}{}\providecommand{\vInv}{}\renewcommand{\vInv}{{\scriptstyle \VInv \displaystyle}} - -\providecommand{\NatBoroCnstra}{}\providecommand{\NatBoroCnstra}{}\renewcommand{\NatBoroCnstra}{\underline{a}} - -\providecommand{\DiscAlt}{}\renewcommand{\DiscAlt}{\beta} % Erase the distinction between the alternative and the standard discount factor - -\providecommand{\aVecCode}{}\renewcommand{\aVecCode}{\code{aVec}} -\providecommand{\mVecCode}{}\renewcommand{\mVecCode}{\texttt{mVec}} -\providecommand{\kVecCode}{}\renewcommand{\kVecCode}{\texttt{kVec}} -\providecommand{\cVecCode}{}\renewcommand{\cVecCode}{\texttt{cVec}} - -\newcommand{\vctr}{\pmb} -\newcommand{\mVec}{\texttt{mVec}} -\newcommand{\aVec}{\texttt{aVec}} -\newcommand{\vVec}{\texttt{vVec}} - -\newcommand{\arvlstepShr}{_\arvl\step[\Shr]} -\newcommand{\stepShr}{\step[\Shr]} - -\newcommand{\vBegStpShr}{\vFunc_{_arvl[\Shr]}} -\newcommand{\vMidStpShr}{\vFunc_{\stp[\Shr]}} -\newcommand{\vEndStpShr}{\vFunc_{\stp[\Shr]_\cntn}} -\newcommand{\dvdShrBeg}{\vFunc^{\Shr}_{_\arvl\prd[\Shr]}} -\newcommand{\vShrMid}{\vFunc^{\Shr}_{\prd[\Shr]}} -\newcommand{\vShrEnd}{\vFunc^{\Shr}_{\prd[\Shr]_\cntn}} -\newcommand{\vVecShr}{\vctr{v}_{_\arvl\prd[\Shr]}} - -%\newcommand{\prd}{t} -\newcommand{\prdt}{t} -\newcommand{\prdT}{t} - -\providecommand{\hEndMin}{\underline{\hNrm}}\renewcommand{\hEndMin}{\underline{\hNrm}} -\newcommand{\TranShkEmpDummy}{\vartheta} - -\newcommand{\aVecMin}{{a}_{1}} 
-\newcommand{\cMin}{{c}_{1}} -\newcommand{\vctratm}{\vctr{a}_{\prd-1}}\renewcommand{\vctratm}{\vctr{a}} -\providecommand{\TranShkEmpMin}{}\renewcommand{\TranShkEmpMin}{\underline{\TranShkEmp}} - -\newcommand{\cLst}{{c}_{\prdLst}} -\newcommand{\cLsT}{{c}_{\prdLsT}} -\newcommand{\mLst}{{m}_{\prdLst}} -\newcommand{\mLsT}{{m}_{\prdLsT}} - -% Alternative definitions of these would restore the time subscript for c -% in the max problem statements -\newcommand{\cLvlMaxPrdT}{\cLvl} -\newcommand{\cLvlMaxPrdt}{\cLvl} -\newcommand{\cNrmMaxPrdT}{\cNrm} -\newcommand{\cNrmMaxPrdt}{\cNrm} -% \newcommand{\cLvlMaxPrd}{\cLvl_{\prd}} - - -\renewcommand{\std}{\sigma} -\newcommand{\vrnc}{\std^{2}} - -\newcommand{\cStg}{\cFunc} -\newcommand{\cStgSub}{[\cStg]} -\newcommand{\cStgSup}{\cStg} - -\newcommand{\cStgBeg}{\arvlc} -\newcommand{\cStgMid}{[c]} -\newcommand{\cStgEnd}{{c}\cntn} - -\newcommand{\vFuncBeg}{\nu} -\newcommand{\vFuncMid}{\vFunc} -\newcommand{\vFuncEnd}{\mathfrak{v}} - -\renewcommand{\vBegStpNxt}{\vFunc^{\cStgBeg}_{\prd+1}} -\newcommand{\cCtrl}{\cFunc} -\newcommand{\mStte}{\mNrm} % m as a state variable - -\newcommand{\aStte}{\aNrm} -\newcommand{\cStte}{\cNrm} diff --git a/docs/Resources/LaTeXInputs/local-packages.tex b/docs/Resources/LaTeXInputs/local-packages.tex deleted file mode 100644 index 8c48e5f87..000000000 --- a/docs/Resources/LaTeXInputs/local-packages.tex +++ /dev/null @@ -1,106 +0,0 @@ -% Define a few objects that are unique to this paper -% Allow different actions depending on whether document is being processed as -% subfile or being process as standalone - -% \usepackage{import} % used in slides and cctwMoM -\usepackage{\LaTeXInputs/pdfsuppressruntime} % prevent timestamp - -% Get fonts so pdflatex can handle unicode characters -\usepackage[utf8]{inputenc} -\usepackage[T1]{fontenc} - -\usepackage[authoryear]{natbib} -\usepackage{listings} -\usepackage{mathtools} -\usepackage{accents,xr-hyper} -%\usepackage{\econark} -\usepackage{dcolumn} % seems to need to come after hyperref -\usepackage{moreverb} % Used in slides -%\usepackage{\econtexSetup} % Gets, configures often-used packages - -\provideboolean{Web} -\setboolean{Web}{false} % reset to true if running via dvi; search for \ifdvi below - -\makeatletter -\newif\ifdvi\dvitrue -\@ifundefined{pdfoutput}{}{\ifnum\pdfoutput>0 \dvifalse\fi} -\makeatother - - - -\ifdvi - \setboolean{Web}{true} - \RequirePackage{tex4ht} - \RequirePackage{graphicx} - \RequirePackage[tex4ht]{hyperref} - \provideboolean{bigPrint}\setboolean{bigPrint}{true} % HTM output looks better in a larger font size - \DeclareGraphicsExtensions{.png} - \providecommand{\textSizeDefault}{\large} - \providecommand{\titlepagefinish}{\newpage\textSizeDefault} - \providecommand{\abstractSizeDefault}{\large} - \let\footnoterule\relax - \makeatletter - \renewenvironment{abstract}{% - \begin{spacing}{0.9} - \noindent {\tiny \phantom{.}} \\ % Trick to get proper spacing in html - \noindent \hrule height 0.4pt depth 0.0pt width \textwidth \relax - \vspace*{5mm} - \noindent \textbf{Abstract}\\ - \indent \abstractSizeDefault - }{% - \noindent {\tiny \phantom{.}} \\ % Trick to get proper spacing in html -% \noindent \hrule height 0.4pt depth 0.0pt width \textwidth \relax - \vspace*{3mm} - \ifthenelse{ \isundefined\@keywords }{ - \ClassWarningNoLine{bejournal}{No keywords specified. 
- Please use the command \protect\keywords} - }{ - } - \end{spacing} - \begin{quote} - \begin{Description} - \item[\textbf{~~~~~~~~~~~~Keywords~}] \@keywords - \ifthenelse{ \isundefined\@jelclass }{ - \ClassWarningNoLine{bejournal}{No JEL classification specified. - Please use the command \protect\jelclass} - }{ - \item[\textbf{~~~~~~~~~~~~JEL codes~}] \@jelclass - \end{Description} - } - \end{quote} - \makeatother - } -\else - \RequirePackage{graphicx} % requiring [pdftex] seems to interfere with Pandemic build - \RequirePackage{hyperref} % plainpages seems to mess up BST - \DeclareGraphicsExtensions{.pdf} -\fi - - -% \usepackage{econtexSetup} sets boolean Web=true if compilation type is dvi -% also includes hyperref -\provideboolean{showPageHead}{} -\ifthenelse{\boolean{Web}}{ - \setboolean{showPageHead}{false} -}{ % {pdf} - \setboolean{showPageHead}{true} - \usepackage{scrlayer-scrpage} % Package for page headers if PDF - \usepackage{caption} % allow suppression of appendix figures in NoAppendix PDF -} - -%\usepackage{\econtexShortcuts} -\usepackage{subfiles} - -\newcommand{\urlPDF}{\texttt{\href{https://github.com/\owner/SolvingMicroDSOPs/blob/master/SolvingMicroDSOPs.pdf}{https://github.com/\owner/SolvingMicroDSOPs/blob/master/SolvingMicroDSOPs.pdf}}} -\newcommand{\urlSlides}{\texttt{\href{https://github.com/\owner/SolvingMicroDSOPs/blob/master/SolvingMicroDSOPs-Slides.pdf}{https://github.com/\owner/SolvingMicroDSOPs/blob/master/SolvingMicroDSOPs-Slides.pdf}}} -\newcommand{\urlHTML}{\texttt{\href{https://\owner.github.io/SolvingMicroDSOPs}{https://\owner.github.io/SolvingMicroDSOPs}}} -\newcommand{\urlCode}{\texttt{\href{https://github.com/\owner/SolvingMicroDSOPs/tree/master/Code}{https://github.com/\owner/SolvingMicroDSOPs/tree/master/Code}}} -\newcommand{\urlRepo}{\texttt{\href{https://github.com/\owner/SolvingMicroDSOPs}{https://github.com/\owner/SolvingMicroDSOPs}}} - - -\newcommand{\SMDSOPrepo}{\texttt{\href{https://github.com/\owner/SolvingMicroDSOPs}{SolvingMicroDSOPs}}} -\newcommand{\EMDSOPrepo}{\texttt{\href{https://github.com/\owner/EstimatingMicroDSOPs}{EstimatingMicroDSOPs repo}}} -\newcommand{\HARKrepo}{\texttt{\href{https://github.com/econ-ark/HARK}{HARK}}} -\newcommand{\HARKdocs}{\texttt{\href{https://docs.econ-ark.org}{documentation}}} - -% \newcommand{\ARKurl}{\texttt{\href{https://econ-ark.org}{Econ-ARK}}} diff --git a/docs/Resources/LaTeXInputs/unicode-subs-declare.sty b/docs/Resources/LaTeXInputs/unicode-subs-declare.sty deleted file mode 100644 index d6d41541f..000000000 --- a/docs/Resources/LaTeXInputs/unicode-subs-declare.sty +++ /dev/null @@ -1,17 +0,0 @@ -% If you encounter the unicode character, substitute the LaTeX on compile -\DeclareUnicodeCharacter{1D53C}{\mathbb{E}} % 𝔼 -\DeclareUnicodeCharacter{1D41A}{\mathbf{a}} % 𝐚 -\DeclareUnicodeCharacter{1D41B}{\mathbf{b}} % 𝐛 -\DeclareUnicodeCharacter{1D41C}{\mathbf{c}} % 𝐜 -\DeclareUnicodeCharacter{1D429}{\mathbf{p}} % 𝐩 -\DeclareUnicodeCharacter{1D424}{\mathbf{k}} % 𝐀 -\DeclareUnicodeCharacter{1D426}{\mathbf{m}} % 𝐦 -\DeclareUnicodeCharacter{1D42F}{\mathbf{v}} % 𝐯 -\DeclareUnicodeCharacter{1D432}{\mathbf{y}} % 𝐲 -\DeclareUnicodeCharacter{1D4A2}{\mathcal{G}} % 𝒒 -\DeclareUnicodeCharacter{211B}{\mathcal{R}} % β„› -\DeclareUnicodeCharacter{1D69E}{\mathrm{u}} % 𝚞 -\DeclareUnicodeCharacter{1D69F}{\mathrm{v}} % 𝚟 -\DeclareUnicodeCharacter{03B2}{\beta} % Ξ² -\DeclareUnicodeCharacter{03C3}{\sigma} % Οƒ -\DeclareUnicodeCharacter{03C1}{\rho} % ρ diff --git a/docs/Resources/texmf-local/tex/latex/econark-clean-clean.sty 
b/docs/Resources/texmf-local/tex/latex/econark-clean-clean.sty new file mode 100644 index 000000000..cec9e0e53 --- /dev/null +++ b/docs/Resources/texmf-local/tex/latex/econark-clean-clean.sty @@ -0,0 +1,367 @@ +% Style file with stuff that should be available for any latex doc in Econ-ARK +% \renewcommand{[macroName]}{[value]} does not overwrite [macroName] +% so if used after macroName is already defined, it leaves the macro unchanged + +% Get references right whether compiled as subfile or main file +% https://tex.stackexchange.com/questions/463699/proper-reference-numbers-with-subfiles +\newcommand\labelprefix{} +\newcommand\localorexternallabel[1]{% Removing this comment breaks the command + \expandafter\ifx\csname r@#1\endcsname\relax + \labelprefix + \fi #1% Removing this comment breaks the command +} + +% Appendices and body are subfiles +\RequirePackage{subfiles} +\RequirePackage{xmpincl} % mathstat.dal..ca/~sellinger/pdfa + +% Get all the packages from the American Mathematical Society +\RequirePackage{amsmath,amsfonts,amsmath,amsthm,amssymb} + +% Command to define a label only if it does not yet exist (suppresses misleading +% warnings when material created in subfiles is read in while references already +% exist from master document) +\makeatletter +\renewcommand{\iflabelexists}[3]{\@ifundefined{r@#1}{\G@refundefinedtrue{#3}}{#2}} +\makeatother + +% Home of Econ-ARK +\renewcommand{\ARKurl}{\href{https://econ-ark.org}{{Econ-ARK}}} + +% Define various generically useful terms and items +\renewcommand{\avg}{\bar} +\renewcommand{\cov}{}\renewcommand{\cov}{\textup{cov}} +\renewcommand{\Abve}{\bar} +\renewcommand{\Belw}{\underline} +\renewcommand{\CDF}{\mathcal{F}} +\renewcommand{\GroFac}{\Omega} % Generic for growth factor +\renewcommand{\GroRte}{\omega} % Generic for growth rate +\renewcommand{\Lvl}{\mathbf} % Levels of variables are bold + +% Constrained +\renewcommand{\cnstr}[1]{\grave{#1}} + +\renewcommand{\BalGroFac}{\check} % Balanced growth factor +\renewcommand{\BalGroRte}{\tilde} % Balanced growth rate (log change) +\renewcommand{\TargetNrm}{\hat} % Target +\renewcommand{\ABalLvl}{\BalGroFac{\ALvl}} % m where ALvl grows by PermGroFac +\renewcommand{\MBalLvl}{\BalGroFac{\MNrm}} % m where MLvl grows by PermGroFac +\renewcommand{\mBalLog}{\BalGroRte{\mNrm}} % m where mLog grows by PermGroRte +\renewcommand{\mTrgNrm}{\TargetNrm{\mNrm}} % m where E[m_{t+1}]=m_{t} + +% Levels are boldface +\renewcommand{\aLvl}{\mathbf{a}} +\renewcommand{\bLvl}{\mathbf{b}} +\renewcommand{\cLvl}{\mathbf{c}} +\renewcommand{\dLvl}{\mathbf{d}} +\renewcommand{\eLvl}{\mathbf{e}} +\renewcommand{\fLvl}{\mathbf{f}} +\renewcommand{\gLvl}{\mathbf{g}} +\renewcommand{\hLvl}{\mathbf{h}} +\renewcommand{\iLvl}{\mathbf{i}} +\renewcommand{\jLvl}{\mathbf{j}} +\renewcommand{\kLvl}{\mathbf{k}} +\renewcommand{\mLvl}{\mathbf{m}} +\renewcommand{\nLvl}{\mathbf{n}} +\renewcommand{\pLvl}{\mathbf{p}} +\renewcommand{\qLvl}{\mathbf{q}} +\renewcommand{\rLvl}{\mathbf{r}} +\renewcommand{\sLvl}{\mathbf{s}} +\renewcommand{\tLvl}{\mathbf{t}} +\renewcommand{\uLvl}{\mathbf{u}} +\renewcommand{\vLvl}{\mathbf{v}} +\renewcommand{\wLvl}{\mathbf{w}} +\renewcommand{\xLvl}{\mathbf{x}} +\renewcommand{\yLvl}{\mathbf{y}} +\renewcommand{\zLvl}{\mathbf{z}} + +\renewcommand{\ALvl}{\mathbf{A}} +\renewcommand{\BLvl}{\mathbf{B}} +\renewcommand{\CLvl}{\mathbf{C}} +\renewcommand{\DLvl}{\mathbf{D}} +\renewcommand{\ELvl}{\mathbf{E}} +\renewcommand{\FLvl}{\mathbf{F}} +\renewcommand{\GLvl}{\mathbf{G}} +\renewcommand{\HLvl}{\mathbf{H}} 
+\renewcommand{\ILvl}{\mathbf{I}} +\renewcommand{\JLvl}{\mathbf{J}} +\renewcommand{\KLvl}{\mathbf{K}} +\renewcommand{\LLvl}{\mathbf{L}} +\renewcommand{\MLvl}{\mathbf{M}} +\renewcommand{\NLvl}{\mathbf{N}} +\renewcommand{\OLvl}{\mathbf{O}} +\renewcommand{\PLvl}{\mathbf{P}} +\renewcommand{\QLvl}{\mathbf{Q}} +\renewcommand{\RLvl}{\mathbf{R}} +\renewcommand{\SLvl}{\mathbf{S}} +\renewcommand{\TLvl}{\mathbf{T}} +\renewcommand{\ULvl}{\mathbf{U}} +\renewcommand{\VLvl}{\mathbf{V}} +\renewcommand{\WLvl}{\mathbf{W}} +\renewcommand{\XLvl}{\mathbf{X}} +\renewcommand{\YLvl}{\mathbf{Y}} +\renewcommand{\ZLvl}{\mathbf{Z}} + +% Functions are Roman not italicized +\renewcommand{\aFunc}{\mathrm{a}} +\renewcommand{\bFunc}{\mathrm{b}} +\renewcommand{\cFunc}{\mathrm{c}} +\renewcommand{\dFunc}{\mathrm{d}} +\renewcommand{\eFunc}{\mathrm{e}} +\renewcommand{\fFunc}{\mathrm{f}} +\renewcommand{\hFunc}{\mathrm{h}} +\renewcommand{\iFunc}{\mathrm{i}} +\renewcommand{\jFunc}{\mathrm{j}} +\renewcommand{\kFunc}{\mathrm{k}} +\renewcommand{\mFunc}{\mathrm{m}} +\renewcommand{\nFunc}{\mathrm{n}} +\renewcommand{\pFunc}{\mathrm{p}} +\renewcommand{\sFunc}{\mathrm{s}} +\renewcommand{\rFunc}{\mathrm{r}} +\renewcommand{\uFunc}{\mathrm{u}} +\renewcommand{\vFunc}{\mathrm{v}} +\renewcommand{\wFunc}{\mathrm{w}} +\renewcommand{\xFunc}{\mathrm{x}} +\renewcommand{\yFunc}{\mathrm{y}} +\renewcommand{\zFunc}{\mathrm{z}} + +\renewcommand{\AFunc}{\mathrm{A}} +\renewcommand{\BFunc}{\mathrm{B}} +\renewcommand{\CFunc}{\mathrm{C}} +\renewcommand{\DFunc}{\mathrm{D}} +\renewcommand{\EFunc}{\mathrm{E}} +\renewcommand{\FFunc}{\mathrm{F}} +\renewcommand{\GFunc}{\mathrm{G}} +\renewcommand{\HFunc}{\mathrm{H}} +\renewcommand{\IFunc}{\mathrm{I}} +\renewcommand{\JFunc}{\mathrm{J}} +\renewcommand{\KFunc}{\mathrm{K}} +\renewcommand{\LFunc}{\mathrm{L}} +\renewcommand{\MFunc}{\mathrm{M}} +\renewcommand{\NFunc}{\mathrm{N}} +\renewcommand{\OFunc}{\mathrm{O}} +\renewcommand{\PFunc}{\mathrm{P}} +\renewcommand{\QFunc}{\mathrm{Q}} +\renewcommand{\RFunc}{\mathrm{R}} +\renewcommand{\SFunc}{\mathrm{S}} +\renewcommand{\TFunc}{\mathrm{T}} +\renewcommand{\UFunc}{\mathrm{U}} +\renewcommand{\VFunc}{\mathrm{V}} +\renewcommand{\WFunc}{\mathrm{W}} +\renewcommand{\XFunc}{\mathrm{X}} +\renewcommand{\YFunc}{\mathrm{Y}} +\renewcommand{\ZFunc}{\mathrm{Z}} +% Ratios to permanent income are normal face +\renewcommand{\aNrm}{a} +\renewcommand{\bNrm}{b} +\renewcommand{\cNrm}{c} +\renewcommand{\dNrm}{d} +\renewcommand{\eNrm}{e} +\renewcommand{\fNrm}{f} +\renewcommand{\hNrm}{h} +\renewcommand{\iNrm}{i} +\renewcommand{\jNrm}{j} +\renewcommand{\kNrm}{k} +\renewcommand{\mNrm}{m} +\renewcommand{\pNrm}{p} +\renewcommand{\rNrm}{s} +\renewcommand{\sNrm}{s} +\renewcommand{\vNrm}{v} +\renewcommand{\yNrm}{y} +\renewcommand{\zNrm}{z} + +\renewcommand{\ANrm}{A} +\renewcommand{\BNrm}{B} +\renewcommand{\CNrm}{C} +\renewcommand{\DNrm}{D} +\renewcommand{\ENrm}{E} +\renewcommand{\FNrm}{F} +\renewcommand{\HNrm}{H} +\renewcommand{\INrm}{I} +\renewcommand{\JNrm}{J} +\renewcommand{\KNrm}{K} +\renewcommand{\MNrm}{M} +\renewcommand{\PNrm}{P} +\renewcommand{\SNrm}{S} +\renewcommand{\VNrm}{V} +\renewcommand{\YNrm}{Y} +\renewcommand{\ZNrm}{Z} + +\renewcommand{\RNrm}{\mathcal{R}} +% Ind and Agg varaibles begin with lower case +\renewcommand{\tranShkInd}{\theta} % +\renewcommand{\tranShk}{\tranShkInd} % +\renewcommand{\tranShkAgg}{\Theta} % +\renewcommand{\permShkInd}{\psi} % +\renewcommand{\permShk}{\permShkInd} % +\renewcommand{\PermShkAgg}{\Psi} % +\renewcommand{\tranShkAgg}{\Theta} % +\renewcommand{\std}{\sigma} 
+\renewcommand{\tranShkIndStd}{\std_{\tranShkInd}} % +\renewcommand{\tranShkIndVar}{\std^{2}_{\tranShkInd}} % +\renewcommand{\tranShkAggStd}{\std_{\tranShkAgg}} % +\renewcommand{\tranShkAggVar}{\std^{2}_{\tranShkAgg}} % + +% Combo variables (combining Ind and Agg) +\renewcommand{\PermShk}{\mathbf{\Psi}} +\renewcommand{\PermShkStd}{\std_{\PermShk}} +\renewcommand{\PermShkVar}{\std^{2}_{\PermShk}} +\renewcommand{\PermLvl}{\pLvl} +\renewcommand{\PermLvlAgg}{\PLvl} + +% More specialized variables +\renewcommand{\TranShkAll}{\pmb{\xi}} +\renewcommand{\TranShkMin}{\underline{\xi}} +\renewcommand{\TranShkMax}{\overline{\xi}} +\renewcommand{\TranShkStd}{\std_{\TranShk}} +\renewcommand{\TranShkVar}{\std^{2}_{\TranShk}} +\renewcommand{\TranShkEmp}{\pmb{\theta}} +\renewcommand{\TranShkEmpMin}{}\renewcommand{\TranShkEmpMin}{\underline{\TranShkEmp}} +\renewcommand{\TranShkEmpMax}{}\renewcommand{\TranShkEmpMax}{\overline{\TranShkEmp}} +\renewcommand{\IncUnemp}{\mu} % Income in unemployment + +\renewcommand{\permLvlAgg}{\mathrm{P}} % +\renewcommand{\permLvlInd}{\mathrm{p}} % + +\renewcommand{\MPCmin}{{\uline{\kappa}}} +\renewcommand{\MPCmax}{{\bar{\kappa}}} +\renewcommand{\MPCmaxmax}{{\bar{\bar{\kappa}}}} +\renewcommand{\MPCmaxmin}{{\hat{\underline{\kappa}}}} +\renewcommand{\MPCminmin}{{\underline{\kappa}}} +\renewcommand{\Opt}{\tilde} +\renewcommand{\permGroFacAgg}{\mathrm{G}} +\renewcommand{\permGroFacInd}{\mathsf{G}} +\renewcommand{\PermGroFac}{\mathcal{G}} +\renewcommand{\PermGroFacAdj}{\underline{\PermGroFac}} +\renewcommand{\PermGroFacuAdj}{\underline{\underline{\PermGroFac}}} +\renewcommand{\PermGroRte}{g} + +\renewcommand{\Alive}{\mathcal{L}}\renewcommand{\Alive}{\mathcal{L}} +\renewcommand{\RfreeAgg}{}\renewcommand{\RfreeAgg}{\Agg{\Rfree}} + +\renewcommand{\DeprFac}{\daleth} +\renewcommand{\deprRte}{\delta} % +\renewcommand{\DiscFac}{\beta} +\renewcommand{\DiscFacAlt}{\beth} +\renewcommand{\DiscAltuAdj}{{\underline{\underline{\beth}}}} +\renewcommand{\DiscAlt}{}\renewcommand{\DiscAlt}{\beth} +\renewcommand{\DiscFacRaw}{\beta} +\renewcommand{\DiscFacLiv}{\underline{\DiscFacRaw}} +\renewcommand{\discRte}{\vartheta} % + +\renewcommand{\APFac}{\text{\pmb{\Thorn}}} % Former \Pat +\renewcommand{\APFacDefn}{\hyperlink{APFacDefn}{\textrm{APF}}} + +\renewcommand{\GPFacRaw}{\APFac_{\PermGroFac}} +\renewcommand{\GPFacNrm}{\APFac_{\PermGroFacAdj}} +\renewcommand{\RPFac}{\APFac_{\Rfree}} + +\renewcommand{\RPRte}{\text{\thorn}_{\rfree}} +\renewcommand{\GPRte}{\text{\thorn}_{\PermGroRte}} +\renewcommand{\APRte}{\text{\thorn}} + + +\renewcommand{\EPermShkInv}{\Ex[\PermShk^{-1}]} % Formerly EpShkInv +\renewcommand{\InvEPermShkInv}{\underline{\PermShk}} % Formerly InvEpShkInv +\renewcommand{\uInvEuPermShk}{\underline{\underline{\PermShk}}} % Formerly {\uInvEpShkuInv} + +\renewcommand{\RfreeEff}{\bar{\Rfree}} % Blanchard-adjusted interest rate + +\renewcommand{\PopnGroFac}{\Xi} +\renewcommand{\PopnGroRte}{\xi} +\renewcommand{\PopnLvl}{\pmb{\mathrm{N}}} + +\renewcommand{\LivPrb}{\Alive} +\renewcommand{\livPrb}{\ell} + +\renewcommand{\cncl}{} +\renewcommand\cncl[1]{{\cancel{#1}}} + +\renewcommand{\pNotZero}{(1-\pZero)} + +\renewcommand{\CARA}{{\alpha}} +\renewcommand{\CRRA}{\rho} +\renewcommand{\diePrb}{{\mathsf{d}}} % Continuous time death rate +\renewcommand{\DiePrb}{{\mathsf{D}}} % Discrete-time one-period death rate +\renewcommand{\Ex}{{\mathbb{E}}} % Expectations operator defined in econtex.cls +\renewcommand{\Mean}{{\mathbb{M}}} % Mean +\renewcommand{\MPC}{{\kappa}} +\renewcommand{\MPCFunc}{\pmb{\kappa}} 
+\renewcommand{\pZero}{\wp} + +\renewcommand{\rfree}{\mathsf{r}} % The net return rate on the safe asset +\renewcommand{\Rfree}{\mathsf{R}} % The return factor on the safe asset +\renewcommand{\RSave}{{\underline{\Rfree}}} +\renewcommand{\rsave}{{\underline{\rfree}}} +\renewcommand{\RBoro}{{\bar{\Rfree}}} +\renewcommand{\rboro}{{\bar{\rfree}}} + +\renewcommand{\Risky}{{\mathbf{R}}} % The return factor on the risky asset +\renewcommand{\risky}{{\mathbf{r}}} % The arithmetic return rate E[\Risky] - 1 +\renewcommand{\riskyELog}{\risky} % The arithmetic return rate \Risky - 1 +\renewcommand{\riskyELev}{\boldsymbol{r}} % The geometric return rate \log \Risky +\renewcommand{\riskyshare}{{\varsigma}} +\renewcommand{\riskyvar}{\std^{2}_{\risky}} +\renewcommand{\Rport}{\mathfrak{R}} % Portfolio -weighted return +\renewcommand{\rport}{\mathfrak{r}} + +\renewcommand{\uPPP}{{{\mathrm{u}^{\prime\prime\prime}}}} +\renewcommand{\uPP}{{{\mathrm{u}^{\prime\prime}}}} +\renewcommand{\uP}{{{\mathrm{u}^{\prime}}}} +\renewcommand{\util}{u} + +\renewcommand{\Kap}{{K}} +\renewcommand{\kap}{k} + +\renewcommand{\leiShare}{\zeta} % + +\renewcommand{\MPSmin}{\pZero^{1/\CRRA} \RPFac} +\renewcommand{\MPSmax}{\RPFac} + +\renewcommand{\PDV}{{\mathbb{P}}} % +\renewcommand{\Wage}{{\mathsf{W}}} +\renewcommand{\wage}{{\mathsf{w}}} + +\renewcommand{\TaxLev}{T} +\renewcommand{\Tax}{} +\renewcommand{\TaxFree}{{\cancel{\Tax}}} + +\renewcommand{\Alt}{\grave} + +\renewcommand{\urate}{{\mho}} +\renewcommand{\erate}{{\cancel{\mho}}} +\renewcommand{\unins}{\upsilon} + +\renewcommand{\Labor}{}\renewcommand{\Labor}{\mathrm{L}} +\renewcommand{\labor}{}\renewcommand{\labor}{\ell} + +\renewcommand{\EEndMap}{{\mathsf{E}}} +\renewcommand{\TMap}{\mathscr{T}} + +\renewcommand{\CEndFunc}{{\mathfrak{C}}} +\renewcommand{\cEndFunc}{{\mathfrak{c}}} + +\renewcommand{\uFuncInv}{\rotatebox{180}{$\uFunc$}} +\renewcommand{\muFuncInv}{\rotatebox{180}{$\uFunc$}} + +\renewcommand{\Hi}{\overline} +\renewcommand{\Lo}{\underline} + +\renewcommand{\Rnorm}{{\mathcal{R}}} % Normalized version of riskless return factor +\renewcommand{\rnorm}{{\mathit{r}}} % Normalized version of riskless rate of return + +\renewcommand{\EpremLog}{\varphi} % Not using regular \eprem because want to distinguish between \varphi = log E_{t}[\Phi_{t+1}] and \phi_{t} = E[\log \Phi_{t}] +\renewcommand{\EPrem}{\pmb{\varphi}} % equity premium +\renewcommand{\eprem}{\phi} % log equity premium + +\renewcommand{\weight}{\omega} + +\renewcommand{\FDist}{{\mathcal{F}}} +\renewcommand{\fDist}{{\mathcal{f}}} + +\renewcommand{\aMin}{{\underline{\aNrm}}} + +\renewcommand{\FDist}{}\renewcommand{\FDist}{\mathcal{F}} +\renewcommand{\fDist}{}\renewcommand{\fDist}{\mathcal{f}} + +\renewcommand{\Nrml}{}\renewcommand{\Nrml}{\mathcal{N}} diff --git a/econark-replace.sty b/docs/Resources/texmf-local/tex/latex/econark-clean.sty similarity index 93% rename from econark-replace.sty rename to docs/Resources/texmf-local/tex/latex/econark-clean.sty index c287401a2..da0671170 100644 --- a/econark-replace.sty +++ b/docs/Resources/texmf-local/tex/latex/econark-clean.sty @@ -1,3 +1,4 @@ +% Those things that can be replaced by regular letters, should be % Ratios to permanent income are normal face \renewcommand{\aNrm}{a} \renewcommand{\bNrm}{b} @@ -66,4 +67,5 @@ \newcommand{\prdT}{t} \newcommand{\prd}{t} \newcommand{\trmT}{T} - +\newcommand{\prdLsT}{t-1} +\newcommand{\prdLst}{t-1} diff --git a/docs/Resources/texmf-local/tex/latex/econark-replace.sty b/docs/Resources/texmf-local/tex/latex/econark-replace.sty deleted 
file mode 100644 index d8553da13..000000000 --- a/docs/Resources/texmf-local/tex/latex/econark-replace.sty +++ /dev/null @@ -1,58 +0,0 @@ -% Ratios to permanent income are normal face -\renewcommand{\aNrm}{a} -\renewcommand{\bNrm}{b} -\renewcommand{\cNrm}{c} -\renewcommand{\dNrm}{d} -\renewcommand{\eNrm}{e} -\renewcommand{\fNrm}{f} -\renewcommand{\hNrm}{h} -\renewcommand{\iNrm}{i} -\renewcommand{\jNrm}{j} -\renewcommand{\kNrm}{k} -\renewcommand{\mNrm}{m} -\renewcommand{\pNrm}{p} -\renewcommand{\rNrm}{s} -\renewcommand{\sNrm}{s} -\renewcommand{\vNrm}{v} -\renewcommand{\yNrm}{y} -\renewcommand{\zNrm}{z} - -\renewcommand{\ANrm}{A} -\renewcommand{\BNrm}{B} -\renewcommand{\CNrm}{C} -\renewcommand{\DNrm}{D} -\renewcommand{\ENrm}{E} -\renewcommand{\FNrm}{F} -\renewcommand{\HNrm}{H} -\renewcommand{\INrm}{I} -\renewcommand{\JNrm}{J} -\renewcommand{\KNrm}{K} -\renewcommand{\MNrm}{M} -\renewcommand{\PNrm}{P} -\renewcommand{\SNrm}{S} -\renewcommand{\VNrm}{V} -\renewcommand{\YNrm}{Y} -\renewcommand{\ZNrm}{Z} - -\renewcommand{\DiscFac}{\beta} -\renewcommand{\rfree}{r} % The net return rate on the safe asset -\renewcommand{\Rfree}{R} % The return factor on the safe asset -\renewcommand{\CDF}{F} -\renewcommand{\prd}{t} -\renewcommand{\prdT}{T} -\renewcommand{\prdt}{t} -\renewcommand{\trmT}{T} -\renewcommand{\CRRA}{\rho} -\renewcommand{\CARA}{\alpha} -\renewcommand{\MPC}{\kappa} -\renewcommand{\Ex}{𝔼} -\renewcommand{\aLvl}{𝐚} -\renewcommand{\bLvl}{𝐛} -\renewcommand{\cLvl}{𝐜} -\renewcommand{\pLvl}{𝐩} -\renewcommand{\kLvl}{𝐀} -\renewcommand{\mLvl}{𝐦} -\renewcommand{\vLvl}{𝐯} -\renewcommand{\yLvl}{𝐲} -\renewcommand{\PermGroFac}{𝒒} -\renewcommand{\RNrm}{β„›} diff --git a/docs/SolvingMicroDSOPs-clean.dep b/docs/SolvingMicroDSOPs-clean.dep deleted file mode 100644 index 9b6a058c0..000000000 --- a/docs/SolvingMicroDSOPs-clean.dep +++ /dev/null @@ -1,168 +0,0 @@ -\RequireVersions{ - *{application}{TeX} {1990/03/25 v3.x} - *{format} {LaTeX2e} {2022-11-01 v2.e} - *{class} {Resources/texmf-local/tex/latex/econtex}{2017/08/01 v0.0} - *{package}{snapshot} {2002/03/05 v1.14} - *{package}{ifthen} {2022/04/13 v1.1d} - *{package}{changepage} {2009/10/20 v1.0c} - *{package}{currfile} {2022/10/10 v0.8} - *{package}{kvoptions} {2022-06-15 v3.15} - *{package}{keyval} {2022/05/29 v1.15} - *{package}{ltxcmds} {2020-05-10 v1.25} - *{package}{kvsetkeys} {2022-10-05 v1.19} - *{package}{filehook} {2022/10/25 v0.8b} - *{package}{filehook-2020}{2022/10/25 v0.8b} - *{package}{setspace} {2022/12/04 v6.7b} - *{class} {scrartcl} {2022/10/12 v3.38} - *{package}{scrkbase} {2022/10/12 v3.38} - *{package}{scrbase} {2022/10/12 v3.38} - *{package}{scrlfile} {2022/10/12 v3.38} - *{package}{scrlfile-hook}{2022/10/12 v3.38} - *{package}{scrlogo} {2022/10/12 v3.38} - *{package}{tocbasic} {2022/10/12 v3.38} - *{file} {scrsize12pt.clo}{2022/10/12 v3.38} - *{package}{typearea} {2022/10/12 v3.38} - *{package}{fontenc} {0000/00/00 v0.0} - *{package}{babel} {2023/02/13 v3.86} - *{file} {english.ldf} {2017/06/06 v3.3r} - *{file} {babel-english.tex}{0000/00/00 v0.0} - *{package}{calc} {2017/05/25 v4.3} - *{package}{cancel} {2013/04/12 v2.2} - *{package}{verbatim} {2022-07-02 v1.5u} - *{package}{amsmath} {2022/04/08 v2.17n} - *{package}{amstext} {2021/08/26 v2.01} - *{package}{amsgen} {1999/11/30 v2.0} - *{package}{amsbsy} {1999/11/29 v1.2d} - *{package}{amsopn} {2022/04/08 v2.04} - *{package}{amssymb} {2013/01/14 v3.01} - *{package}{amsfonts} {2013/01/14 v3.01} - *{package}{amsthm} {2020/05/29 v2.20.6} - *{package}{xpatch} {2020/03/25 v0.3a} - 
*{package}{expl3} {2023-02-22 v3} - *{file} {l3backend-pdftex.def}{2023-01-16 v3} - *{package}{xparse} {2023-02-02 v3} - *{package}{etoolbox} {2020/10/05 v2.5k} - *{package}{threeparttable}{2003/06/13 v3.0} - *{package}{dcolumn} {2014/10/28 v1.06} - *{package}{array} {2022/09/04 v2.5g} - *{package}{multicol} {2021/11/30 v1.9d} - *{package}{multirow} {2021/03/15 v2.8} - *{package}{booktabs} {2020/01/12 v1.61803398} - *{package}{latexsym} {1998/08/17 v2.2e} - *{package}{afterpage} {2014/10/28 v1.08} - *{package}{enotez} {2022/01/04 v0.10d} - *{package}{l3keys2e} {2023-02-02 v2e} - *{package}{xtemplate} {2023-02-02 v3} - *{package}{translations}{2022/02/05 v1.12} - *{package}{pdftexcmds} {2020-06-27 v0.33} - *{package}{infwarerr} {2019/12/03 v1.5} - *{package}{iftex} {2022/02/03 v1.0f} - *{package}{moreverb} {2008/06/03 v2.3a} - *{package}{hhline} {2020/01/04 v2.04} - *{package}{xcolor} {2022/06/12 v2.14} - *{file} {color.cfg} {2016/01/02 v1.6} - *{file} {pdftex.def} {2022/09/22 v1.2b} - *{file} {mathcolor.ltx}{0000/00/00 v0.0} - *{package}{accents} {2006/05/12 v1.4} - *{package}{appendix} {2020/02/08 v1.2c} - *{package}{eucal} {2009/06/22 v3.00} - *{package}{ulem} {2019/11/18 v0.0} - *{package}{bm} {2022/01/05 v1.2f} - *{package}{bbm} {1999/03/15 v1.2} - *{package}{url} {2013/09/16 v3.4} - *{package}{optional} {2005/01/26 v2.2b;} - *{package}{natbib} {2010/09/13 v8.31b} - *{package}{footmisc} {2022/03/08 v6.0d} - *{package}{manyfoot} {2019/08/03 v1.11} - *{package}{nccfoots} {2005/02/03 v1.2} - *{package}{perpage} {2014/10/25 v2.0} - *{package}{./.econtexRoot}{0000/00/00 v0.0} - *{file} {./Resources/econtexPaths.tex}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/local-macros}{0000/00/00 v0.0} - *{package}{./Resources/texmf-local/tex/latex/econark}{0000/00/00 v0.0} - *{package}{subfiles} {2020/11/14 v2.2} - *{package}{import} {2020/04/01 v6.2} - *{package}{xmpincl} {2021/09/22 v2.4} - *{package}{ifpdf} {2019/10/25 v3.4} - *{package}{./Resources/texmf-local/tex/latex/llorracc-handouts}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/owner}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/local-packages}{0000/00/00 v0.0} - *{package}{./Resources/LaTeXInputs/pdfsuppressruntime}{0000/00/00 v0.0} - *{package}{inputenc} {2021/02/14 v1.3d} - *{package}{fontenc} {0000/00/00 v0.0} - *{package}{listings} {2023/02/27 v1.9} - *{package}{lstmisc} {2023/02/27 v1.9} - *{file} {listings.cfg}{2023/02/27 v1.9} - *{package}{mathtools} {2022/06/29 v1.29} - *{package}{mhsetup} {2021/03/18 v1.4} - *{package}{xr-hyper} {2023-02-07 v7.00v} - *{package}{graphicx} {2021/09/16 v1.2d} - *{package}{graphics} {2022/03/10 v1.4e} - *{package}{trig} {2021/08/11 v1.11} - *{file} {graphics.cfg}{2016/06/04 v1.11} - *{package}{hyperref} {2023-02-07 v7.00v} - *{package}{kvdefinekeys}{2019-12-19 v1.6} - *{package}{pdfescape} {2019/12/09 v1.15} - *{package}{hycolor} {2020-01-27 v1.10} - *{package}{letltxmacro} {2019/12/03 v1.6} - *{package}{auxhook} {2019-12-17 v1.6} - *{package}{nameref} {2022-05-17 v2.50} - *{package}{refcount} {2019/12/15 v3.6} - *{package}{gettitlestring}{2019/12/15 v1.6} - *{file} {pd1enc.def} {2023-02-07 v7.00v} - *{package}{intcalc} {2019/12/15 v1.3} - *{package}{etexcmds} {2019/12/15 v1.7} - *{file} {puenc.def} {2023-02-07 v7.00v} - *{package}{bitset} {2019/12/09 v1.3} - *{package}{bigintcalc} {2019/12/15 v1.5} - *{package}{atbegshi-ltx}{2021/01/10 v1.0c} - *{file} {hpdftex.def} {2023-02-07 v7.00v} - *{package}{atveryend-ltx}{2020/08/19 v1.0a} - *{package}{rerunfilecheck}{2022-07-10 
v1.10} - *{package}{uniquecounter}{2019/12/15 v1.4} - *{package}{scrlayer-scrpage}{2022/10/12 v3.38} - *{package}{scrlayer} {2022/10/12 v3.38} - *{package}{caption} {2023/03/12 v3.6j} - *{package}{caption3} {2023/03/12 v2.4} - *{file} {caption-koma.sto}{2022/12/27 v2.0c} - *{package}{showlabels} {2022/07/18 v1.9.2} - *{package}{./Resources/LaTeXInputs/cctwMoM}{0000/00/00 v0.0} - *{file} {translations-basic-dictionary-english.trsl}{(english v0.0} - *{file} {supp-pdf.mkii}{0000/00/00 v0.0} - *{package}{epstopdf-base}{2020-01-24 v2.11} - *{file} {epstopdf-sys.cfg}{2010/07/13 v1.3} - *{file} {SolvingMicroDSOPs-clean.out}{0000/00/00 v0.0} - *{file} {SolvingMicroDSOPs-clean.out}{0000/00/00 v0.0} - *{package}{bookmark} {2020-11-06 v1.29} - *{file} {bkm-pdftex.def}{2020-11-06 v1.29} - *{file} {umsa.fd} {2013/01/14 v3.01} - *{file} {umsb.fd} {2013/01/14 v3.01} - *{file} {ulasy.fd} {1998/08/17 v2.2e} - *{file} {t1cmtt.fd} {2022/07/10 v2.5l} - *{file} {t1cmss.fd} {2022/07/10 v2.5l} - *{package}{lstlang1} {2023/02/27 v1.9} - *{file} {./Code/Python/snippets/rawsolution.py}{0000/00/00 v0.0} - *{file} {./Figures/discreteApprox.pdf}{0000/00/00 v0.0} - *{file} {./Code/Python/snippets/equiprobable-make.py}{0000/00/00 v0.0} - *{file} {./Code/Python/snippets/equiprobable-max-using.py}{0000/00/00 v0.0} - *{file} {./Figures/PlotcTm1Simple.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotVTm1Simple.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotOTm1RawVSInt.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotComparecTm1AB.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotuPrimeVSOPrime.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotOPRawVSFOC.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotcTm1ABC.pdf}{0000/00/00 v0.0} - *{file} {./Figures/GothVInvVSGothC.pdf}{0000/00/00 v0.0} - *{file} {ueuf.fd} {2013/01/14 v3.01} - *{file} {./Figures/GothVVSGothCInv.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotComparecTm1AD.pdf}{0000/00/00 v0.0} - *{file} {./Figures/GothVInvVSGothCEEE.pdf}{0000/00/00 v0.0} - *{file} {./Figures/GothVVSGothCInvEEE.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotCFuncsConverge.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotctMultContr.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotRiskySharetOfat.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotTimeVaryingParam.pdf}{0000/00/00 v0.0} - *{file} {./Figures/PlotMeanMedianSCFcollegeGrads.pdf}{0000/00/00 v0.0} - *{file} {./Figures/Sensitivity.pdf}{0000/00/00 v0.0} - *{file} {./Figures/SMMcontour.pdf}{0000/00/00 v0.0} -} diff --git a/docs/SolvingMicroDSOPs-clean.pdf b/docs/SolvingMicroDSOPs-clean.pdf deleted file mode 100644 index e94369020..000000000 Binary files a/docs/SolvingMicroDSOPs-clean.pdf and /dev/null differ diff --git a/docs/SolvingMicroDSOPs-clean.tex b/docs/SolvingMicroDSOPs-clean.tex deleted file mode 100644 index 2c48818b7..000000000 --- a/docs/SolvingMicroDSOPs-clean.tex +++ /dev/null @@ -1,1668 +0,0 @@ -% -*- mode: LaTeX; TeX-PDF-mode: t; -*- -\documentclass[titlepage, headings=optiontotocandhead]{Resources/texmf-local/tex/latex/econtex} -\newcommand{\texname}{SolvingMicroDSOPs} % Keyname for the paper -\usepackage{./.econtexRoot} % Set paths (like, \LaTeXInputs) - -% specific to this paper -\usepackage{\LaTeXInputs/local-macros} % defns for this project -\usepackage{\packages/econark} % econark defns -\usepackage{\packages/llorracc-handouts} % allow references to llorracc-handouts -\usepackage{\LaTeXInputs/owner} % llorracc or econ-ark? 
-\usepackage{\LaTeXInputs/local-packages} % LaTeX config in Resources/LaTeXInputs -%\input{unicode-subs-declare}% When unicode char encountered, translate - -% Characters to replace for prettification - - -% Uncomment the line below to make a file with unicode compilable -%\usepackage{\LaTeXInputs/unicode-subs-private} % sub unicode for default - -% booleans control whether certain features are on or off: -% Controls for which of various variant versions to create - -\provideboolean{ctwVersion}\setboolean{ctwVersion}{false}\newcommand{\ctw}{\ifthenelse{\boolean{ctwVersion}}} % {cctw} -\provideboolean{trpVersion}\setboolean{trpVersion}{false}\newcommand{\trp}{\ifthenelse{\boolean{trpVersion}}} % {trp} -% \setboolean{trpVersion}{true} % {trp} -\setboolean{trpVersion}{false} % {trp} - -% Draft mode puts \labels of figs, tables, eqns in margin -\provideboolean{draftmode}\setboolean{draftmode}{true} -% \setboolean{draftmode}{false} -\newcommand{\Draft}{\ifthenelse{\boolean{draftmode}}} -\Draft{\usepackage{showlabels} - \renewcommand{\showlabelsetlabel}[1]{\tiny #1} -}{} - -% Include or exclude Method of Moderation material -\provideboolean{MoMVersion}\setboolean{MoMVersion}{true} -%\setboolean{MoMVersion}{false} -\newcommand{\MoM}{\ifthenelse{\boolean{MoMVersion}}} - -% Get extra style stuff for cctwMoM -\MoM{ % {cctw} - \usepackage{\LaTeXInputs/cctwMoM} % {cctw} -}{} % {cctw} - -% Versions with or without permanent shocks -% Seems to be defunct - remove -\provideboolean{PermShkVersion}\setboolean{PermShkVersion}{true} -\setboolean{PermShkVersion}{false} -\newcommand{\PermShkOn}{\ifthenelse{\boolean{PermShkVersion}}} - -% MPCMatch version does Hermite polynomials for the interpolation -% that match both the slope and the intercept at the gridpoints -\provideboolean{MPCMatchVersion}\setboolean{MPCMatchVersion}{true} -\newcommand{\MPCMatch}{\ifthenelse{\boolean{MPCMatchVersion}}} - -% margin notes -- to be deleted -\provideboolean{MyNotes}\setboolean{MyNotes}{true} -\setboolean{MyNotes}{false} - -% Show things that need fixing -\provideboolean{ToFix}\setboolean{ToFix}{true} -% \setboolean{ToFix}{false} -\newcommand{\Fix}{\ifthenelse{\boolean{ToFix}}} - -% Show or hide the time subscripts for -\provideboolean{hidetime}\setboolean{hidetime}{true} -% \setboolean{hidetime}{false} -\newcommand{\timehide}{\ifthenelse{\boolean{hidetime}}} - -\provideboolean{verbon}\setboolean{verbon}{true} -\newcommand{\onverb}{\ifthenelse{\boolean{verbon}}} - -\setboolean{showPageHead}{true} -% \econtexSetup sets boolean variable 'Web' to true if making html not pdf -\ifthenelse{\boolean{Web}}{ % then - \setboolean{showPageHead}{false} % no pages, so no page head, on web -}{ % else not for web - \usepackage{scrlayer-scrpage} % Package for page headers if PDF - \automark[section]{section} - \usepackage{caption} % allow suppression of appendix figures in NoAppendix PDF -} - - - -\hypersetup{colorlinks=true, - pdfauthor={Christopher D. 
Carroll }, - pdftitle={Solution Methods for Microeconomic Dynamic Stochastic Optimization Problems}, - pdfsubject={Dynamic Stochastic Optimization Theory; Lecture Notes}, - pdfkeywords={Numerical Methods, Software, Computational Economics, Bellman}, - pdfcreator = {pdflatex}, - plainpages=false, - pdfpagelabels, - colorlinks=true, - citecolor=magenta -} - -\bibliographystyle{\econtexBibStyle} - -\begin{document} - -% Redefine \onlyinsubfile command defined in local.sty file: -% This lets any submaterial called from this doc know that it is not standalone -%\renewcommand{\onlyinsubfile}[1]{}\renewcommand{\notinsubfile}[1]{#1} - -\pagenumbering{roman} - -\title{Solution Methods for Microeconomic \\ Dynamic Stochastic Optimization Problems} - -\author{Christopher D. Carroll\authNum} - -\keywords{Dynamic Stochastic Optimization, Method of Simulated Moments, Structural Estimation, Indirect Inference} -\jelclass{E21, F41} - -\date{2024-04-20} -\maketitle -\footnotesize - -\noindent Note: The GitHub repo {\SMDSOPrepo} associated with this document contains Python code that produces all results, from scratch, except for the last section on indirect inference. The numerical results have been confirmed by showing that the answers that the raw Python produces correspond to the answers produced by tools available in the {\ARKurl} toolkit, more specifically those in the {\HARKrepo} which has full {\HARKdocs}. The MSM results at the end have been superseded by tools in the {\EMDSOPrepo}. - -\normalsize - -\hypertarget{abstract}{} -\begin{abstract} - These notes describe tools for solving microeconomic dynamic stochastic optimization problems, and show how to use those tools for efficiently estimating a standard life cycle consumption/saving model using microeconomic data. No attempt is made at a systematic overview of the many possible technical choices; instead, I present a specific set of methods that have proven useful in my own work (and explain why other popular methods, such as value function iteration, are a bad idea). Paired with these notes is Python code that solves the problems described in the text. -\end{abstract} - -% \ifthenelse{\boolean{Web}}{}{ -\begin{footnotesize} - \begin{center} - \begin{tabbing} - \texttt{~~~~PDF:~} \= \= {\urlPDF} \\ - \texttt{~Slides:~} \> \> {\urlSlides} \\ - \texttt{~~~~Web:~} \> \> {\urlHTML} \\ - \texttt{~~~Code:~} \> \> {\urlCode} \\ - \texttt{Archive:~} \> \> {\urlRepo} \\ - \texttt{~~~~~~~~~} \> \> \textit{(Contains LaTeX code for this document and software producing figures and results)} - \end{tabbing} - \end{center} -\end{footnotesize} -% } -\begin{authorsinfo} - \name{Carroll: Department of Economics, Johns Hopkins University, Baltimore, MD, \\ - \href{mailto:ccarroll@jhu.edu}{\texttt{ccarroll@jhu.edu}}} -\end{authorsinfo} - -\thanksFooter{The notes were originally written for my Advanced Topics in Macroeconomic Theory class at Johns Hopkins University; instructors elsewhere are welcome to use them for teaching purposes. Relative to earlier drafts, this version incorporates several improvements related to new results in the paper \href{http://econ-ark.github.io/BufferStockTheory}{``Theoretical Foundations of Buffer Stock Saving''} (especially tools for approximating the consumption and value functions).
Like the last major draft, it also builds on material in ``The Method of Endogenous Gridpoints for Solving Dynamic Stochastic Optimization Problems'' published in \textit{Economics Letters}, available at \url{http://www.econ2.jhu.edu/people/ccarroll/EndogenousArchive.zip}, and by including sample code for a method of simulated moments estimation of the life cycle model \textit{a la} \cite{gpLifecycle} and Cagetti~\citeyearpar{cagettiWprofiles}. Background derivations, notation, and related subjects are treated in my class notes for first-year macro, available at \url{http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/consumption}. I am grateful to several generations of graduate students for helping me to refine these notes, to Marc Chan for help in updating the text and software to be consistent with \cite{carrollEGM}, to Kiichi Tokuoka for drafting the section on structural estimation, to Damiano Sandri for exceptionally insightful help in revising and updating the method of simulated moments estimation section, and to Weifeng Wu and Metin Uyanik for revising to be consistent with the `method of moderation' and other improvements. All errors are my own. This document can be cited as \cite{SolvingMicroDSOPs} in the references.} - -\titlepagefinish -%\setcounter{page}{1} - -\thispagestyle{empty} -\ifpdf % The table of contents does not work if not in pdf mode -\tableofcontents \addtocontents{toc}{\vspace{1em}}\newpage -\fi -\newpage\pagenumbering{arabic} - -\hypertarget{introduction}{} -\section{Introduction}\label{sec:introduction} - - These lecture notes provide a gentle introduction to a particular set of solution tools for the canonical consumption-saving/portfolio allocation problem. Specifically, the notes describe and solve optimization problems for a consumer facing uninsurable idiosyncratic risk to nonfinancial income (e.g., labor or transfer income), first without and then with optimal portfolio choice,\footnote{See \cite{merton:restat} and \cite{samuelson:portfolio} for a solution to the problem of a consumer whose only risk is rate-of-return risk on a financial asset; the combined case (both financial and nonfinancial risk) is solved below, and much more closely resembles the case with only nonfinancial risk than it does the case with only financial risk.} with detailed intuitive discussion of various mathematical and computational techniques that, together, speed the solution by many orders of magnitude. The problem is solved with and without liquidity constraints, and the infinite horizon solution is obtained as the limit of the finite horizon solution. After the basic consumption/saving problem with a deterministic interest rate is described and solved, an extension with portfolio choice between a riskless and a risky asset is also solved. Finally, a simple example shows how to use these methods (via the statistical `method of simulated moments' (MSM for short)) to estimate structural parameters like the coefficient of relative risk aversion (\textit{a la} Gourinchas and Parker~\citeyearpar{gpLifecycle} and Cagetti~\citeyearpar{cagettiWprofiles}).
- - -\hypertarget{the-problem}{} -\section{The Problem}\label{sec:the-problem} - -The usual analysis of dynamic stochastic programming problems packs a great many events (intertemporal choice, stochastic shocks, intertemporal returns, income growth, the taking of expectations, time discounting, and more) into a complex decision in which the agent makes an optimal choice simultaneously taking all these elements into account. For the dissection here, we will be careful to break down everything that happens into distinct operations so that each element can be scrutinized and understood in isolation. - -We are interested in the behavior of a consumer who begins {period} $t$ with a certain amount of `capital' $\kLvl_{t}$, which is immediately rewarded by a return factor $R_{t}$ with the proceeds deposited in a \textbf{b}ank \textbf{b}alance: -\begin{equation}\begin{gathered}\begin{aligned}\label{eq:bLvl} - \bLvl_{t} & = \kLvl_{t}R_{t}. - \end{aligned}\end{gathered}\end{equation} - -Simultaneously with the realization of the capital return, the consumer also receives noncapital income $\yLvl_{t}$, which is determined by multiplying the consumer's `permanent income' $\pLvl_{t}$ by a transitory shock $\TranShkEmp_{t}$: -\begin{equation}\begin{gathered}\begin{aligned} - \yLvl_{t} & = \pLvl_{t}\TranShkEmp_{t} \label{eq:yLvl} - \end{aligned}\end{gathered}\end{equation} -whose expectation is 1 (that is, before realization of the transitory shock, the consumer's expectation is that actual income will on average be equal to permanent income $\pLvl_{t}$). - -The combination of bank balances $\bLvl$ and income $\yLvl$ defines the consumer's `market resources' (sometimes called `cash-on-hand,' following~\cite{deatonUnderstandingC}): -\begin{equation}\begin{gathered}\begin{aligned} - \mLvl_{t} & = \bLvl_{t}+\yLvl_{t} \label{eq:mLvl}, - \end{aligned}\end{gathered}\end{equation} -available to be spent on consumption $\cLvl_{t}$ for a consumer subject to a liquidity constraint that requires $\cLvl \leq \mLvl$ (we are not imposing such a constraint yet; see subsection~\ref{subsec:LiqConstrSelfImposed} below for further discussion). - -The consumer's goal is to maximize discounted utility from consumption over the rest of a lifetime ending at date $T$: -% chktex-file 36 - \begin{equation}\label{eq:MaxProb} - \max~\Ex_{t}\left[\sum_{n=0}^{T-t}\beta^{n} \uFunc(\cLvl_{t+n})\right]. - \end{equation} -Income evolves according to: - \begin{equation}\begin{gathered}\begin{aligned} - \pLvl_{t+1} = G_{t+1}\pLvl_{t} & \text{~~ -- permanent labor income dynamics} \label{eq:permincgrow} - \\ \log ~ \TranShkEmp_{t+n} \sim ~\Nrml(-\std_{\TranShkEmp}^{2}/2,\std_{\TranShkEmp}^{2}) & \text{~~ -- lognormal transitory shocks}~\forall~n>0 . - \end{aligned}\end{gathered}\end{equation} - -Equation \eqref{eq:permincgrow} indicates that we are allowing for a predictable average profile of income growth over the lifetime $\{G\}_{0}^{T}$ (to capture typical career wage paths, pension arrangements, etc).\footnote{For expositional and pedagogical purposes, this equation assumes that there are no shocks to permanent income (though they are trivial to add).
A large literature finds that, in reality, permanent (or at least extremely highly persistent) shocks exist and are quite large; such shocks therefore need to be incorporated into any `serious' model (that is, one that hopes to match and explain empirical data), but the treatment of permanent shocks clutters the exposition without adding much to the intuition, so permanent shocks are omitted from the analysis until the last section of the notes, which shows how to match the model with empirical micro data. For a full treatment of the theory including permanent shocks, see \cite{BufferStockTheory}.} Finally, the utility function is of the Constant Relative Risk Aversion (CRRA) form, $\uFunc(\bullet) = \bullet^{1-\rho}/(1-\rho)$. - -It is well known that this problem can be rewritten in recursive (Bellman) form: - \begin{equation}\begin{gathered}\begin{aligned} - v_{t}(\mLvl_{t},\pLvl_{t}) & = \max_{\cCtrl}~ \uFunc(\cCtrl) + \beta \Ex_{t}[ v_{t+1}(\mLvl_{t+1},\pLvl_{t+1})]\label{eq:vrecurse} - \end{aligned}\end{gathered}\end{equation} -subject to the Dynamic Budget Constraint (DBC) implicitly defined by equations~\eqref{eq:bLvl}-\eqref{eq:mLvl} and to the transition equation that defines next period's initial capital as this period's end-of-period assets: -\begin{equation}\begin{gathered}\begin{aligned} - \kLvl_{t+1} & = \aLvl_{t}. \label{eq:transitionstate} - \end{aligned}\end{gathered}\end{equation} - - - -\hypertarget{normalization}{} -\section{Normalization}\label{sec:normalization} - -The single most powerful method for speeding the solution of such models is to redefine the problem in a way that reduces the number of state variables (if at all possible). In the consumption context, the obvious idea is to see whether the problem can be rewritten in terms of the ratio of various variables to permanent noncapital (`labor') income $\pLvl_{t}$ (henceforth for brevity, `permanent income.') - -In the last {period} of life $T$, there is no future, $\vLvl_{T+1} = 0$, so the optimal plan is to consume everything: -\begin{equation}\begin{gathered}\begin{aligned} - \vLvl_{t}(\mLvl_{t},\pLvl_{t}) & = \frac{\mLvl_{t}^{1-\rho}}{1-\rho}. \label{eq:levelTm1} - \end{aligned}\end{gathered}\end{equation} -Now define nonbold variables as the bold variable divided by the level of permanent income in the same period, so that, for example, $m_{t}=\mLvl_{t}/\pLvl_{t}$; and define $v_{t}(m_{t}) = \uFunc(m_{t})$.\footnote{Nonbold value is bold value divided by $\pLvl^{1-\rho}$ rather than $\pLvl$.} For our CRRA utility function, $\uFunc(xy)=x^{1-\rho}\uFunc(y)$, so (\ref{eq:levelTm1}) can be rewritten as -\begin{equation}\begin{gathered}\begin{aligned} - \vLvl_{t}(\mLvl_{t},\pLvl_{t}) & = \pLvl_{t}^{1-\rho}\frac{{m}_{t}^{1-\rho}}{1-\rho} \\ - & = (\pLvl_{\prd-1}G_{t})^{1-\rho}\frac{{m}_{t}^{1-\rho}}{1-\rho} \\ - &= \pLvl_{\prd-1}^{1-\rho}G_{t}^{1-\rho}v_{t}(m_{t}).
\label{eq:vT} - \end{aligned}\end{gathered}\end{equation} - -Now define a new optimization problem: - \begin{equation}\begin{gathered}\begin{aligned} - v_{t}(m_{t}) & = \max_{{c}_{t}} ~~ \uFunc(c_{t})+{\beta}\Ex_{t}[ G_{t+1}^{1-\rho}v_{t+1}(m_{t+1})] \label{eq:vNormed} \\ - & \text{s.t.} \\ - a_{t} & = m_{t}-c_{t} \\ - k_{t+1} & = a_{t} \\ - b_{t+1} & = \underbrace{\left(R/G_{t+1}\right)}_{\equiv \mathcal{R}_{t+1}}k_{t+1} \\ - m_{t+1} & = b_{t+1}+\TranShkEmp_{t+1}, - \end{aligned}\end{gathered}\end{equation} -where division by $G$ in the second-to-last equation yields a normalized return factor $\mathcal{R}$, which is the consequence of the fact that we have divided $t+1$ level variables by $\pLvl_{t+1}=G_{t+1}\pLvl_{t}$. - -Then it is easy to see that for $t=T-1$, -\begin{equation*}\begin{gathered}\begin{aligned} - \vLvl_{t}(\mLvl_{t},\pLvl_{t}) & = \pLvl_{t}^{1-\rho}v_{t}(m_{t}) - \end{aligned}\end{gathered}\end{equation*} -and so on back to all earlier periods. Hence, if we solve the problem \eqref{eq:vNormed} which has only a single state variable $m_{t}$, we can obtain the levels of the value function, consumption, and all other variables from the corresponding permanent-income-normalized solution objects by multiplying each by $\pLvl_{t}$, e.g.\ $\cFunc_{t}(\mLvl_{t},\pLvl_{t})=\pLvl_{t}\cFunc_{t}(\mLvl_{t}/\pLvl_{t})$ (or, for the value function, $\vLvl _{t}(\mLvl_{t},\pLvl_{t}) = \pLvl_{t}^{1-\rho}v_{t}(m_{t}))$. We have thus reduced the problem from two continuous state variables to one (and thereby enormously simplified its solution). - -For future reference it will also be useful to write the problem \eqref{eq:vNormed} in the traditional way, by substituting $b_{t+1},k_{t+1},$ and $a_{t}$ into $m_{t+1}$: -\begin{equation}\begin{gathered}\begin{aligned} - v_{t}(m_{t}) & = \max_{c} ~~ \uFunc(c)+ \beta \Ex_{t}[ G_{t+1}^{1-\rho}v_{t+1}(\overbrace{(m_{t}-c)(R/G_{t+1})+\TranShkEmp_{t+1}}^{m_{t+1}})] \label{eq:vusual}. - \end{aligned}\end{gathered}\end{equation} - - - -\hypertarget{notation}{} -\section{Notation}\label{sec:notation} - -\subsection{Periods, Stages, Steps} - -The problem specified above assumes that the agent has only one decision problem to solve in any {period}. In practice, it is increasingly common to model agents who have multiple choice {stage}s per {period}; an agent's problem might have, say, a consumption decision (call it the $\cFunc$ {stage}), a labor supply {stage} (call it $\labor$), and a choice of what proportion $\Shr$ of their assets to invest in a risky asset (the portfolio-choice {stage}). - -The modeler might well want to explore whether the order in which the {stage}s are solved makes any difference, either to the substantive results or to aspects of the computational solution like speed and accuracy. - -If, as in section \ref{sec:the-problem}, we hard-wire into the solution code for each {stage} an assumption that its successor {stage} will be something in particular (say, the consumption {stage} assumes that the portfolio choice is next), then if we want to change the order of the {stage}s (say, labor supply after consumption, followed by portfolio choice), we will need to re-hard-wire each of the stages to know particular things about its new successor (for example, the specifics of the distribution of the rate of return on the risky asset would need to be known by whatever {stage} precedes the portfolio choice {stage}).
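To make the alternative concrete, here is a minimal Python sketch of the modular design pursued below (a schematic illustration, not the interface of the accompanying code; the names \texttt{solve\_consumption\_stage}, \texttt{solve\_portfolio\_stage}, and \texttt{solve\_period} are hypothetical). Each {stage} solver is handed a continuation-value function and returns an arrival-value function, so no {stage} needs to know anything about its successor:

\begin{lstlisting}[language=Python]
# Schematic sketch of modular stages: each stage solver takes the
# continuation-value function v_cntn as an argument and returns the
# stage's arrival-value function; no stage is hard-wired to a successor.

def solve_consumption_stage(v_cntn):
    def v_arvl(state):
        # ... here one would solve max_c u(c) + v_cntn(state - c) ...
        return v_cntn(state)  # placeholder for the maximized value
    return v_arvl

def solve_portfolio_stage(v_cntn):
    def v_arvl(state):
        # ... here one would solve for the optimal risky share ...
        return v_cntn(state)  # placeholder for the maximized value
    return v_arvl

def solve_period(stages, v_end_of_period):
    # Backward induction within the period: solve the last stage first.
    v = v_end_of_period
    for solve_stage in reversed(stages):
        v = solve_stage(v)
    return v  # beginning-of-period (arrival) value function

# Reordering the stages requires no change to any stage's code:
v_beg = solve_period([solve_consumption_stage, solve_portfolio_stage],
                     v_end_of_period=lambda a: 0.0)
\end{lstlisting}

Because the only object passed between {stage}s is a value function, reordering the {stage}s costs nothing more than reordering the list handed to the hypothetical \texttt{solve\_period} driver.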
- -But one of the cardinal insights of Bellman's (1957, ``Dynamic Programming'') original work is that \emph{everything that matters} for the solution to the current problem is encoded in a `continuation-value function.' %that incorporates \texttt{everything about the future} that is important to solution of the present stage. %This point is important for a number of reasons, but here we will focus on one problem of ignoring it. Actual solution of the maximization problem as specified in \eqref{eq:vNormed} requires the current agent to have knowledge not only of the successor value function, but also of other aspects of the problem like the distributions of the future period's stochastic shocks. So any solution to the problem that directly uses \eqref{eq:vNormed} will need to hard-wire into itself the specifics of the successor problem. - -Using Bellman's insight, we describe here a framework for isolating the {stage} problems within a {period} from each other, and the {period} from its successors in any future {period}; the advantage of this is that the isolated {stage} and {period} problems will then be `modular': We can solve them in any order \textit{without changing any code}. After considering the {stage}-order $[\ell,\cFunc,\Shr]$, the modeler can costlessly reorder the {stage}s to consider, say, the order $[\ell,\Shr,\cFunc]$.\footnote{As long as the beginning-of-{stage} and end-of-{stage} value functions for the {stage}s all depend on the same state variables; see section \ref{sec:multiple-control-variables} for further discussion.} - -\subsection{Steps} - -The key to the framework is distinguishing, within each {stage}'s Bellman problem, three {steps}: - -\begin{enumerate} -\item \textbf{\Arrival}: Incoming state variables (e.g., $k$) are known, but any shocks associated with the period have not been realized and decision(s) have not yet been made. -\item \textbf{\Decision}: All exogenous variables (like income shocks, rate of return shocks, and predictable income growth $G$) have been realized (so that, e.g., $m$'s value is known) and the agent solves the optimization problem. -\item \textbf{\Continuation}: After all decisions have been made, their consequences are measured by evaluation of the continuing-value function at the values of the `outgoing' state variables (sometimes called `post-state' variables). -\end{enumerate} - -%In the standard treatment in the literature, the (implicit) default assumption is that the {step} where the agent is solving a decision problem is the unique {step} at which the problem is defined. This is what was done above, when (for example) in \eqref{eq:vNormed} we related the value $\vFunc$ of the current decision to the expectation of the future value $\vFunc_{\prd+1}$. Here, instead, we want to encapsulate the current {stage}'s problem as a standalone object, which is solved by taking as given an exogenously-provided continuation-value function (in our case, $\vEndStp(a)$). - -When we want to refer to a specific {step} in the {stage} we will do so by using an indicator which identifies that {step}.
Here we use the consumption {stage} problem described above to exemplify the usage:
\begin{center}
  \begin{tabular}{r|c|c|l|l}
    {Step} & Indicator & State & Usage & Explanation \\ \hline
    {\Arrival} & $ \arvl $ & $k$ & $\vBegStp(k)$ & value at entry to {stage} (before shocks) \\
    {\Decision}(s) & (blank) & $m$ & $\vMidStp(m)$ & value of {stage}-decision (after shocks) \\
    {\Continuation} & $ \cntn $ & $a$ & $\vEndStp(a)$ & value at exit (after decision) \\ \hline
  \end{tabular}
\end{center}

Notice that the value functions at different {step}s of the {stage} have distinct state variables. Only $k$ is known at the beginning of the {period}, and the other variables take on their values through equations like $b = k \mathcal{R}$ and $m = b+\TranShkEmp$. We will refer to such within-the-{stage} creation of variables as {evolutions}. Thus, the consumption problem has two {evolutions}: from $k$ to $m$ and from $m$ to $a$.

\subsection{Transitions}

In the backward-induction world of Bellman solutions, to solve the problem of a particular {period} we must start with an end-of-{period} value function, which we designate by including the {period} indicator in the subscript:
\begin{equation}\begin{gathered}\begin{aligned}
  \vEndPrd(a) & \mapsto \beta \vBegPrdNxt(\overbrace{a}^{=k}), \label{eq:trns-single-prd}
 \end{aligned}\end{gathered}\end{equation}
and we are not done solving the problem of the entire {period} until we have constructed a beginning-of-{period} value function $\vBegPrd(k)$.

Once we are inside a {stage}, we will also need an end-of-{stage} value function. For the last {stage} in a {period} the end-of-{stage} function is taken to be the end-of-{period} value function:
\begin{equation}\begin{gathered}\begin{aligned}
  \vEndStg(a) \mapsto \vEndPrd(a).
 \end{aligned}\end{gathered}\end{equation}

One way to describe this is that when we are considering the solution to the current {stage}, we will be working with what, in computer programming, is called a `local function' $\vEndStg(a)$ whose value at the beginning of the {stage}-solution algorithm has been initialized to the value of a previously-computed `global function' $\vEndPrd(a)$ that had already been constructed by mapping itself to $\beta \vBegPrdNxt$ (equation \eqref{eq:trns-single-prd}).
\hypertarget{decision-problem}{}

\subsection{The Decision Problem in the New Notation}\label{subsec:decision-problem}

The {\Decision} problem can now be written much more cleanly than in equation \eqref{eq:vNormed}:
 \begin{equation}\begin{gathered}\begin{aligned}
  v(m) & = \max_{c}~ \uFunc(c) + v_{\cntn}(\overbrace{m-c}^{a}) \label{eq:vMid}
 \end{aligned}\end{gathered}\end{equation}
whose first order condition with respect to $c$ is
\begin{equation}\begin{gathered}\begin{aligned}
  \uFunc^{c}(c) &= \vEndStp^{a}(m-c) \label{eq:upEqbetaOp}
\end{aligned}\end{gathered}\end{equation}
which is mathematically equivalent to the usual Euler equation for consumption. (We will reuse this formulation when we turn to section~\ref{subsec:egm}.)

Having defined these notational conventions, we are now ready to move to substance.

\begin{comment}
  %
  \subsection{Implementation in Python}

  The code implementing the tasks outlined in each of the sections to come is available in the \texttt{\href{https://econ-ark.org/materials/SolvingMicroDSOPs}{SolvingMicroDSOPs}} jupyter notebook, written in \href{https://python.org}{Python}.
The notebook imports various modules, including the standard \texttt{numpy} and \texttt{scipy} modules used for numerical methods in Python, as well as some user-defined modules designed to provide numerical solutions to the consumer's problem from the previous section. Before delving into the computational exercise, it is essential to touch on the practicality of these custom modules.

  \subsubsection{Useful auxiliary files}

  In this exercise, two primary user-defined modules are frequently imported and utilized. The first is the \texttt{gothic\_class} module, which contains functions describing the end-of-period value functions found in equations \eqref{eq:vBegStp} - \eqref{eq:vEnd} (and the corresponding first and second derivatives). %The advantage of defining functions in the code which decompose the consumer's optimal behavior in a given period will become evident in section \ref{subsec:transformation}

  The \texttt{resources} module is also used repeatedly throughout the notebook. This file has three primary objectives: (i) providing functions that discretize the continuous distributions from the theoretical model that describe the uncertainty a consumer faces, (ii) defining the utility function over consumption under a number of specifications, and (iii) enhancing the grid of end-of-period assets for which functions (such as those from the \texttt{gothic\_class} module) will be defined. These objectives will be discussed in greater detail, with respect to the numerical methods used to solve the problem, in subsequent sections of this document.
\end{comment}


%\input{sec_the-usual-theory-input}

\hypertarget{solving-the-next-to-last-period}{}
\hypertarget{solving-the-next}{}
\section{Solving the Next-to-Last Period}\label{sec:solving-the-next}

To reduce clutter, we now temporarily assume that $G_{t}=1$ for all $t$, so that the $G$ terms from the earlier derivations disappear; setting $t=T$, the problem in the second-to-last period of life can then be expressed as
\begin{equation}\begin{gathered}\begin{aligned}
  v_{\MidPrdLsT}(\mStte) & = \max_{\cCtrl} ~~ \uFunc(\cCtrl) +
  \vEndPrdLsT(\overbrace{\mStte-\cCtrl}^{\aStte})
  \label{eq:vEndTm1}
\end{aligned}\end{gathered}\end{equation}
where
\begin{equation*}\begin{gathered}\begin{aligned}
  v_{\EndPrdLsT}(\aStte) & = \beta v_{\BegPrd}(\aStte)
\\ & = \beta \Ex_{\BegPrd} \left[\PermGroFacAdjV v_{\MidPrd}(\underbrace{\aStte \mathcal{R}_{t} + \TranShkEmp_{t}}_{{m}_{t}})\right]
 \end{aligned}\end{gathered}\end{equation*}

% \begin{equation*}\begin{gathered}\begin{aligned}
% \vFunc_{\prdLsT}(\mStte) & = \max_{\cCtrl} ~~ \uFunc(\cCtrl)
% + \DiscFac \Ex_{\EndPrdLsT} \left[\PermGroFacAdjV \vFunc_{\MidPrd}(\underbrace{(\mStte-\cCtrl)\RNrm_{\prdT} + \TranShkEmp_{\prdT}}_{{m}_{\prdT}})\right].
% \end{aligned}\end{gathered}\end{equation*}


Using (0) $t=T$; (1) $v_{t}(m)=\uFunc(m)$; (2) the definition of $\uFunc(m)$; and (3) the definition of the expectations operator, we obtain %\newcommand{\TranShkEmpDummy}{\vartheta}
\begin{equation}\begin{gathered}\begin{aligned}
  v_{\BegPrd}(\aStte) & = \PermGroFacAdjV\int_{0}^{\infty} \frac{\left(\aStte \mathcal{R}_{t}+ \TranShkEmpDummy\right)^{1-\rho}}{1-\rho} d\FDist(\TranShkEmpDummy) \label{eq:NumDefInt}
 \end{aligned}\end{gathered}\end{equation}
where $\FDist(\TranShkEmp)$ is the cumulative distribution function for ${\TranShkEmp}$.
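To see concretely why a direct attack on \eqref{eq:NumDefInt} is costly, consider the following minimal sketch (with purely illustrative parameter values, and exploiting our temporary assumption that $G_{t}=1$); every name here is ours, not the notebook's:

\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
\begin{lstlisting}
import numpy as np
from scipy.integrate import quad
from scipy.stats import lognorm

rho, R, sigma = 2.0, 1.04, 0.1  # illustrative parameter values
# lognormal transitory shock normalized so that E[theta] = 1
F = lognorm(s=sigma, scale=np.exp(-sigma**2 / 2))

def v_Beg(a):
    """Evaluate eq. (NumDefInt) directly by (slow) numerical integration."""
    integrand = lambda th: (a * R + th)**(1 - rho) / (1 - rho) * F.pdf(th)
    value, abserr = quad(integrand, 0.0, np.inf)
    return value
\end{lstlisting}

Every trial value of $\cCtrl$ examined by a maximizer triggers one such integration, which is the source of the slowness documented next.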
\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
\lstinputlisting{./Code/Python/snippets/rawsolution.py}

This maximization problem implicitly defines a `local function' $\cFunc_{\prd-1}(m)$ that yields optimal consumption in period $t-1$ for any specific numerical level of resources like $m=1.7$.% (When we need to use this function from some context outside of the local context in which it was solved, we can reference it by its absolute index, $\cFunc_{\prdT-1}$).

But because there is no general analytical solution to this problem, for any given $m$ we must use computational tools to find the $\cCtrl$ that maximizes the expression. This is excruciatingly slow because for every potential $c$ to be considered, a definite integral over the interval $(0,\infty)$ must be calculated numerically, and numerical integration is \textit{very} slow (especially over an unbounded domain!).

\hypertarget{discretizing-the-distribution}{}
\subsection{Discretizing the Distribution}
Our first speedup trick is therefore to construct a discrete approximation to the lognormal distribution that can be used in place of numerical integration. That is, we want to approximate the expectation over $\TranShkEmp$ of a function $g(\TranShkEmp)$ by calculating its value at a set of $n_{\TranShkEmp}$ points $\TranShkEmp_{i}$, each of which has an associated probability weight $w_{i}$:
\begin{equation*}\begin{gathered}\begin{aligned}
  \Ex[g(\TranShkEmp)] & = \int_{\TranShkEmpMin}^{\TranShkEmpMax}g(\TranShkEmpDummy)d\FDist(\TranShkEmpDummy) \\
  & \approx \sum_{i = 1}^{n_{\TranShkEmp}}w_{i}g(\TranShkEmp_{i})
 \end{aligned}\end{gathered}\end{equation*}
(because adding $n_{\TranShkEmp}$ weighted values to each other is enormously faster than general-purpose numerical integration).

Such a procedure is called a `quadrature' method of integration; \cite{Tanaka2013-bc} survey a number of options, but for our purposes we choose the one which is easiest to understand: an `equiprobable' approximation (that is, one where each of the values of $\TranShkEmp_{i}$ has an equal probability, equal to $1/n_{\TranShkEmp}$).

We calculate such an $n_{\TranShkEmp}$-point approximation as follows.

Define a set of points from $\sharp_{0}$ to $\sharp_{n_{\TranShkEmp}}$ on the $[0,1]$ interval as the elements of the set $\sharp = \{0,1/n_{\TranShkEmp},2/n_{\TranShkEmp}, \ldots,1\}$.\footnote{These points define intervals that constitute a partition of the domain of $\FDist$.} Call the inverse of the $\TranShkEmp$ distribution $\FDist^{-1}$, and define the points $\sharp^{-1}_{i} = \FDist^{-1}(\sharp_{i})$. Then the conditional mean of $\TranShkEmp$ in each of the intervals numbered 1 to $n_{\TranShkEmp}$ is:
\begin{equation}\begin{gathered}\begin{aligned}
  \TranShkEmp_{i} \equiv \Ex[\TranShkEmp ~|~ \sharp_{i-1}^{-1} \leq \TranShkEmp < \sharp_{i}^{-1}] & = n_{\TranShkEmp}\int_{\sharp^{-1}_{i-1}}^{\sharp^{-1}_{i}} \vartheta ~ d\FDist(\vartheta)
 \end{aligned}\end{gathered}\end{equation}
(the factor $n_{\TranShkEmp}$ appears because each interval has probability mass $1/n_{\TranShkEmp}$), and when the integral is evaluated numerically for each $i$ the result is a set of values of $\TranShkEmp$ that correspond to the mean value in each of the $n_{\TranShkEmp}$ intervals.

The method is illustrated in Figure~\ref{fig:discreteapprox}. The solid continuous curve represents the ``true'' CDF $\FDist(\TranShkEmp)$ for a lognormal distribution such that $\Ex[\TranShkEmp] = 1$, $\sigma_{\TranShkEmp} = 0.1$.
The short vertical line segments represent the $n_{\TranShkEmp}$ equiprobable values of $\TranShkEmp_{i}$ which are used to approximate this distribution.\footnote{More sophisticated approximation methods exist (e.g.\ Gauss-Hermite quadrature; see \cite{kopecky2010finite} for a discussion of other alternatives), but the method described here is easy to understand, quick to calculate, and has additional advantages briefly described in the discussion of simulation below.}
 \hypertarget{discreteApprox}{}
 \begin{figure}
   \includegraphics[width=0.8\textwidth]{\econtexRoot/Figures/discreteApprox}
   \caption{Equiprobable Discrete Approximation to Lognormal Distribution $\FDist$}
   \label{fig:discreteapprox}
 \end{figure}


Because one of the purposes of these notes is to connect the math to the code that solves the math, we display here a brief snippet from the notebook that constructs these points.


\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
\lstinputlisting{./Code/Python/snippets/equiprobable-make.py}\nopagebreak

With this discretization in hand, the end-of-period value function can be approximated as the simple average of the values generated by the $n_{\TranShkEmp}$ equiprobable shocks:
 \begin{equation}\begin{gathered}\begin{aligned}
   v_{(\prd-1)_\cntn}(\aStte) & = \beta \PermGroFacAdjV\left(\frac{1}{n_{\TranShkEmp}}\right)\sum_{i=1}^{n_{\TranShkEmp}} \frac{\left(\mathcal{R}_{t} \aStte + \TranShkEmp_{i}\right)^{1-\rho}}{1-\rho} \label{eq:vDiscrete}
 \end{aligned}\end{gathered}\end{equation}

We now substitute our approximation \eqref{eq:vDiscrete} for $\vEndPrdLsT(a)$ in \eqref{eq:vEndTm1}; because \eqref{eq:vDiscrete} is simply the sum of $n_{\TranShkEmp}$ numbers, it is easy to calculate (compared to the full-fledged numerical integration \eqref{eq:NumDefInt} that it replaces).

% so we can rewrite the maximization problem that defines the middle step of period {$\prdLst$} as
% \begin{equation}\begin{gathered}\begin{aligned}
% \vFunc_{\MidPrdLsT}(\mStte) & = \max_{\cCtrl}
% \left\{
% \frac{\cCtrl^{1-\CRRA}}{1-\CRRA} +
% \vFunc_{\MidPrd}(\mStte-\cCtrl)
% \right\}.
% \label{eq:vEndTm1}
% \end{aligned}\end{gathered}\end{equation}

\lstinputlisting{./Code/Python/snippets/equiprobable-max-using.py}

\begin{comment}
  In the {\SMDSOPntbk} notebook, the section ``Discretization of the Income Shock Distribution'' provides code that instantiates the \texttt{DiscreteApproximation} class defined in the \texttt{resources} module. This class creates a 7-point discretization of the continuous log-normal distribution of transitory shocks to income, where the mean value is $-.5 \sigma^2$ and the standard deviation is $\sigma = .5$.

  A close look at the \texttt{DiscreteApproximation} class and its subclasses should convince you that the code is simply a computational implementation of the mathematical description of equiprobable discrete approximation in this section. Moreover, the Python code generates a graph of the discretized distribution depicted in \ref{fig:discreteapprox}.
\end{comment}

\hypertarget{the-approximate-consumption-and-value-functions}{}
\subsection{The Approximate Consumption and Value Functions}

Given any particular value of $\mStte$, a numerical maximization tool can now find the $\cCtrl$ that solves \eqref{eq:vEndTm1} in a reasonable amount of time.

\begin{comment}
  % The {\SMDSOPntbk} notebook follows a series of steps to achieve this.
Initially, parameter values for the coefficient of relative risk aversion (CRRA, $\rho$), the discount factor ($\beta$), the permanent income growth factor ($\PermGroFac$), and the risk-free interest rate ($R$) are specified in ``Define Parameters, Grids, and the Utility Function.''

  % After defining the utility function, the `natural borrowing constraint' is defined as $\underline{a}_{\prdT-1}=-\underline{\TranShkEmp}\RNrm_{\prdT}^{-1}$, which will be discussed in greater depth in section \ref{subsec:LiqConstrSelfImposed}. %Following the reformulation of the maximization problem, an instance of the \texttt{gothic\_class} is created using the specifications and the discretized distribution described in the prior lines of code; this is required to provide the numerical solution.
\end{comment}

The notebook code responsible for computing an estimated consumption function begins in ``Solving the Model by Value Function Maximization,'' where a vector containing a set of possible values of market resources $m$ is created. (In the code, the various $m$ vectors have names beginning {\mVec}. In these notes we will use boldface italics to represent vectors, so we can refer to our collection of $m$ points as $\vctr{m}$, with values indexed by brackets: $\vctr{m}[1]$ is the first entry in the vector, up to a last entry $\vctr{m}[-1]$. We arbitrarily (and suboptimally) pick the integers from 0 to 4 as our five {\mVec} gridpoints; in the code, \code{mVec\_int}$= \{0.,1.,2.,3.,4.\}$.)

% Finally, the previously computed values of optimal $c$ and the grid of market resources are combined to generate a graph of the approximated consumption function for this specific instance of the problem. To reduce the computational challenge of solving the problem, the process is evaluated only at a small number of gridpoints.


\hypertarget{an-interpolated-consumption-function}{}
\subsection{An Interpolated Consumption Function} \label{subsec:LinInterp}

We can now apply our solution to \eqref{eq:vEndTm1} to each of the values in $\vctr{m}$, generating a corresponding vector of optimal consumption choices $\vctr{c}$. This is called `sampling' the consumption function. Using the ordered pairs $\{\vctr{m},\vctr{c}\}$ we can create a piecewise linear `interpolating function' (a `spline') which, when applied to any input $\vctr{m}[1] \leq m\leq \vctr{m}[-1]$, will yield the value of $c$ that corresponds to a linear `connect-the-dots' interpolation of the value of $c$ from the values of the two nearest computed $\{m,c\}$ points.\footnote{For a useful treatment of various kinds of interpolation appropriate for different questions, see } % AL: Please provide href for your interpolation package

This is accomplished in ``An Interpolated Consumption Function,'' which generates an interpolating function that we designate $\Aprx{\cFunc}_{\MidStpLsT}(\mStte)$. %When called with an $\mStte$ that is equal to one of the points in $\code{{{\mVec}\_int}}$, $\Aprx{\cFunc}_{\prdT-1}$ returns the associated value of $\vctr{c}_{\code{\prdT-1}}$, and when called with a value of $\mStte$ that is not exactly equal to one of the \texttt{mVec\_int}, returns the value of $c$ that reflects a linear interpolation between the $\vctr{c}_{\code{\prdT-1}}$ points associated with the two \texttt{mVec\_int} points immediately above and below $\mStte$.

Figures \ref{fig:PlotcTm1Simple} and~\ref{fig:PlotVTm1Simple} show plots of the constructed $\Aprx{\cFunc}_{\prd-1}$ and $\Aprx{v}_{\prd-1}$.
While the $\Aprx{\cFunc}_{\prd-1}$ function looks very smooth, the fact that the $\Aprx{v}_{\prd-1}$ function is a set of line segments is very evident. This figure provides the beginning of the intuition for why trying to approximate the value function directly is a bad idea (in this context).\footnote{For some problems, especially ones with discrete choices, value function approximation is unavoidable; nevertheless, even in such problems, the techniques sketched below can be very useful across much of the range over which the problem is defined.}

\hypertarget{PlotcTm1Simple}{}
\begin{figure}
  \centerline{\includegraphics[width=6in]{\FigDir/PlotcTm1Simple}}
  \caption{$\cFunc_{T-1}(\mStte)$ (solid) versus $\Aprx{\cFunc}_{T-1}(\mStte)$ (dashed)}
  \label{fig:PlotcTm1Simple}
\end{figure}

\hypertarget{PlotvTm1Simple}{}
\begin{figure}
  \centerline{\includegraphics[width=6in]{\FigDir/PlotVTm1Simple}}
  \caption{$v_{T-1}$ (solid) versus $\Aprx{v}_{T-1}(\mStte)$ (dashed)}
  \label{fig:PlotVTm1Simple}
\end{figure}


\hypertarget{interpolating-expectations}{}
\subsection{Interpolating Expectations}

\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Good approximation in the sense that increasing the number of points makes no discernable difference.}}{}

Piecewise linear `spline' interpolation as described above works well for generating a good approximation to the true optimal consumption function. However, there is a clear inefficiency in the program: Since it uses equation \eqref{eq:vEndTm1}, for every value of $\mStte$ the program must calculate the utility consequences of various possible choices of $\cCtrl$ (and therefore $a_{\prd-1}$) as it searches for the best choice.

For any given index $j$ in $\vctr{m}[j]$, as the algorithm searches for the corresponding optimal $a$ it will end up calculating $v_{\EndPrdLsT}(\tilde{a})$ for many $\tilde{a}$ values close to the optimal $a_{\prd-1}$. Indeed, even when searching for the optimal $a$ for a \emph{different} $m$ (say $\vctr{m}[k]$ for $k \neq j$) the search process might compute $v_{\EndPrdLsT}(a)$ for an $a$ close to the correct optimal $a$ for $\vctr{m}[j]$. But if that difficult computation does not correspond to the exact solution to the $\vctr{m}[k]$ problem, it is discarded.

To avoid solving the problem independently over and over again for multitudes of values of $a$ that are close to each other, we can employ the same interpolation technique used above to construct a direct numerical approximation to the value function: Define a vector of possible values for end-of-period assets at time $t-1$, $\vctr{a}$ (\code{aVec} in the code). Next, construct $\vctr{v} = v_{(\prd-1)_\cntn}(\vctr{a})$ using equation (\ref{eq:vDiscrete}); then construct an approximation $\Aprx{v}_{({\prd-1})_\cntn}(a)$ by passing the vectors $\vctr{a}$ and $\vctr{v}$ as arguments to a piecewise-linear interpolator (e.g., the one in \texttt{scipy.interpolate}).%
% (These lists contain the points of the $\vctr{a}_{{\prdT-1}}$ and $\vctr{v}_{{\prdT-1}}$ vectors, respectively.)

The notebook section ``Interpolating Expectations'' now interpolates the expected value of \textit{ending} the period with a given amount of assets.\footnote{What we are doing here is closely related to `the method of parameterized expectations' of \cite{denHaanMarcet:parameterized}; the only difference is that our method is essentially a nonparametric version.} %The problem is solved in the same block with the remaining lines of code.
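A minimal sketch of this construction, assuming a Python function \code{v\_End} implementing \eqref{eq:vDiscrete} has been defined (in the notebook the \texttt{gothic\_class} module plays this role; the names below are illustrative):

\begin{lstlisting}
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

aVec = np.array([0., 1., 2., 3., 4.])      # grid of end-of-period assets
vVec = np.array([v_End(a) for a in aVec])  # eq. (vDiscrete) at each gridpoint
# k=1 yields the piecewise-linear 'connect-the-dots' interpolant described above
v_End_approx = InterpolatedUnivariateSpline(aVec, vVec, k=1)
\end{lstlisting}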
Figure~\ref{fig:PlotOTm1RawVSInt} compares the true value function to the approximation produced by following the interpolation procedure; the approximated and exact functions are of course identical at the gridpoints of $\vctr{a}$, and they appear reasonably close except in the region below $a_{\prd-1}=1$.

\hypertarget{PlotOTm1RawVSInt}{}
\begin{figure}
  \centerline{\includegraphics[width=6in]{\FigDir/PlotOTm1RawVSInt}}
  \caption{End-Of-Period Value $v_{({\prd-1})_\cntn}(a_{\prd-1})$ (solid) versus $\Aprx{v}_{({T-1})_\cntn}(a_{T-1})$ (dashed)}
  \label{fig:PlotOTm1RawVSInt}
\end{figure}

\hypertarget{PlotComparecTm1AB}{}
\begin{figure}
  \centerline{\includegraphics[width=6in]{\FigDir/PlotComparecTm1AB}}
  \caption{$\cFunc_{T-1}(\mStte)$ (solid) versus $\Aprx{\cFunc}_{T-1}(\mStte)$ (dashed)}
  \label{fig:PlotComparecTm1AB}
\end{figure}

\Fix{\marginpar{\tiny In all figs, replace gothic h with notation corresponding to the lecture notes.}}

% \ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Don't skip the 2-3-3-4 example in the text - it will be used again in a moment.}}{}
Nevertheless, the consumption rule obtained when the approximating $\Aprx{v}_{({\prd-1})_\cntn}(a_{\prd-1})$ is used instead of $v_{({\prd-1})_\cntn}(a_{\prd-1})$ is surprisingly bad, as shown in figure \ref{fig:PlotComparecTm1AB}. For example, when $\mStte$ goes from 2 to 3, $\Aprx{\cFunc}_{\prd-1}$ goes from about 1 to about 2, yet when $\mStte$ goes from 3 to 4, $\Aprx{\cFunc}_{\prd-1}$ goes from about 2 to only about 2.05. The function fails even to be concave, which is distressing because Carroll and Kimball~\citeyearpar{ckConcavity} prove that the correct consumption function is strictly concave in a wide class of problems that includes this one.

\hypertarget{value-function-versus-first-order-condition}{}
\subsection{Value Function versus First Order Condition}\label{subsec:vVsuP}

Loosely speaking, our difficulty reflects the fact that the consumption choice is governed by the \textit{marginal} value function, not by the \textit{level} of the value function (which is the object that we approximated). To understand this point, recall that a quadratic utility function
\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Intuitively speaking, if one's goal is to accurately capture behavior that is governed by marginal utility or the marginal value function, numerical techniques that approximate the \textit{marginal} value function are likely to work better.}} exhibits risk aversion because, with a stochastic $c$,
\begin{equation}
  \Ex[-(c - \cancel{c})^{2}] < - (\Ex[c] - \cancel{c})^{2}
\end{equation}
(where $\cancel{c}$ is the `bliss point' which is assumed always to exceed feasible $c$). However, unlike the CRRA case, with quadratic utility the consumption/saving \textit{behavior} of consumers is unaffected by risk: behavior is determined by the first order condition, which depends on \textit{marginal} utility, and when utility is quadratic, marginal utility is unaffected by risk:
\begin{equation}
  \Ex[-2(c - \cancel{c})] = - 2(\Ex[c] - \cancel{c}).
\end{equation}

Intuitively, if one's goal is to accurately capture choices that are governed by marginal value, numerical techniques that approximate the \textit{marginal} value function will yield a more accurate approximation to optimal behavior than techniques that approximate the \textit{level} of the value function.
The first order condition of the maximization problem in period $T-1$ is:
 \begin{equation}\begin{gathered}\begin{aligned}
   \uFunc^{c}(\cCtrl) & = \beta \Ex_{(T-1)_\cntn} [\PermGroFacAdjMu R \uFunc^{c}(c_{t})] %\label{eq:focraw}
   \\ \cCtrl^{-\rho} & = R \beta \left(\frac{1}{n_{\TranShkEmp}}\right) \sum_{i=1}^{n_{\TranShkEmp}} \PermGroFacAdjMu\left(R (\mStte-\cCtrl) + \TranShkEmp_{i}\right)^{-\rho} \label{eq:FOCTm1}.
 \end{aligned}\end{gathered}\end{equation}
\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Go from the first to the second line in \eqref{eq:FOCTm1} by substituting $\uFunc^{c}(c)=c^{-\rho}$ and using the discrete approximation to the integral.}}{}
\hypertarget{PlotuPrimeVSOPrime}{}
\begin{figure}
  \centerline{\includegraphics[width=6in]{\FigDir/PlotuPrimeVSOPrime}}
  \caption{$\uFunc^{c}(c)$ versus $v_{({T-1})_\cntn}^{a}(3-c), v_{({T-1})_\cntn}^{a}(4-c), \Aprx{v}_{({T-1})_\cntn}^{a}(3-c), \Aprx{v}_{({T-1})_\cntn}^{a}(4-c)$}
  \label{fig:PlotuPrimeVSOPrime}
\end{figure}

In the notebook, the ``Value Function versus the First Order Condition'' section completes the task of finding the values of consumption which satisfy the first order condition in \eqref{eq:FOCTm1}, using the \href{https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brentq.html}{\texttt{brentq}} root-finding function from the \texttt{scipy} package. %Notice that the use of \texttt{u.prime} and \texttt{gothic.VP\_Tminus1} is possible since they are already defined in the \texttt{resources} and \texttt{gothic\_class} modules.

The downward-sloping curve in Figure \ref{fig:PlotuPrimeVSOPrime} shows the value of $\cCtrl^{-\rho}$ for our baseline parameter values for $0 < \cCtrl \leq 4$ (the horizontal axis). The solid upward-sloping curve shows the value of the RHS of (\ref{eq:FOCTm1}) as a function of $\cCtrl$ under the assumption that $\mStte=3$. Constructing this figure is time-consuming, because for every value of $\cCtrl$ plotted we must calculate the RHS of (\ref{eq:FOCTm1}). The value of $\cCtrl$ for which the RHS and LHS of (\ref{eq:FOCTm1}) are equal is the optimal level of consumption given that $\mStte=3$, so the intersection of the downward-sloping and the upward-sloping curves gives the (approximated) optimal value of $\cCtrl$. As we can see, the two curves intersect just below $\cCtrl=2$. Similarly, the upward-sloping dashed curve shows the value of the RHS of (\ref{eq:FOCTm1}) under the assumption that $\mStte=4$, and the intersection of this curve with $\uFunc^{c}(\cCtrl)$ yields the optimal level of consumption if $\mStte=4$. These two curves intersect slightly below $\cCtrl=2.5$. Thus, increasing $\mStte$ from 3 to 4 increases optimal consumption by about 0.5.

\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Flip back to Figure 4 to make the point that $\Aprx{\vEnd}^{a}$ is a step function.}}{} Now consider the derivative of our approximating function $\Aprx{v}_{({\prd-1})_\cntn}(a_{\prd-1})$. Because we have constructed $\Aprx{v}_{({\prd-1})_\cntn}$ as a linear interpolation, the slope of $\Aprx{v}_{({\prd-1})_\cntn}(a_{\prd-1})$ between any two adjacent points $\{\vctr{a}[i],\vctr{a}[{i+1}]\}$ is constant. The slope immediately below any particular gridpoint is, of course, different from the slope above that gridpoint, a fact which implies that the derivative of $\Aprx{v}_{({\prd-1})_\cntn}(a_{\prd-1})$ follows a step function.
The solid-line step function in Figure \ref{fig:PlotuPrimeVSOPrime} depicts the actual value of $\Aprx{v}_{({\prd-1})_\cntn}^{a}(3-\cCtrl)$. When we attempt to find optimal values of $\cCtrl$ given $\mStte$ using $\Aprx{v}_{({\prd-1})_\cntn}(a_{\prd-1})$, the numerical optimization routine will return the $\cCtrl$ for which $\uFunc^{c}(\cCtrl) = \Aprx{v}^{a}_{({\prd-1})_\cntn}(\mStte-\cCtrl)$. Thus, for $\mStte=3$ the program will return the value of $\cCtrl$ for which the downward-sloping $\uFunc^{c}(\cCtrl)$ curve intersects with $\Aprx{v}_{({\prd-1})_\cntn}^{a}(3-\cCtrl)$; as the diagram shows, this value is exactly equal to 2. Similarly, if we ask the routine to find the optimal $\cCtrl$ for $\mStte=4$, it finds the point of intersection of $\uFunc^{c}(\cCtrl)$ with $\Aprx{v}_{({\prd-1})_\cntn}^{a}(4-\cCtrl)$; and as the diagram shows, this intersection is only slightly above 2. Hence, this figure illustrates why the numerical consumption function plotted earlier returned values very close to $\cCtrl=2$ for both $\mStte=3$ and $\mStte=4$.

We would obviously obtain much better estimates of the point of intersection between $\uFunc^{c}(\cCtrl)$ and $v_{({\prd-1})_\cntn}^{a}(\mStte-\cCtrl)$ if our estimate of $\Aprx{v}^{a}_{({\prd-1})_\cntn}$ were not a step function. In fact, we already know how to construct linear interpolations to functions, so the obvious next step is to construct a linear interpolating approximation to the \textit{expected marginal value of end-of-period assets} function at the points in $\vctr{a}$:
\begin{equation}\begin{gathered}\begin{aligned}
  v_{({\prd-1})_\cntn}^{a}(\vctr{a}) & = \beta R \PermGroFacAdjMu \left(\frac{1}{n_{\TranShkEmp}}\right) \sum_{i=1}^{n_{\TranShkEmp}} \left(\mathcal{R}_{t} \vctr{a} + \TranShkEmp_{i}\right)^{-\rho} \label{eq:vEndPrimeTm1}
 \end{aligned}\end{gathered}\end{equation}
yielding the vector $\vctr{v}^{a}_{({\prd-1})_\cntn}$ of expected end-of-period-$(T-1)$ marginal values of assets corresponding to \code{aVec}, and then to construct $\Aprx{v}_{({\prd-1})_\cntn}^{a}(a_{\prd-1})$ as the linear interpolating function that fits this set of points.

\hypertarget{PlotOPRawVSFOC}{}
\begin{figure}
  \centerline{\includegraphics[width=6in]{\FigDir/PlotOPRawVSFOC}}
  \caption{$v_{({\prd-1})_\cntn}^{a}(a_{\prd-1})$ versus $\Aprx{v}_{({\prd-1})_\cntn}^{a}(a_{\prd-1})$}
  \label{fig:PlotOPRawVSFOC}
\end{figure}


% This is done by making a call to the \texttt{InterpolatedUnivariateSpline} function, passing it \code{aVec} and \texttt{vpVec} as arguments. Note that in defining the list of values \texttt{vpVec}, we again make use of the predefined \texttt{gothic.VP\_Tminus1} function. These steps are the embodiment of equation~(\ref{eq:vEndPrimeTm1}), and construct the interpolation of the expected marginal value of end-of-period assets as described above.

The results are shown in Figure \ref{fig:PlotOPRawVSFOC}. The linear interpolating approximation looks roughly as good (or bad) for the \textit{marginal} value function as it was for the level of the value function. However, Figure \ref{fig:PlotcTm1ABC} shows that the new consumption function (long dashes) is a considerably better approximation of the true consumption function (solid) than was the consumption function obtained by approximating the level of the value function (short dashes).
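For concreteness, here is a sketch of the improved procedure just described, reusing the illustrative \code{aVec} and spline tools from above (with $\beta$, $R$, $\rho$, and the equiprobable shock points \code{theta\_pts} assumed given, $G=1$ so the growth adjustment drops out, and an interior solution assumed when bracketing the root):

\begin{lstlisting}
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.optimize import brentq

def vP_End(a):
    """Discretized end-of-period marginal value, eq. (vEndPrimeTm1)."""
    return beta * R * np.mean((R * a + theta_pts)**(-rho))

vPVec = np.array([vP_End(a) for a in aVec])
vP_End_approx = InterpolatedUnivariateSpline(aVec, vPVec, k=1)

def c_opt(m, eps=1e-9):
    """Solve the FOC u'(c) = vP(m - c) by bracketed root-finding (brentq)."""
    gap = lambda c: c**(-rho) - float(vP_End_approx(m - c))
    return brentq(gap, eps, m - eps)
\end{lstlisting}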
\hypertarget{PlotcTm1ABC}{}
\begin{figure}
  \centerline{\includegraphics[width=6in]{\FigDir/PlotcTm1ABC}}
  \caption{$\cFunc_{\prd-1}(\mStte)$ (solid) Versus Two Methods for Constructing $\Aprx{\cFunc}_{\prd-1}(\mStte)$}
  \label{fig:PlotcTm1ABC}
\end{figure}

\hypertarget{transformation}{}
\subsection{Transformation}\label{subsec:transformation}

Even the new-and-improved consumption function diverges notably from the true solution, especially at lower values of $m$. That is because the linear interpolation does an increasingly poor job of capturing the nonlinearity of $v_{({\prd-1})_\cntn}^{a}(a_{\prd-1})$ at lower and lower levels of $a$.

This is where we unveil our next trick. To understand the logic, start by considering the case where $\mathcal{R}_{t} = \beta = G_{t} = 1$ and there is no uncertainty
\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Go over this carefully.}}{} (that is, we know for sure that income next period will be $\TranShkEmp_{t} = 1$). The final Euler equation (recall that we are still assuming that $t=T$) is then:
\begin{equation}\begin{gathered}\begin{aligned}
  c_{\prd-1}^{-\rho} & = c_{t}^{-\rho}.
 \end{aligned}\end{gathered}\end{equation}

In the case we are now considering, with no uncertainty and no liquidity constraints, the optimizing consumer does not care whether a unit of income is scheduled to be received in the future period $t$ or the current period $t-1$; there is perfect certainty that the income will be received, so the consumer treats its PDV as equivalent to a unit of current wealth. Total resources available at the point when the consumption decision is made are therefore composed of two parts: current market resources $\mStte$ and `human wealth' (the PDV of future income), which here is $h_{\prd-1}=1$ (because human wealth is measured as of the end of the period, only the single unit of income to be received in period $t$ remains).

The well-known optimal solution is to spend half of total lifetime resources in period $t-1$ and the remainder in period $t (=T)$. Since total resources are known with certainty to be $\mStte+h_{\prd-1}= \mStte+1$, and since $v_{\MidStp}^{m}(\mStte) = \uFunc^{c}(\cCtrl)$, this implies that\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Crucial point: this is the \textit{marginal} value function in period $t-1$, which we were trying to approximate with a linear interpolating function earlier.}}{}
\begin{equation}
  v^{m}_{\MidStpLsT}(\mStte) = \left(\frac{\mStte+1}{2}\right)^{-\rho} \label{eq:vPLin}.
\end{equation}
Of course, this is a highly nonlinear function. However, if we raise both sides of \eqref{eq:vPLin} to the power $(-1/\rho)$ the result is a linear function:
\begin{equation}\begin{gathered}\begin{aligned}
  \left[v^{m}_{\MidStpLsT}(\mStte)\right]^{-1/\rho} & = \frac{\mStte+1}{2} .
 \end{aligned}\end{gathered}\end{equation}
This is a specific example of a general phenomenon: A theoretical literature discussed in~\cite{ckConcavity} establishes that under perfect certainty, if the period-by-period marginal utility function is of the form $c_{t}^{-\rho}$, the marginal value function will be of the form $(\gamma m_{t}+\zeta)^{-\rho}$ for some constants $\{\gamma,\zeta\}$. This means that if we were solving the perfect foresight problem numerically, we could always calculate a numerically exact (because linear) interpolation.
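A quick numerical check of this claim (an illustrative fragment, with $\rho$ as before): the second differences of the marginal value function in \eqref{eq:vPLin} are far from zero, while those of its inverse transform are exactly zero, confirming that the transformed function is linear:

\begin{lstlisting}
import numpy as np

rho = 2.0
m = np.linspace(0.5, 4.0, 8)
vPm = ((m + 1.0) / 2.0)**(-rho)   # marginal value, eq. (vPLin): curved
vPm_inv = vPm**(-1.0 / rho)       # inverse transform: exactly (m + 1)/2

print(np.diff(vPm, 2))            # visibly nonzero second differences
print(np.diff(vPm_inv, 2))        # zero (up to floating point): linear
\end{lstlisting}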
To put the key insight in intuitive terms: the nonlinearity we are facing springs in large part from the fact that we are raising something to the power $-\rho$. In effect, we can `unwind' all of the nonlinearity owing to that operation, and the remaining nonlinearity will not be nearly so great. Specifically, applying the foregoing insights to the end-of-period value function, we can define an `inverse marginal value' function
\begin{equation}\begin{gathered}\begin{aligned}
  \vInv_{t_\cntn}^{a}(a) & \equiv \left(v^{a}_{t_\cntn}(a)\right)^{-1/\rho} \label{eq:cGoth}
 \end{aligned}\end{gathered}\end{equation}
which would be exactly linear in the perfect foresight case.\footnote{There is a corresponding inverse for the value function: $\vInv_{t_\cntn}(a_{t})=((1-\rho)v_{t_\cntn})^{1/(1-\rho)}$, and for the marginal marginal value function, etc.} We then construct a piecewise-linear interpolating approximation to the $\vInv_{t_\cntn}^{a}$ function, $\Aprx{\vInv}_{t_\cntn}^{a}(a_{t})$, and for any $a$ that falls in the range $\{\vctr{a}[1],\vctr{a}[-1]\}$ we obtain our approximation of marginal value from:
\begin{equation}\begin{gathered}\begin{aligned}
  \Aprx{v}_{t_\cntn}^{a}(a) & = [\Aprx{\vInv}_{t_\cntn}^{a}(a)]^{-\rho}
 \end{aligned}\end{gathered}\end{equation}

The most interesting thing about all of this, though, is that the $\vInv^{a}_{t_\cntn}$ function has another interpretation. Recall our point in \eqref{eq:upEqbetaOp} that $\uFunc^{c}(c_{t}) = \vEndStp^{a}(m_{t}-c_{t})$. Since with CRRA utility $\uFunc^{c}(c)=c^{-\rho}$, this can be rewritten and inverted:
\begin{equation}\begin{gathered}\begin{aligned}
  (c_{t})^{-\rho} & = \vEndStp^{a}(a_{t})
  \\ c_{t} & = \left(\vEndStp^{a}(a_{t})\right)^{-1/\rho}.
 \end{aligned}\end{gathered}\end{equation}

What this means is that for any given $a$, if we can calculate the marginal value associated with ending the period with that $a$, then we can learn the level of $c$ that the consumer must have chosen if they ended up with that $a$ as the result of an optimal unconstrained choice. This leads us to an alternative interpretation of $\vInv^{a}$: it is the function that reveals, for any ending $a$, how much the agent must have consumed to (optimally) get to that $a$. We will therefore henceforth refer to it as the `consumed function':
\begin{equation}\begin{gathered}\begin{aligned}
  \Aprx{\cFunc}_{t_\cntn}(a_{t}) & \equiv \Aprx{\vInv}^{a}_{t_\cntn}(a_{t}) \label{eq:consumedfn}.
 \end{aligned}\end{gathered}\end{equation}

%\renewcommand{\prd}{T}
Thus, for example, for period $\prdLsT$ our procedure is to calculate the vector of $\vctr{c}$ points on the consumed function:
\begin{equation}\begin{gathered}\begin{aligned}
  \vctr{c} & = \cFunc_{(\prdLsT)_\cntn}(\vctr{a}) \label{eq:consumedfnvecs}
 \end{aligned}\end{gathered}\end{equation}
with the idea that we will construct an approximation of the consumed function $\Aprx{\cFunc}_{(\prdLsT)_\cntn}$ as the interpolating function connecting these $\{\vctr{a},\vctr{c}\}$ points.
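In code, obtaining points on the consumed function requires nothing more than an elementwise inversion of the marginal values already computed (a sketch continuing the illustrative objects defined above):

\begin{lstlisting}
# invert u'(c) = vP analytically under CRRA: c = vP**(-1/rho)
cVec = vPVec**(-1.0 / rho)   # consumed-function points, eq. (consumedfnvecs)
consumed_approx = InterpolatedUnivariateSpline(aVec, cVec, k=1)
\end{lstlisting}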
\hypertarget{the-natural-borrowing-constraint-and-the-a-lower-bound}{}
\subsection{The Natural Borrowing Constraint and the $a_{\prdLsT}$ Lower Bound} \label{subsec:LiqConstrSelfImposed}

%\renewcommand{\prd}{T}
This is the appropriate moment to ask an awkward question: How should an interpolated, approximated `consumed' function like $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a_{\prdLsT})$ be extrapolated to return an estimated `consumed' amount when evaluated at an $a_{\prdLsT}$ outside the range spanned by $\{\vctr{a}[1],...,\vctr{a}[n]\}$?

For most canned piecewise-linear interpolation tools like \href{https://docs.scipy.org/doc/scipy/tutorial/interpolate.html}{scipy.interpolate}, when the `interpolating' function is evaluated at a point outside the provided range, the algorithm extrapolates under the assumption that the slope of the function remains constant beyond its measured boundaries (that is, the slope is assumed to be equal to the slope of the nearest piecewise segment \emph{within} the interpolated range). For example, if the bottommost gridpoint is $\aVecMin = \vctratm[1]$ and the corresponding consumed level is $\cMin = \cFunc_{(\prdLsT)_\cntn}(\aVecMin)$, we could calculate the `marginal propensity to have consumed' $\varkappa_{1}=\Aprx{\cFunc}_{(\prdLsT)_\cntn}^{a}(\aVecMin)$ and construct the approximation as the linear extrapolation below $\vctratm[1]$ from:
\begin{equation}\begin{gathered}\begin{aligned}
  \Aprx{\cFunc}_{(\prdLsT)_\cntn}(a) & \equiv \cMin + (a-\aVecMin)\varkappa_{1} \label{eq:ExtrapLin}.
 \end{aligned}\end{gathered}\end{equation}

To see that this will lead us into difficulties, consider what happens to the true (not approximated) $v^{a}_{(\prdLsT)_\cntn}(a_{\prdLsT})$ as $a_{\prdLsT}$ approaches a quantity we will call the `natural borrowing constraint': $\NatBoroCnstra_{\prdLsT}=-\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$. From \eqref{eq:vEndPrimeTm1} we have
\begin{equation}\begin{gathered}\begin{aligned}
  \lim_{a \downarrow \NatBoroCnstra_{\prdLsT}} v^{a}_{(\prdLsT)_\cntn}(a)
  & = \lim_{a \downarrow \NatBoroCnstra_{\prdLsT}} \beta R \PermGroFacAdjMu \left(\frac{1}{n_{\TranShkEmp}}\right) \sum_{i=1}^{n_{\TranShkEmp}} \left( a \mathcal{R}_{t}+ \TranShkEmp_{i}\right)^{-\rho}.
 \end{aligned}\end{gathered}\end{equation}

But since $\TranShkEmpMin=\TranShkEmp_{1}$, exactly at $a=\NatBoroCnstra_{\prdLsT}$ the first term in the summation would be $(-\TranShkEmpMin+\TranShkEmp_{1})^{-\rho}=1/0^{\rho}$, which is infinity. The reason is simple: $-\NatBoroCnstra_{\prdLsT}$ is the PDV, as of $\prdLsT$, of the \emph{minimum possible realization of income} in $t$ ($\mathcal{R}_{t}\NatBoroCnstra_{\prdLsT} = -\TranShkEmp_{1}$). Thus, if the consumer borrows an amount greater than or equal to $\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$ (that is, if the consumer ends $\prdLsT$ with $a_{\prdLsT} \leq -\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$) and then draws the worst possible income shock in period $t$, they will have to consume zero in period $t$, which yields $-\infty$ utility and $+\infty$ marginal utility.

As \cite{zeldesStochastic} first noticed, this means that the consumer faces a `self-imposed' (or, as above, `natural') borrowing constraint (which springs from the precautionary motive): They will never borrow an amount greater than or equal to $\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$ (that is, assets will never reach the lower bound of $\NatBoroCnstra_{\prdLsT}$).
The constraint is `self-imposed' in the precise sense that if the utility function were different (say, Constant Absolute Risk Aversion), the consumer might be willing to borrow more than $\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$, because a choice of zero or negative consumption in period $t$ would yield some finite amount of utility.\footnote{Though it is very unclear what a proper economic interpretation of negative consumption might be -- this is an important reason why CARA utility, like quadratic utility, is increasingly not used for serious quantitative work, though it is still useful for teaching purposes.}

%\providecommand{\aMin}{\underline{\aNrm}}
This self-imposed constraint cannot be captured well when the $v^{a}_{(\prdLsT)_\cntn}$ function is approximated by a piecewise linear function like $\Aprx{v}^{a}_{(\prdLsT)_\cntn}$, because it is impossible for the linear extrapolation below $\aMin$ to correctly predict $v^{a}_{(\prdLsT)_\cntn}(\NatBoroCnstra_{\prdLsT})=\infty.$ %To see what will happen instead, note first that if we are approximating $\vFunc^{a}_{(\prdLsT)_\cntn}$ the smallest value in \code{aVec} must be greater than $\NatBoroCnstra_{\prdLsT}$ (because the expectation for any $a_{\prdLsT} \leq \NatBoroCnstra_{\prdLsT}$ is undefined).

% When the approximating $\vFunc^{a}_{(\prdLsT)_\cntn}$ function is evaluated at some value less than the first element in \code{aVec}, a piecewise linear approximating function will linearly extrapolate the slope that characterized the lowest segment of the piecewise linear approximation (between \texttt{aVec[1]} and \texttt{aVec[2]}), a procedure that will return a positive finite number, even if the requested $a_{\prdLsT}$ point is below $\NatBoroCnstra_{\prdLsT}$. This means that the precautionary saving motive is understated, and by an arbitrarily large amount as the level of assets approaches its true theoretical minimum $\NatBoroCnstra_{\prdLsT}$.

%\renewcommand{\prd}{T}
So, the marginal value of saving approaches infinity as $a \downarrow \NatBoroCnstra_{\prdLsT}=-\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$. But this implies that $\lim_{a \downarrow \NatBoroCnstra_{\prdLsT}} \cFunc_{(\prdLsT)_\cntn}(a) = (v^{a}_{(\prdLsT)_\cntn}(a))^{-1/\rho} = 0$; that is, as $a$ approaches its `natural borrowing constraint' minimum possible value, the corresponding consumed amount must approach \textit{its} lower bound: zero.

The upshot is that all we need to do to address these problems is to prepend each of the $\vctr{a}_{\code{\prdLsT}}$ and $\vctr{c}_{\code{\prdLsT}}$ vectors from \eqref{eq:consumedfnvecs} with an extra point, so that the first element in the mapping that produces our interpolation function is $\{\NatBoroCnstra_{\prdLsT},0.\}$. This is done in section ``The Self-Imposed `Natural' Borrowing Constraint and the $a_{\prdLsT}$ Lower Bound'' of the notebook.%which can be seen in the defined lists \texttt{aVecBot} and \texttt{cVec3Bot}.
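In the illustrative sketch we have been building, the augmentation is a one-line prepend (here \code{theta\_pts[0]} plays the role of $\TranShkEmp_{1}$, and $\mathcal{R}=R$ because $G=1$):

\begin{lstlisting}
a_nat = -theta_pts[0] / R                # natural borrowing constraint
aVec_aug = np.insert(aVec, 0, a_nat)     # first point is {a, c} = {a_nat, 0}
cVec_aug = np.insert(cVec, 0, 0.0)       # consumed amount goes to zero there
consumed_approx = InterpolatedUnivariateSpline(aVec_aug, cVec_aug, k=1)
\end{lstlisting}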
\Fix{\marginpar{\tiny The vertical axis should be relabeled - not gothic c anymore, instead $\vInv^{a}$}}{}

\hypertarget{GothVInvVSGothC}{}
\begin{figure}
  \centerline{\includegraphics[width=6in]{\FigDir/GothVInvVSGothC}}
  \caption{True $\vInv^{a}_{(\prdLsT)_\cntn}(a)$ vs its approximation $\Aprx{\vInv}^{a}_{(\prdLsT)_\cntn}(a)$}
  \label{fig:GothVInvVSGothC}
\end{figure}
% \caption{True $\cFunc_{(\prdLsT)_\cntn}(\aNrm)$ vs its approximation $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(\aNrm)$}

Figure\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny True $\cEndFunc$ is solid, linear approx is dashed.}}{} \ref{fig:GothVInvVSGothC} shows the result. The solid line calculates the exact numerical value of the consumed function $\cFunc_{(\prdLsT)_\cntn}(a)$ while the dashed line is the linear interpolating approximation $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a).$ This figure illustrates the value of the transformation: The true function is close to linear, and so the linear approximation is almost indistinguishable from the true function except at the very lowest values of $a$.

Figure~\ref{fig:GothVVSGothCInv} similarly shows that when we generate $\Aprx{\Aprx{v}}_{(\prdLsT)_\cntn}^{a}(a)$ using our augmented $[\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a)]^{-\rho}$ (dashed line) we obtain a \textit{much} closer approximation to the true marginal value function $v^{a}_{(\prdLsT)_\cntn}(a)$ (solid line) than we obtained in the previous exercise, which did not use the transformation (Figure~\ref{fig:PlotOPRawVSFOC}).\footnote{The vertical axis label uses $\mathfrak{v}^{\prime}$ as an alternative notation for what in these notes we designate as $v^{a}_{\EndStpLsT}$. This will be fixed.}
\Fix{\marginpar{\tiny fix the problem articulated in the footnote}}{}

\hypertarget{GothVVSGothCInv}{}
\begin{figure}
  \centerline{\includegraphics[width=6in]{\FigDir/GothVVSGothCInv}}
  \caption{True $v^{a}_{(\prdLsT)_\cntn}(a)$ vs. $\Aprx{\Aprx{v}}_{(\prdLsT)_\cntn}^{a}(a)$ Constructed Using $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a)$}
  \label{fig:GothVVSGothCInv}
\end{figure}

\hypertarget{the-method-of-endogenous-gridpoints}{}
\subsection{The Method of Endogenous Gridpoints (`EGM')}\label{subsec:egm}

The solution procedure we articulated above for finding $\cFunc_{\prdLsT}(m)$ still requires us, for each point in $\vctr{m}\code{_{\prdLsT}}$, to use a numerical rootfinding algorithm to search for the value of $\cCtrl$ that solves $\uFunc^{c}(\cCtrl) = v^{a}_{(\prdLsT)_\cntn}(m-\cCtrl)$. Though sections \ref{subsec:transformation} and \ref{subsec:LiqConstrSelfImposed} developed a highly efficient and accurate procedure to calculate $\Aprx{v}^{a}_{(\prdLsT)_\cntn}$, those approximations do nothing to eliminate the need for a rootfinding operation to calculate, for an arbitrary $m$, the optimal $c$. And rootfinding is a notoriously computation-intensive (that is, slow!) operation.

Fortunately, it turns out that there is a way to completely skip this slow rootfinding step. The method can be understood by noting that we have already calculated, for a set of arbitrary values of $\vctr{a}=\vctr{a}\code{_{\prdLsT}}$, the corresponding $\vctr{c}$ values for which this $\vctr{a}$ is optimal.
% (greater than its lower bound value $\aVecMin$) will be associated with \textit{some} marginal valuation as of the continuation ($\cntn$) step of $\prdLsT$ (that is, at the end of the period), and the further observation that it is trivial to find the value of $c$ that yields the same marginal valuation, using the first order condition,
% \begin{equation}\begin{gathered}\begin{aligned}
% \uFunc^{c}({\vctr{\cNrm}\code{_{\prdLsT}}}) & =
% \vFunc^{a}_{(\prdLsT)_\cntn}(\vctr{a}_{\prdLsT}) \label{eq:eulerTm1}
% \end{aligned}\end{gathered}\end{equation}
% by using the inverse of the marginal utility function,
% \begin{equation}\begin{gathered}\begin{aligned}
% c^{-\CRRA} & = \mu
% \\ c & = \mu^{-1/\CRRA}
% \end{aligned}\end{gathered}\end{equation}
% which yields the level of consumption that corresponds to marginal utility of $\mu.$
% Using this to invert both sides of \eqref{eq:eulerTm1}, we get
% \begin{equation}\begin{gathered}\begin{aligned}
% {\vctr{\cNrm}\code{_{\prdLsT}}} & = \left(\vFunc^{a}_{(\prdLsT)_\cntn}(\vctr{a}_{\prdLsT})\right)^{-1/\CRRA}
% \end{aligned}\end{gathered}\end{equation}
% where the $\cntn$ emphasizes that these are points on the `consumed' function (that is, the function that reveals how much an optimizing consumer must have consumed in order to have ended the period with $a_{T-1}$).

But with mutually consistent values of $\vctr{c}\code{_{\prdLsT}}$ and $\vctr{a}\code{_{\prdLsT}}$ (consistent, in the sense that they are the unique optimal values that correspond to the solution to the problem), we can obtain the $\vctr{m}\code{_{\prdLsT}}$ vector that corresponds to both of them from
\begin{equation}\begin{gathered}\begin{aligned}
  \vctr{m}\code{_{\prdLsT}} & = {\vctr{c}\code{_{\prdLsT}}+\vctr{a}\code{_{\prdLsT}}}.
 \end{aligned}\end{gathered}\end{equation}

\ifthenelse{\boolean{ToFix}}{\marginpar{\tiny Rename gothic class, maybe to: EndPrd. Also, harmonize the notation in the notebook and in the paper - for example, everywhere in the text we use cNrm for normalized consumption, but for some reason it is capital C in the gothic function.}}{}

These $m$ gridpoints are ``endogenous'' in contrast to the usual solution method of specifying some \textit{ex-ante} (exogenous) grid of values of $\vctr{m}$ and then using a rootfinding routine to locate the corresponding optimal consumption vector $\vctr{c}$.


This routine is performed in the ``Endogenous Gridpoints'' section of the notebook. First, the \texttt{gothic.C\_Tminus1} function is called for each of the pre-specified values of end-of-period assets stored in \code{aVec}. These values of consumption and assets are used to produce the list of endogenous gridpoints, stored in the object \texttt{mVec\_egm}. With the $\vctr{c}$ values in hand, the notebook can generate a set of $\vctr{m}\code{_{\prdLsT}}$ and ${\vctr{c}\code{_{\prdLsT}}}$ pairs that can be interpolated between in order to yield $\Aprx{\cFunc}_{\MidStpLsT}(m)$ at virtually zero computational cost!\footnote{This is the essential point of \cite{carrollEGM}.} %This is done in the final line of code in this block, and the following code block produces the graph of the interpolated consumption function using this procedure.
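Pulling the pieces together, the entire EGM step collapses to a few vectorized lines. The following sketch reuses the illustrative objects defined in the earlier snippets (not the notebook's own \texttt{gothic} functions):

\begin{lstlisting}
cVec_egm = vPVec**(-1.0 / rho)      # inverted first-order condition at aVec
mVec_egm = aVec + cVec_egm          # endogenous gridpoints: m = a + c
cFunc_egm = InterpolatedUnivariateSpline(
    np.insert(mVec_egm, 0, a_nat),  # prepend the lower-bound point {a_nat, 0}
    np.insert(cVec_egm, 0, 0.0),
    k=1)                            # no root-finding anywhere
\end{lstlisting}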
- -\hypertarget{PlotComparecTm1AD}{} -One might worry about whether the $\{{m},c\}$ points obtained in this way will provide a good representation of the consumption function as a whole, but in practice there are good reasons why they work well (basically, this procedure generates a set of gridpoints that is naturally dense right around the parts of the function with the greatest nonlinearity). -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/PlotComparecTm1AD}} - \caption{$\cFunc_{\prdLsT}(m)$ (solid) versus $\Aprx{\cFunc}_{\prdLsT}(m)$ (dashed)} - \label{fig:ComparecTm1AD} -\end{figure} -Figure~\ref{fig:ComparecTm1AD} plots the actual consumption function $\cFunc_{\prdLsT}$ and the approximated consumption function $\Aprx{\cFunc}_{\prdLsT}$ derived by the method of endogenous grid points. Compared to the approximate consumption functions illustrated in Figure~\ref{fig:PlotcTm1ABC}, $\Aprx{\cFunc}_{\prdLsT}$ is quite close to the actual consumption function. - - -\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Different transformation for $v$ than for $v^{a}$.}}{} - -\hypertarget{improving-the-a-grid}{} -\subsection{Improving the $a$ Grid}\label{subsec:improving-the-a-grid} - -Thus far, we have arbitrarily used $a$ gridpoints of $\{0.,1.,2.,3.,4.\}$ (augmented in the last subsection by $\NatBoroCnstra_{\prdLsT}$). But it has been obvious from the figures that the approximated $\Aprx{\cFunc}_{(\prdLsT)_\cntn}$ function tends to be farthest from its true value at low values of $a$. Combining this with our insight that $\NatBoroCnstra_{\prdLsT}$ is a lower bound, we are now in position to define a more deliberate method for constructing gridpoints for $a$ -- a method that yields values that are more densely spaced at low values of $a$ where the function is more nonlinear. - -A pragmatic choice that works well is to find the values such that (1) the last value \textit{exceeds the lower bound} by the same amount $\bar a$ as our original maximum gridpoint (in our case, 4.); (2) we have the same number of gridpoints as before; and (3) the \textit{multi-exponential growth rate} (that is, $e^{e^{e^{...}}}$ for some number of exponentiations $n$ -- our default is 3) from each point to the next point is constant (instead of, as previously, imposing constancy of the absolute gap between points). - -\hypertarget{GothVInvVSGothCEEE}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/GothVInvVSGothCEEE}} - \caption{$\cFunc_{(\prdLsT)_\cntn}(a)$ versus - $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a)$, Multi-Exponential \code{aVec}} - \label{fig:GothVInvVSGothCEE} -\end{figure} - - -\hypertarget{GothVVSGothCInvEEE}{} -\begin{figure} - \includegraphics[width=6in]{\FigDir/GothVVSGothCInvEEE} - \caption{$v^{a}_{(\prdLsT)_\cntn}(a)$ vs. - $\Aprx{\Aprx{v}}_{(\prdLsT)_\cntn}^{a}(a)$, Multi-Exponential \code{aVec}} - \label{fig:GothVVSGothCInvEE} -\end{figure} - -Section ``Improve the $\mathbb{A}_{grid}$'' begins by defining a function which takes as arguments the specifications of an initial grid of assets and returns the new grid incorporating the multi-exponential approach outlined above. - - -Notice that the graphs depicted in Figures~\ref{fig:GothVInvVSGothCEE} and \ref{fig:GothVVSGothCInvEE} are notably closer to their respective truths than the corresponding figures that used the original grid. 
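One possible shape for such a grid-constructing function is sketched below (ours, not the notebook's): it spaces points uniformly after $n$ repeated $\log(1+\cdot)$ transformations and then undoes them, which delivers the constant multi-exponential growth property described above:

\begin{lstlisting}
import numpy as np

def exp_mult_grid(bot, top, n_pts, nests=3):
    """Grid on [bot, top] that is uniform after `nests` nested logs,
    and therefore increasingly dense near `bot`."""
    lo, hi = bot, top
    for _ in range(nests):
        lo, hi = np.log(lo + 1.0), np.log(hi + 1.0)
    pts = np.linspace(lo, hi, n_pts)
    for _ in range(nests):
        pts = np.exp(pts) - 1.0
    return pts

# five points spanning the same distance above the lower bound as before
aVec_ee = a_nat + exp_mult_grid(0.0, 4.0, 5)
\end{lstlisting}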
\subsection{Program Structure}

In section ``Solve for $c_t(m)$ in Multiple Periods,'' the natural and artificial borrowing constraints are combined with the endogenous gridpoints method to approximate the optimal consumption function for a specific period. This function is then used to compute the approximated consumption function in the previous period, and the process is repeated for some specified number of periods.

The essential structure of the program is a loop that iteratively solves for consumption functions by working backward from an assumed final period, using the dictionary \texttt{cFunc\_life} to store the interpolated consumption functions up to the beginning period. The consumption function for a given period is used to determine the endogenous gridpoints for the preceding period. This is the sense in which the computation of optimal consumption is done recursively.

For a realistic life cycle problem, it would also be necessary at a minimum to calibrate a nonconstant path of expected income growth over the lifetime that matches the empirical profile; allowing for such a calibration is the reason we have included the $\{G_{t}\}_{t=1}^{T}$ vector in our computational specification of the problem.

\hypertarget{results}{}
\subsection{Results}

The code creates the relevant $\Aprx{\cFunc}_{t}(m)$ functions for any period in the horizon, at the given values of $m$. Figure \ref{fig:PlotCFuncsConverge} shows $\Aprx{\cFunc}_{T-n}(m)$ for $n=\{20,15,10,5,1\}$. At least one feature of this figure is encouraging: the consumption functions converge as the horizon extends, something that \cite{BufferStockTheory} shows must be true under certain parametric conditions that are satisfied by the baseline parameter values being used here.

\hypertarget{PlotCFuncsConverge}{}
\begin{figure}
  \includegraphics[width=6in]{\FigDir/PlotCFuncsConverge}
  \caption{Converging $\Aprx{\cFunc}_{T-n}(m)$ Functions as $n$ Increases}
  \label{fig:PlotCFuncsConverge}
\end{figure}



%\MoM{\input{sec_method-of-moderation-input}}{}
% Habits go here: \input{./Subfiles-private/Habits}
\hypertarget{the-infinite-horizon}{}
\section{The Infinite Horizon}\label{sec:the-infinite-horizon}

All of the solution methods presented so far have involved period-by-period iteration from an assumed last period of life, as is appropriate for life cycle problems. However, if the parameter values for the problem satisfy certain conditions (detailed in \cite{BufferStockTheory}), the consumption rules (and the rest of the problem) will converge to a fixed rule as the horizon (remaining lifetime) gets large, as illustrated in Figure~\ref{fig:PlotCFuncsConverge}. Furthermore, Deaton~\citeyearpar{deatonLiqConstr}, Carroll~\citeyearpar{carroll:brookings,carrollBSLCPIH} and others have argued that the `buffer-stock' saving behavior that emerges under some further restrictions on parameter values is a good approximation of the behavior of typical consumers over much of the lifetime. Methods for finding the converged functions are therefore of interest, and are dealt with in this section.

Of course, the simplest such method is to solve the problem as specified above for a large number of periods. This is feasible, but there are much faster methods.

\subsection{Convergence}

In solving an infinite-horizon problem, it is necessary to have some metric that determines when to stop because a solution that is `good enough' has been found.
- -A natural metric is defined by the unique `target' level of wealth that \cite{BufferStockTheory} proves -will exist in problems of this kind \href{https://llorracc.github.io/BufferStockTheory#GICNrm}{under certain conditions}: The $\mTrgNrm$ such that -\begin{equation} - \Ex_t [{m}_{t+1}/m_t] = 1 \mbox{~if~} m_t = \mTrgNrm \label{eq:mTrgNrmet} -\end{equation} -where the accent is meant to signify that this is the value -that other $m$'s `point to.' - -Given a consumption rule $\cFunc(m)$ it is straightforward to find -the corresponding $\mTrgNrm$. So for our problem, a solution is declared -to have converged if the following criterion is met: -$\left|\mTrgNrm_{t+1}-\mTrgNrm_{t}\right| < \epsilon$, where $\epsilon$ is -a very small number and defines our degree of convergence tolerance. - -Similar criteria can obviously be specified for other problems. -However, it is always wise to plot successive function differences and -to experiment a bit with convergence criteria to verify that the -function has converged for all practical purposes. - -\begin{comment} % at suggestion of WW, this section was removed as unnecessary for the current model, which solves for the converged rule very fast - \subsection{The Last Period} - - For the last period of a finite-horizon lifetime, in the absence of a - bequest motive it is obvious that the optimal policy is to spend - everything. However, in an infinite-horizon problem there is no last - period, and the policy of spending everything is obviously very far - from optimal. Generally speaking, it is much better to start off with - a `last-period' consumption rule and value function equal to those - corresponding to the infinite-horizon solution to the perfect - foresight problem (assuming such a solution is known). - - For the perfect foresight infinite horizon consumption problem, - the solution is - \begin{equation}\begin{gathered}\begin{aligned} - \bar{\cFunc}(m_{t}) & = \overbrace{(1-R^{-1}(R - \beta)^{1/\rho})}^{\equiv - \underline{\kappa}}\left[{m}_{t}-1+\left(\frac{1}{1-1/R}\right)\right] - \label{eq:pfinfhorc} - \end{aligned}\end{gathered}\end{equation} - where $\underline{\kappa}$ is the MPC in the - infinite-horizon perfect foresight problem. In our baseline problem, - we set $G = \pLvl_{t} = 1$. It is straightforward to show that the - infinite-horizon perfect-foresight value function and marginal value - function are given by - \begin{equation}\begin{gathered}\begin{aligned} - \bar{v}(m_{t}) - & = \left(\frac{\bar{\cFunc}(m_{t})^{1-\rho}}{ - (1-\rho)\underline{\kappa} }\right) - \\ \bar{v}^{m}(m_{t}) & = (\bar{\cFunc}(m_{t}))^{-\rho} - \\ \Opt{v}^{m}(a_{t}) & = \beta R G_{t+1}^{-\rho} \bar{v}^{m}(\mathcal{R}_{t+1} a_{t}+1). - \end{aligned}\end{gathered}\end{equation} - - % WW delete the text on 2011-06-21 because we no longer start from the infinite horizon perfect foresight solution. - % If we choose to pursue that starting point, we need to derive the optimist's and pessimist's consumption function, - % when the last period is given by the infinite horizon perfect-foresight solution. That will change the program significantly. - % In our case, with \epsilon being 10^(-4), iteration requires only 51 periods, and 0.032 minutes. 
\end{comment}

\begin{comment}% At suggestion of WW this section was deleted because the technique is obvious and can be captured by the footnote that has been added
  \subsection{Coarse Then Fine \code{aVec} }

  The speed of each iteration is directly proportional to the number
  of gridpoints at which the problem must be solved. Therefore
  reducing the number of points in \code{aVec} can increase
  the speed of solution greatly. Of course, this also decreases the
  accuracy of the solution. However, once the converged solution is
  obtained for a coarse \code{aVec}, the density of the grid
  can be increased and iteration can continue until a converged
  solution is found for the finer \code{aVec}.
  % WW delete the text on 2011-06-21 because we no longer need a finer \code{aVec}. I add a footnote in next subsection instead.

  \subsection{Coarse then Fine \texttt{$\TranShkEmp$Vec}}

  The speed of solution is roughly proportionate\footnote{It is also
    true that the speed of each iteration is directly proportional to
    the number of gridpoints in \code{aVec}, at which the problem must
    be solved. However given our method of moderation, now the problem
    could be solved very precisely based on five gridpoints only. Hence
    we do not pursue the process of ``Coarse then Fine \code{aVec}.''}
  to the number of points used in approximating the distribution of
  shocks. At least 3 gridpoints should probably be used as an initial
  minimum, and my experience is that increasing the number of gridpoints
  beyond 7 generally yields only very small changes in the solution. The program
  \texttt{multiperiodCon\_infhor.m}
  begins with three gridpoints, and then solves for successively finer
  \texttt{$\TranShkEmp$Vec}.
\end{comment}

\hypertarget{multiple-control-variables}{}
\section{Multiple Control Variables}\label{sec:multiple-control-variables}
We now consider how to solve problems with multiple control variables.

\subsection{Theory}\label{subsec:MCTheory}

The new portfolio-share control variable is captured by the archaic Greek character \href{https://en.wikipedia.org/wiki/Stigma_(ligature)}{`stigma'}; it represents the share $\Shr$ of their disposable assets that the agent invests in the risky asset (conventionally, the stock market). Designating the return factor for the risky asset as $\Risky$ and the share of the portfolio invested in $\Risky$ as $\Shr$, the realized portfolio return factor $\Rport$ as a function of the share $\Shr$ is:
\begin{equation}\begin{gathered}\begin{aligned}
    \Rport(\Shr) &= R+(\Risky-R)\Shr \label{eq:Shr}.
  \end{aligned}\end{gathered}\end{equation}
If we imagine the portfolio share decision as being made simultaneously with the $c$ decision, the traditional way of writing the problem is (substituting the budget constraint):
\begin{equation}\begin{gathered}\begin{aligned}
    v_{t}(m) & = \max_{\{\cFunc,\Shr\}} ~~ \uFunc(c) + \ExMidPrd[\beta v_{t+1}((m-c)\Rport(\Shr) + {\TranShkEmp}_{t+1})] \label{eq:Bellmanundated}
  \end{aligned}\end{gathered}\end{equation}
where we have deliberately omitted the {period}-designating subscripts for $\Shr$ and the return factors to highlight the point that, once the consumption and $\Shr$ decisions have been made, it makes no difference to this equation whether the risky return factor $\Risky$ is revealed a nanosecond before the end of the current {period} or a nanosecond after the beginning of the successor {period}.
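For instance, with an equiprobable discretization of the risky return (the draws below are purely illustrative), the realized portfolio return factor in \eqref{eq:Shr} and its expectation are one-liners in Python:
\begin{verbatim}
import numpy as np

Risky_draws = np.array([0.95, 1.02, 1.09, 1.16])  # equiprobable draws
R = 1.02                                          # riskless factor

def Rport(share):
    # Realized portfolio return factor for each risky draw
    return R + (Risky_draws - R) * share

E_Rport = Rport(0.6).mean()  # equiprobable draws: expectation = mean
\end{verbatim}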
- -%But as a notational choice, there is good reason to designate the realization as happening in $t+1$: A standard way of motivating stochastic returns and wages is to attribute them to ``productivity shocks'' and to assume that the productivity shock associated with a date is the one that affects the production function for that date. - -%\renewcommand{\prd}{t} % For the rest of the doc, use generic t vs t+1 - -\begin{comment} - Designating the return factor for the risky asset as $\Risky_{t+1}$, and using $\Shr_{t}$ to represent the proportion of the portfolio invested in this asset before the return is realized after the beginning of $t+1$, corresponding to an assumption that the consumer cannot be `net short' and cannot issue net equity), the overall return on the consumer's portfolio between $t$ and $t+1$ will be: - \begin{equation}\begin{gathered}\begin{aligned} - \Rport_{t+1} & = R(1-\Shr_{t}) + \Risky_{t+1}\Shr_{t} \label{eq:return1} - \\ & = R + (\Risky_{t+1}-R) \Shr_{t} %\label{eq:return2} - \end{aligned}\end{gathered}\end{equation} - and the maximization problem is - \begin{equation*}\begin{gathered}\begin{aligned} - v_{t}(m_{t}) & = \max_{\{{c}_{t},\Shr_{t}\}} ~~ \uFunc(c_{t}) + \beta - \ExEndStp[{v}_{t+1}(m_{t+1})] - \\ & \text{s.t.} \nonumber - \\ \Rport_{t+1} & = R + (\Risky_{t+1}-R) \Shr_{t} - \\ m_{t+1} & = (m_{t}-c_{t})\Rport_{t+1} + \TranShkEmp_{t+1} - \\ 0 \leq & \Shr_{t} \leq 1, \label{eq:noshorts} - \end{aligned}\end{gathered}\end{equation*} - - The first order condition with respect to $c_{t}$ is almost identical to that in the single-control problem, equation (\ref{eq:upceqEvtp1}); the only difference is that the nonstochastic interest factor $R$ is now replaced by the portfolio return ${\Rport}_{t+1}$, - \begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(c_{t}) & = \beta \ExEndStp [{\Rport}_{t+1} v^{m}_{t+1}(m_{t+1})] \label{eq:valfuncFOCRtilde}, - \end{aligned}\end{gathered}\end{equation} - and the Envelope theorem derivation remains the same, yielding the Euler equation for consumption - \begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(c_{t}) & = \ExEndStp[\beta {\Rport}_{t+1} \uFunc^{c}(c_{t+1})]. \label{eq:EulercRiskyR} - \end{aligned}\end{gathered}\end{equation} - - The first order condition with respect to the risky portfolio share is - \begin{equation}\begin{gathered}\begin{aligned} - 0 & = \ExEndStp[{v}_{\MidStpNxt}^{m}(m_{t+1})(\Risky_{t+1}-R){a}_{t}] \notag - \\ & = \ExEndStp\left[\uFunc^{c}\left(\cFunc_{t+1}(m_{t+1})\right)(\Risky_{t+1}-R)\right]{a}_{t} - \\ & = \ExEndStp\left[\uFunc^{c}\left(\cFunc_{t+1}(m_{t+1})\right)(\Risky_{t+1}-R)\right], \label{eq:FOCw} - \end{aligned}\end{gathered}\end{equation} - where the last line follows because $0/a_{t}=0$. - - As before, we define $\vEnd$ as a function that yields the expected $t+1$ value of ending period $t$ with assets $a_{t}$. However, now that there are two control variables, the expectation must be defined as a function of the chosen values of both of those variables, because expected end-of-period value will depend not just on how much the agent saves, but also on how the saved assets are allocated between the risky and riskless assets. 
Thus we define
  \begin{equation*}\begin{gathered}\begin{aligned}
      \vMidStp(a_{t},\Shr_{t}) & = \beta v_{\arvlstepShr}(m_{t+1})
    \end{aligned}\end{gathered}\end{equation*}
  which has derivatives
  \begin{equation}\begin{gathered}\begin{aligned}
      \vMidStp^a & = \ExEndStp[\beta {\Rport}_{t+1}v_{t+1}^{m}(m_{t+1})] = \ExEndStp[\beta {\Rport}_{t+1}{\uFunc}_{t+1}^{c}(\cFunc_{t+1}(m_{t+1}))]
    \end{aligned}\end{gathered}\end{equation}
  \begin{equation}\begin{gathered}\begin{aligned}
      \vMidStp^{\Shr} & = \ExEndStp[\beta (\Risky_{t+1}-R){v}_{t+1}^{m}(m_{t+1}) ]a_{t} = \ExEndStp[\beta (\Risky_{t+1}-R){\uFunc}_{t+1}^{c}(\cFunc_{t+1}(m_{t+1})) ]a_{t} \notag
    \end{aligned}\end{gathered}\end{equation}
  implying that the first order conditions (\ref{eq:EulercRiskyR}) and
  (\ref{eq:FOCw}) can be rewritten
  \begin{equation}\begin{gathered}\begin{aligned}
      \uFunc^{c}(c_{t}) & = \vMidStp^{a}(m_{t}-c_{t},\Shr_{t}) \label{eq:FOCc}
    \end{aligned}\end{gathered}\end{equation}
  and
  \begin{equation}\begin{gathered}\begin{aligned}
      0 & = v^{\Shr}_{\vMidStpStgShr}(a_{t},\Shr_{t}). \label{eq:FOCShr}
    \end{aligned}\end{gathered}\end{equation}
\end{comment}

\hypertarget{stages-within-a-period}{}
\subsection{{Stage}s Within a {Period}}\label{subsec:stageswithin}

Solving simultaneously for the two variables $\Shr$ and $c$ can be computationally challenging. Fortunately, there is a simple solution: Break the problem into two `{stage}s'\footnote{cite mnw and ael papers.}
which we will call the `consumption {stage} $\cFunc$' and the `portfolio {stage} $\Shr$.' These could come in either order in the {period}: We designate the `portfolio choice first, then consumption' version by $[\Shr,\cFunc]$ and the `consumption choice first, then portfolio' as $[\cFunc,\Shr]$.

In a problem with multiple {stages}, if we want to refer to a sub-{step} of a particular {stage} -- say, the {\Arrival} {step} of the portfolio {stage} -- we simply add a {stage}-indicator subscript (in square brackets) to the notation we have been using until now. That is, the {\Arrival} {step} of the portfolio problem would be $v_{_\arvl[\Shr]}$.

\hypertarget{revised-consumers-problem}{}
\subsubsection{The (Revised) Consumer's Problem}\label{subsubsec:revised-consumers-problem}

A slight modification to the consumer's problem specified earlier is necessary to make the {stage}s of the problem completely modular. The difficulty with the earlier formulation is that it assumed that asset returns occurred in the middle {step} of the consumption problem. Our revised version of the consumption problem takes as its input state the amount of bank balances that have resulted from any prior portfolio decision. The problem is therefore:
  \begin{equation}\begin{gathered}\begin{aligned}
      v_{[\cFunc]}(m) & = \max_{c} ~~ \uFunc(c)+ v_{[\cFunc]_{_\cntn}}(\underbrace{m-c}_{a})
\\ v_{_\arvl[\cFunc]}(b) & = \Ex_{_\arvl[\cFunc]}\left[v_{[\cFunc]}(\overbrace{b+\TranShkEmp}^{m})\right] \label{eq:vBalances}
  \end{aligned}\end{gathered}\end{equation}


\hypertarget{subsubsec:investors-problem}{}
\subsubsection{The Investor's Problem}\label{subsubsec:investors-problem}

Consider the standalone problem of an `investor' whose continuation-value function $v_{[\Shr]_\cntn}$ depends on how much wealth $\acute{w}$ they end up with after the realization of the stochastic $\Risky$ return.
The expected value that the investor will obtain from any combination of initial $w$ and their optimal choice of the portfolio share $\Shr$ is the expectation of the continuation-value function over the wealth that results from the portfolio choice:
\begin{equation}\begin{gathered}\begin{aligned}
    v_{_\arvl[\Shr]}(w) = & \max_{\Shr}~ \Ex_{\BegStp[\Shr]}\left[v_{[\Shr]_{_\cntn}}\overbrace{\left(\Rport(\Shr){w}\right)}^{\acute{w}}\right] \label{eq:vMidStpShr}
  \end{aligned}\end{gathered}\end{equation}
where we have omitted any {period} designator like $t$ for the {period} in which this problem is solved because, with the continuation-value function defined already as $v_{[\Shr]_\cntn}(\acute{w})$, the problem is self-contained. The solution to this problem will yield an optimal $\Shr$ decision rule $\optml{\Shr}(w).$ Finally, we can specify the value of an investor `arriving' with $w$ as the expected value that will be obtained when the investor invests optimally, generating the \textit{ex ante} optimal stochastic portfolio return factor $\optml{\Rport}(w)=\Rport(\optml{\Shr}(w))$:
\begin{equation}\begin{gathered}\begin{aligned}
    v_{_\arvl[\Shr]}(w) = & \Ex_{_\arvl[\Shr]}\left[v_{[\Shr]_\cntn}(\overbrace{\optml{\Rport}(w){w}}^{\acute{w}})\right].
\end{aligned}\end{gathered}\end{equation}

The reward for all this notational investment is that it is now clear that \emph{exactly the same code} for solving the portfolio share problem can be used in two distinct problems: a `beginning-of-period-returns' model and an `end-of-period-returns' model.

\hypertarget{beginning-returns}{}
\subsubsection{The `beginning-of-period returns' Problem}\label{subsubsec:beginning-returns}
The beginning-returns problem effectively just inserts a portfolio choice that happens at a {stage} immediately before the consumption {stage} in the optimal consumption problem described in \eqref{eq:vBalances}, for which we had a beginning-of-{stage} value function $v_{_\arvl[\cFunc]}(b)$. The agent makes their portfolio share decision within the {stage} but (obviously) before the risky returns $\Risky$ for the {period} have been realized.
So the problem's portfolio-choice {stage} also takes $k$ as its initial state and solves the investor's problem outlined in section~\ref{subsubsec:investors-problem} above:
\begin{equation}\begin{gathered}\begin{aligned}
    v_{[\Shr]_\arvl}(k) & = \Ex_{[\Shr]_\arvl}[v_{[\Shr]_{_\cntn}}(\underbrace{\optml{\Rport}(k)\,k}_{b})]
\\v_{[\Shr]_\cntn}(b) & = v_{_\arvl[\cFunc]}(b)
  \end{aligned}\end{gathered}\end{equation}

Since in this setup bank balances have been determined before the consumption problem starts, we need to rewrite the consumption {stage} as a function of the bank balances $b$ that result from the portfolio investment, combined with the income shocks $\TranShkEmp$:
\begin{equation}\begin{gathered}\begin{aligned}
    v_{_\arvl[\cFunc]}(b) = & \max_{\cFunc}~ \uFunc(c) + \Ex_{_\arvl[\cFunc]}[v_{[\cFunc]_\cntn}(\underbrace{\overbrace{b+\TranShkEmp}^{m}-c}_{a})]
  \end{aligned}\end{gathered}\end{equation}
where, because the consumption {stage} is the last {stage} in the {period}, the continuation-value function for the $\cFunc$ {stage} is just the continuation-value function for the period as a whole:
\begin{equation}\begin{gathered}\begin{aligned}
    v_{[\cFunc]_\cntn}(a) = & v_{t_\cntn}(a)
  \end{aligned}\end{gathered}\end{equation}
(and recall that $v_{t_\cntn}(a)$ is exogenously provided as an input to the {period}'s problem via the transition equation assumed earlier: $v_{t_\cntn}(a)=\beta v_{_\arvl(t+1)}(a)$).

\subsubsection{The `end-of-period-returns' Problem}

If the portfolio share and risky returns are realized at the end of the {period}, we need to move the portfolio choice {stage} to immediately before the point at which returns are realized (and after the $\cFunc$ choice has been made). The problem is the same as the portfolio problem defined above, except that the input for the investment {stage} is the assets remaining after the consumption choice: $a$. So, the portfolio {stage} of the problem is
\begin{equation}\begin{gathered}\begin{aligned}
    v_{_\arvl[\Shr]}(a) = & \Ex_{_\arvl[\Shr]}[v_{[\Shr]_{_\cntn}}(\underbrace{\optml{\Rport}(a)\,a}_{k})] %= \Ex_{[\cFunc]_\arvl}[\vFunc_{}(\kNrm)]
  \end{aligned}\end{gathered}\end{equation}
where we are designating the post-realization result of the investment as $k$, and since the $\Shr$-{stage} is the last {stage} of the problem the end-of-{stage} $k$ becomes the end-of-{period} $k_{t}.$

The `state transition' equation between $t$ and $t+1$ is simply $b_{t+1} = k_{t}$ and the continuation-value function transition is $v_{t_\cntn}(k) \mapsto \beta v_{_\arvl(t+1)}(k)$, which reflects the above-mentioned point that there is no substantive difference between the two problems (their $v_{[\cFunc]}(m)$ value functions and $\cFunc(m)$ functions will be identical).

(Note that we are assuming that there will be only one consumption function in the period, so no {stage} subscript is necessary to pick out `the consumption function'.)

\subsubsection{Numerical Solution}
While the investor's problem cannot be solved using the endogenous gridpoints method,\footnote{Because $\vShrEnd$ is not invertible with respect to $\Shr$; see [references to MNW and AEL's work].}
we can solve it numerically for the optimal $\Shr$ at a vector of $\vctr{a}$ ({\aVecCode} in the code) and then construct an approximated optimal portfolio share function $\Aprx{\optml{\Shr}}(a)$ as the interpolating function among the members of the $\{\vctr{a},\vctr{\Shr}\}$ mapping.
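A minimal sketch of that step in Python, assuming a function \texttt{v\_end(a, share)} that returns the discretized expectation of the continuation value (the names here are ours, not the code's):
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize_scalar
from scipy.interpolate import interp1d

def solve_share_on_grid(a_grid, v_end):
    # For each gridpoint a, pick the share in [0,1] that maximizes
    # the expected continuation value v_end(a, share), then return
    # an interpolating share rule built from the {a, share} pairs.
    shares = np.empty_like(a_grid)
    for i, a in enumerate(a_grid):
        res = minimize_scalar(lambda s: -v_end(a, s),
                              bounds=(0.0, 1.0), method="bounded")
        shares[i] = res.x
    return interp1d(a_grid, shares, bounds_error=False,
                    fill_value=(shares[0], shares[-1]))
\end{verbatim}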
Having done this, we can now calculate a vector of values and marginal values that correspond to $\aVec$:
\begin{equation}\begin{gathered}\begin{aligned}
    \vctr{v} & = v_{_\arvl[\Shr]}(\vctr{a}) \label{eq:vShrEnd}
\\ \vctr{v}^a & = v^{a}_{_\arvl[\Shr]}(\vctr{a}).
  \end{aligned}\end{gathered}\end{equation}

With the $\vctr{v}^{a}$ approximation in hand, we can construct our approximation to the consumption function using \emph{exactly the same EGM procedure} that we used in solving the problem \emph{without} a portfolio choice (see \eqref{eq:cGoth}):
\begin{equation}\begin{gathered}\begin{aligned}
    \vctr{c} & \equiv \left(\vctr{v}^{a}\right)^{-1/\rho} \label{eq:cVecPort},
  \end{aligned}\end{gathered}\end{equation}
which, following a procedure identical to that in the EGM subsection \ref{subsec:egm}, yields an approximated consumption function $\Aprx{\cFunc}_{t}(m)$. Thus, again, we can construct the consumption function at nearly zero cost (once we have calculated $\vctr{v}^{a}$).

\hypertarget{the-point}{}

\subsubsection{The Point}\label{subsubsec:the-point}

The upshot is that all we need to do is change some of the transition equations and we can use the same solution code (both for the $\Shr$-stage and the $\cFunc$-stage) to solve the problem with either assumption (beginning-of-period or end-of-period) about the timing of portfolio choice. There is even an obvious notation for the two problems: $v_{_\arvl t[\Shr{c}]}$ can be the {period}-arrival value function for the version where the portfolio share is chosen at the beginning of the period, and $v_{_\arvl t[{c}\Shr]}$ is the {period}-arrival value for the problem where the share choice is at the end.

What is the benefit of writing effectively the identical problem in two different ways? There are several:
\begin{itemize}
\item It demonstrates that, if they are carefully constructed, Bellman problems can be ``modular''
  \begin{itemize}
  \item In a life cycle model one might want to assume that at some ages agents have a portfolio choice and at other ages they do not. The consumption problem makes no assumption about whether there is a portfolio choice decision (before or after the consumption choice), so there would be zero cost of having an age-varying problem in which you drop in whatever choices are appropriate to the life cycle stage.
  \end{itemize}
\item It emphasizes the flexibility of choice a modeler has to date variables arbitrarily. In the specific example examined here, there is a strong case for preferring the beginning-returns specification because we typically think of productivity or other shocks at date $t$ affecting the agent's state variables before the agent makes that period's choices. It would be awkward and confusing to have a productivity shock dated $t-1$ effectively applying for the problem being solved at $t$ (as in the end-returns specification).
\item It may help to identify more efficient solution methods
  \begin{itemize}
  \item For example, under the traditional formulation in equation \eqref{eq:Bellmanundated} it might not occur to a modeler that the endogenous gridpoints solution method can be used: when portfolio choice and consumption choice are considered simultaneously, EGM breaks down, because the portfolio-choice part of the problem is not susceptible to EGM solution.
    But when the problem is broken into two simpler problems, it becomes clear that EGM can still be applied to the consumption problem even though it cannot be applied to the portfolio choice problem.
  \end{itemize}
\end{itemize}

% the problem needs to be altered to bring the {step}s involving the realization of risky returns into {period} $\prd$; the variable with which the agent ends the period is now $\bNrm_{\prd}$ and to avoid confusion with the prior model in which we assumed $k_{\prd+1}={a}_{\prd}$ we will now define $\kappa_{\prd+1}={\bNrm}_{\prd}$. The continuation-value function for the $[\Shr]$ {stage} now becomes
% \begin{equation}\begin{gathered}\begin{aligned}
%   \vFunc_{\prd[\Shr]_\cntn}(a_{\prd}) & = \DiscFac \vFunc_{[\cFunc]_\arvl(\prd+1)}({\kappa}_{\prd+1})
%  \end{aligned}\end{gathered}\end{equation}
% while the dynamic budget constraint for $m$ changes to
% \begin{equation}\begin{gathered}\begin{aligned}
%   m_{\prd} & = {\kappa}_{\prd}+\TranShkEmp_{\prd}
% \end{aligned}\end{gathered}\end{equation}
% and the problem in the decision step is now
% \begin{equation}\begin{gathered}\begin{aligned}
%   \vFunc_{\prd}(m) & = \max_{c}~~\uFunc(c)+\Ex_{\prd}[\vFunc_{\prd_\cntn}(m-c)]
% \end{aligned}\end{gathered}\end{equation}
% while value as a function of $\mNrm$ in the arrival step is now
% \begin{equation}\begin{gathered}\begin{aligned}
%   \vFunc_{_{\arvl}\prd}({\kappa}_{\prd}) & = \Ex_{_\arvl\prd}[\vFunc_{\prd}(m)]
% \end{aligned}\end{gathered}\end{equation}
% which, \textit{mutatis mutandis}, is the same as in \eqref{eq:vNormed}.

% The second stage in the period will be the solution to the problem of a consumer solving an optimal portfolio choice problem before having made their consumption decision.

% We continue to assume that the consumer enters period $t$ with the single state variable, $k_{\prd}.$ But (as before) the assumption is that this is before the $t$-dated shocks have been realized. It is at this stage that the consumer makes their portfolio choice, knowing the degree of riskiness of the rate of return but not its period-$t$ realization. Designating the `share-choice' stage by the control variable $\Shr$ which is the proportion of the portfolio to invest in the risky asset, %the problem's FOC in the new notation is (compare to \eqref{eq:FOCShr}):

% It will be convenient to designate a stage within a period by naming a given stage in period $\prd$ after the control variable chosen in the middle step of the stage; in this case $\prd[\Shr]$. The consumer's problem at the $\Shr$ stage is
% \begin{equation}\begin{gathered}\begin{aligned}
%   \vFunc_{\arvlstepShr}(a_{\prd}) & = \max_{\Shr}~\vMidStpStgShr(a_{\prd},\Shr_{\prd}) \label{eq:vMidStpShr}
% \end{aligned}\end{gathered}\end{equation}
% whose FOC in the new notation is (compare to \eqref{eq:FOCShr}):
% \begin{equation}\begin{gathered}\begin{aligned}
%   0 & = \vShrMid(a_{\prd},\Shr_{\prd}).
% \label{eq:vShrEnd}
% \end{aligned}\end{gathered}\end{equation}

\subsection{Application}\label{subsec:MCApplication}


In specifying the stochastic process for $\Risky_{t+1}$, we follow the common practice of assuming that returns are lognormally distributed, $\log \Risky \sim \Nrml(\eprem+r-\sigma^{2}_{\risky}/2,\sigma^{2}_{\risky})$, where $\eprem$ is the equity premium over the return $r$ available on the riskless asset.\footnote{This guarantees that $\Ex[\Risky] = \EPrem$ is invariant to the choice of $\sigma^{2}_{\risky}$; see \handoutM{LogELogNorm}.}

As with labor income uncertainty, it is necessary to discretize the rate-of-return risk in order to have a problem that is soluble in a reasonable amount of time. We follow the same procedure as for labor income uncertainty, generating a set of $n_{\risky}$ equiprobable shocks to the rate of return; in a slight abuse of notation, we will designate the portfolio-weighted return (contingent on the chosen portfolio share in equity, and potentially contingent on any other aspect of the consumer's problem) simply as $\Rport_{i,j}$, where dependence on $i$ is allowed to permit the possibility of nonzero correlation between the return on the risky asset and the $\TranShkEmp$ shock to labor income (for example, in recessions the stock market falls and labor income also declines).


The direct expressions for the derivatives of $\vEnd$ are
\begin{equation}\begin{gathered}\begin{aligned}
    \vEndStp^{a}(a_{t},\Shr_{t}) & = \beta \left(\frac{1}{n_{\risky} n_{\TranShkEmp}}\right)\sum_{i=1}^{n_{\TranShkEmp}}\sum_{j=1}^{n_{\risky} }\Rport_{i,j} \left(\cFunc_{t+1}(\Rport_{i,j}a_{t}+\TranShkEmp_{i})\right)^{-\rho}
    \\ \vEndStp^{\Shr}(a_{t},\Shr_{t}) & = \beta \left(\frac{1}{n_{\risky} n_{\TranShkEmp}}\right)\sum_{i=1}^{n_{\TranShkEmp}}\sum_{j=1}^{n_{\risky} }(\Risky_{i,j}-R)\left(\cFunc_{t+1}(\Rport_{i,j}a_{t}+\TranShkEmp_{i})\right)^{-\rho}.
  \end{aligned}\end{gathered}\end{equation}

Writing these equations out explicitly makes a problem very apparent: For every different combination of $\{{a}_{t},\Shr_{t}\}$ that the routine wishes to consider, it must perform two double-summations of $n_{\risky} \times n_{\TranShkEmp}$ terms. Once again, there is an inefficiency if it must perform these same calculations many times for the same or nearby values of $\{{a}_{t},\Shr_{t}\}$, and again the solution is to construct an approximation to the (inverses of the) derivatives of the $\vEnd$ function.

Details of the construction of the interpolating approximations are given below; assume for the moment that we have the approximations $\Aprx{v}_{\EndStp}^{a}$ and $\Aprx{v}_{\EndStp}^{\Shr}$ in hand and we want to proceed. As noted above in the discussion of \eqref{eq:Bellmanundated}, nonlinear equation solvers can find the solution to a set of simultaneous equations. Thus we could ask one to solve
\begin{equation}\begin{gathered}\begin{aligned}
    c_{t}^{-\rho} & = \Aprx{v}^{a}_{{t_\cntn}}(m_{t}-c_{t},\Shr_{t}) %\label{eq:FOCwrtcMultContr}
    \\ 0 & = \Aprx{v}^{\Shr}_{{t_\cntn}}(m_{t}-c_{t},\Shr_{t}) \label{eq:FOCwrtw}
  \end{aligned}\end{gathered}\end{equation}
simultaneously for $c$ and $\Shr$ at the set of potential $m_{t}$ values defined in {\mVec}. However, as noted above, multidimensional constrained
maximization problems are difficult and sometimes quite slow to
solve.

There is a better way.
Define the problem
%\providecommand{\Opt}{}
%\renewcommand{\Opt}{\tilde}
%\providecommand{\vOpt}{}
%\renewcommand{\vOpt}{\overset{*}{\vFunc}}
\begin{equation}\begin{gathered}\begin{aligned}
    \Opt{v}_{{t_\cntn}}(a_{t}) & = \max_{\Shr_{t}} ~~ \vEndStp(a_{t},\Shr_{t})
    \\ & \text{s.t.} \nonumber
    \\ 0 \leq & \Shr_{t} \leq 1
  \end{aligned}\end{gathered}\end{equation}
where the tilde over $\Opt{v}(a)$ indicates that this is the $v$ that has been optimized with respect to all of the arguments other than the one still present ($a_{t}$). We solve this problem for the set of gridpoints in \code{aVec} and use the results to construct the interpolating function $\Aprx{\Opt{v}}_{t}^{a}(a_{t})$.\footnote{A faster solution could be obtained by, for each element in \code{aVec}, computing $\vEndStp^{\Shr}(m_{t}-c_{t},\Shr)$ on a grid of values of $\Shr$, and then using an approximating interpolating function (rather than the full expectation) in the \texttt{FindRoot} command. The associated speed improvement is fairly modest, however, so this route was not pursued.} With this function in hand, we can use the first order condition from the single-control problem
\begin{equation*}\begin{gathered}\begin{aligned}
    c_{t}^{-\rho} & = \Aprx{\Opt{v}}_{t}^{a}(m_{t}-c_{t})
  \end{aligned}\end{gathered}\end{equation*}
to solve for the optimal level of consumption as a function of $m_{t}$ using the endogenous gridpoints method described above. Thus we have transformed the multidimensional optimization problem into a sequence of two simple optimization problems.

Note the parallel between this trick and the fundamental insight of dynamic programming: Dynamic programming techniques transform a multi-period (or infinite-period) optimization problem into a sequence of two-period optimization problems which are individually much easier to solve; we have done the same thing here, but with multiple dimensions of controls rather than multiple periods.

\hypertarget{implementation}{}
\subsection{Implementation}

Following the discussion from section \ref{subsec:MCTheory}, to provide a numerical solution to the problem
with multiple control variables, we must define expressions that capture the expected marginal value of end-of-period
assets with respect to the level of assets and the share invested in risky assets. This is addressed in ``Multiple Control Variables.''



% Having the \texttt{GothicMC} subclass available, we can proceed with implementing the steps laid out in section \ref{subsec:MCApplication} to solve the problem at hand. Initially, the two distributions that capture the uncertainty faced by consumers in this scenario are discretized. Subsequently, the \texttt{GothicMC} class is invoked with the requisite arguments to create an instance that includes the necessary functions to depict the first-order conditions of the consumer's problem. Following that, an improved grid of end-of-period assets is established.

% Here is where we can see how the approach described in section \ref{subsec:MCApplication} is reflected in the code. For the terminal period, the optimal share of risky assets is determined for each point in \texttt{aVec\_eee}, and then the endogenous gridpoints method is employed to compute the optimal consumption level given that the share in the risky asset has been chosen optimally. It's worth noting that this solution takes into account the possibility of a binding artificial borrowing constraint.
Lastly, the interpolation process is executed for both the optimal consumption function and the optimal share of the portfolio in risky assets. These values are stored in their respective dictionaries (\texttt{mGridPort\_life}, \texttt{cGridPort\_life}, and \texttt{ShrGrid\_life}) and utilized to conduct the recursive process outlined in the `Recursion' section, thus yielding the numerical solution for all earlier periods.

\hypertarget{results-with-multiple-controls}{}
\subsection{Results With Multiple Controls}\label{subsec:results-with-multiple-controls}

Figure~\ref{fig:PlotctMultContr} plots the $t-1$ consumption function generated by the program; qualitatively it does not look much different from the consumption functions generated by the program without portfolio choice.

But Figure~\ref{fig:PlotRiskySharetOfat}, which plots the optimal portfolio share as a function of the level of assets, exhibits several interesting features. First, even with a coefficient of relative risk aversion of 6, an equity premium of only 4 percent, and an annual standard deviation in equity returns of 15 percent, the optimal choice is for the agent to invest a proportion 1 (100 percent) of the portfolio in stocks (instead of the safe bank account with riskless return $R$) at values of $a_{t}$ less than about 2. Second, the proportion of the portfolio kept in stocks is \textit{declining} in the level of wealth -- i.e., the poor should hold all of their meager assets in stocks, while the rich should be cautious, holding more of their wealth in safe bank deposits and less in stocks. This seemingly bizarre (and highly counterfactual -- see \cite{carroll:richportfolios}) prediction reflects the nature of the risks the consumer faces. Those consumers who are poor in measured financial wealth will likely derive a high proportion of future consumption from their labor income. Since by assumption labor income risk is uncorrelated with rate-of-return risk, the covariance between their future consumption and future stock returns is relatively low. By contrast, persons with relatively large wealth will be paying for a large proportion of future consumption out of that wealth, and hence if they invest too much of it in stocks their consumption will have a high covariance with stock returns. Consequently, they reduce that covariance by holding some of their wealth in the riskless form.

\hypertarget{PlotctMultContr}{}
\begin{figure}
  \includegraphics[width=6in]{./Figures/PlotctMultContr}
  \caption{$\cFunc(m_{1})$ With Portfolio Choice}
  \label{fig:PlotctMultContr}
\end{figure}

\hypertarget{PlotRiskySharetOfat}{}
\begin{figure}
  \includegraphics[width=6in]{./Figures/PlotRiskySharetOfat}
  \caption{Portfolio Share in Risky Assets in First Period $\Shr(a)$}
  \label{fig:PlotRiskySharetOfat}
\end{figure}

\hypertarget{structural-estimation}{}
\section{Structural Estimation}\label{sec:structural-estimation}

This section describes how to use the methods developed above to
structurally estimate a life-cycle consumption model, following
closely the work of
\cite{cagettiWprofiles}.\footnote{Similar structural
  estimation exercises have been also performed by
  \cite{palumbo:medical} and \cite{gpLifecycle}.} The key idea of
structural estimation is to look for the parameter values (for the
time preference rate, relative risk aversion, or other parameters)
which lead to the best possible match between simulated and empirical
moments.
%(The code for the structural estimation is in the self-contained subfolder \texttt{StructuralEstimation} in the Matlab and {\Mma} directories.)

\hypertarget{life-cycle-model}{}
\subsection{Life Cycle Model}\label{subsec:life-cycle-model}
\newcommand{\byage}{\hat}

Realistic calibration of a life cycle model needs to take into account a few things that we omitted from the bare-bones model described above. For example, the whole point of the life cycle model is that life is finite, so we need to include a realistic treatment of life expectancy; this is done easily enough, by assuming that utility accrues only if you live, so effectively the rising mortality rate with age is treated as an extra reason for discounting the future. Similarly, we may want to capture the demographic evolution of the household (e.g., arrival and departure of kids). A common way to handle that, too, is by modifying the discount factor (arrival of a kid might increase the total utility of the household by, say, 0.2, so if the `pure' time preference factor were $1.0$ the `household-size-adjusted' discount factor might be $1.2$). We therefore modify the model presented above to allow age-varying discount factors that capture both mortality and family-size changes (we just adopt the factors used by \cite{cagettiWprofiles} directly), with the probability of remaining alive between $t$ and $t+n$ captured by $\Alive_{t}^{t+n}$ and with $\hat{\beta}$ now reflecting all the age-varying discount factor adjustments (mortality, family-size, etc.). Using $\beth$ (the Hebrew cognate of $\beta$) for the `pure' time preference factor, the value function for the revised problem is
  \begin{equation}\begin{gathered}\begin{aligned}
      v_{t}(\pLvl_{t},\mLvl_{t}) & = \max_{\{\cFunc\}_{t}^{T}}~~ \uFunc(\cLvl_{t})+\ExEndPrd\left[\sum_{n=1}^{T-t}\beth^{n} \Alive_{t}^{t+n}\hat{\beta}_{t}^{t+n} \uFunc(\cLvl_{t+n}) \right] \label{eq:lifecyclemax}
    \end{aligned}\end{gathered} \end{equation}
subject to the constraints
  \begin{equation*}\begin{gathered}\begin{aligned}
      \aLvl_{t} & = \mLvl_{t}-\cLvl_{t}
      \\ \pLvl_{t+1} & = G_{t+1}\pLvl_{t}\permShk_{t+1}
      \\ \yLvl_{t+1} & = \pLvl_{t+1}\TranShkEmp _{t+1}
      \\ \mLvl_{t+1} & = R \aLvl_{t}+\yLvl_{t+1}
    \end{aligned}\end{gathered}\end{equation*}
where
  \begin{equation*}\begin{gathered}\begin{aligned}
      \Alive _{t}^{t+n} &:\text{probability of living until age $t+n$, given alive at age $t$}
      \\ \hat{\beta}_{t}^{t+n} &:\text{age-varying discount factor between ages $t$ and $t+n$}
      \\ \permShk_{t} &:\text{mean-one shock to permanent income}
      \\ \beth &:\text{time-invariant `pure' discount factor}
    \end{aligned}\end{gathered}\end{equation*}
and all the other variables are defined as in section \ref{sec:the-problem}.

Households start life at age $s=25$ and live with probability 1 until retirement
($s=65$). Thereafter the survival probability shrinks every year and
agents are dead by $s=91$, as assumed by Cagetti. % Note that in addition to a typical time-invariant discount factor $\beth$, there is a time-varying discount factor $\hat{\DiscFac}_{s}$ in (\ref{eq:lifecyclemax}) which can be used to capture the effect of age-varying demographic variables (e.g.\ changes in family size).
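In sketch form, the age-varying ingredients combine into an effective one-period discount factor to be applied at each backward step (all numbers below are illustrative placeholders rather than the Cagetti values):
\begin{verbatim}
import numpy as np

n_ages = 66                      # ages 25..90; death certain by 91
beth = 0.96                      # `pure' time preference factor
LivPrb = np.ones(n_ages)         # one-period survival probabilities
LivPrb[40:] = np.linspace(0.99, 0.0, n_ages - 40)  # shrink after 65
beta_hat = np.ones(n_ages)       # family-size etc. adjustments

# Effective one-period discount factor at each age:
DiscFac_eff = beth * LivPrb * beta_hat
\end{verbatim}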
Transitory and permanent shocks are distributed as follows:
  \begin{equation}\begin{gathered}\begin{aligned}
      \Xi_{s} & =
      \begin{cases}
        0\phantom{/(1-\pZero)} & \text{with probability $\pZero>0$} \\
        \TranShkEmp_{s}/(1-\pZero) & \text{with probability $(1-\pZero)$, where $\log \TranShkEmp_{s}\thicksim \Nrml(-\sigma_{\TranShkEmp}^{2}/2,\sigma_{\TranShkEmp}^{2})$}\\
      \end{cases}\\
      \log \permShk_{s} &\thicksim \Nrml(-\sigma_{\permShk}^{2}/2,\sigma_{\permShk}^{2})
    \end{aligned}\end{gathered}\end{equation}
  where $\pZero$ is the probability of unemployment (and unemployment shocks are turned off after retirement); the division of the employed shock by $(1-\pZero)$ keeps the mean of $\Xi_{s}$ equal to one.

The parameter values for the shocks are taken from Carroll~\citeyearpar{carroll:brookings}: $\pZero=0.5/100$, $\sigma _{\TranShkEmp }=0.1$, and $\sigma_{\permShk}=0.1$.\footnote{Note that $\sigma _{\TranShkEmp}=0.1$ is smaller than the estimate for college graduates in
  Carroll and Samwick~\citeyearpar{carroll&samwick:nature} ($=0.197=\sqrt{0.039}$) which is used by Cagetti~\citeyearpar{cagettiWprofiles}. The reason for this choice is that Carroll and Samwick~\citeyearpar{carroll&samwick:nature} themselves argue that their estimate of $\sigma_{\TranShkEmp }$ is almost certainly increased by measurement error.} The income growth profile $G_{t}$ is from Carroll~\citeyearpar{carrollBSLCPIH} and the values of $\Alive_{t}$ and $\hat{\beta}_{t}$ are obtained from Cagetti~\citeyearpar{cagettiWprofiles} (Figure \ref{fig:TimeVaryingParam}).\footnote{The income growth profile is the one used by Carroll for operatives. Cagetti computes the time-varying discount factor by educational groups using the methodology proposed by Attanasio et al.~\citeyearpar{AttanasioBanksMeghirWeber} and the survival probabilities from the 1995 Life Tables (National Center for Health Statistics 1998).} The interest factor $R$ is assumed to equal $1.03$. The model parameters are included in Table \ref{table:StrEstParams}.

\hypertarget{PlotTimeVaryingParam}{}
\begin{figure}[h]
  \includegraphics[width=6in]{./Figures/PlotTimeVaryingParam}
  \caption{Time Varying Parameters}
  \label{fig:TimeVaryingParam}
\end{figure}

\begin{table}[h]
  \caption{Parameter Values}\label{table:StrEstParams}
  \begin{center}
    \begin{tabular}{ccl}
      \hline\hline
      $\sigma _{\TranShkEmp}$ & $0.1$ & Carroll~\citeyearpar{carroll:brookings}
      \\ $\sigma _{\permShk}$ & $0.1$ & Carroll~\citeyearpar{carroll:brookings}
      \\ $\pZero$ & $0.005$ & Carroll~\citeyearpar{carroll:brookings}
      \\ $G_{s}$ & figure \ref{fig:TimeVaryingParam} & Carroll~\citeyearpar{carrollBSLCPIH}
      \\ $\hat{\beta}_{s},\Alive_{s}$ & figure \ref{fig:TimeVaryingParam} & Cagetti~\citeyearpar{cagettiWprofiles}
      \\$R$ & $1.03$ & Cagetti~\citeyearpar{cagettiWprofiles}\\
      \hline
    \end{tabular}
  \end{center}
\end{table}

The structural estimation of the parameters $\beth$ and $\rho$ is carried out using
the procedure specified in the following section, which is implemented in
the \texttt{StructEstimation.py} file. This file consists of two main components: the
first defines the objects required to execute the structural estimation procedure,
while the second executes the procedure along with various optional
experiments and their corresponding commands. The next section elaborates on the procedure
and its accompanying code implementation in greater detail.
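As an implementation note, the discretized transitory-shock distribution just specified can be constructed along the following lines (a sketch with our own function name; the employed draws are conditional means of the lognormal within equiprobable bands, scaled by $1/(1-\pZero)$ so that the overall mean stays at one):
\begin{verbatim}
import numpy as np
from scipy.stats import norm

def discretize_tran_shock(sigma, pZero, n=7):
    # Unemployment draw of 0 with probability pZero, plus n
    # equiprobable lognormal draws scaled so the mean is one.
    mu = -sigma**2 / 2.0                      # mean-one lognormal
    edges = norm.ppf(np.linspace(0.0, 1.0, n + 1))
    # conditional mean of the lognormal within each band
    cond = (np.exp(mu + sigma**2 / 2.0) * n
            * (norm.cdf(edges[1:] - sigma)
               - norm.cdf(edges[:-1] - sigma)))
    vals = np.append(0.0, cond / (1.0 - pZero))
    prob = np.append(pZero, np.full(n, (1.0 - pZero) / n))
    return vals, prob

vals, prob = discretize_tran_shock(sigma=0.1, pZero=0.005)
assert abs(vals @ prob - 1.0) < 1e-8   # mean one by construction
\end{verbatim}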
\subsection{Estimation}

When economists say that they are performing ``structural estimation''
of a model like this, they mean that they have devised a
formal procedure for searching for values for the parameters $\beth$
and $\rho$ at which some measure of the model's outcome (like
``median wealth by age'') is as close as possible to an empirical measure
of the same thing. Here, we choose to match the median of the
wealth to permanent income ratio across 7 age groups, from age $26-30$
up to $56-60$.\footnote{\cite{cagettiWprofiles}
  matches wealth levels rather than wealth to income ratios. We
  believe it is more appropriate to match ratios both because the
  ratios are the state variable in the theory and because empirical
  moments for ratios of wealth to income are not influenced by the
  method used to remove the effects of inflation and productivity
  growth.} The choice of matching the medians rather than the means is
motivated by the fact that the wealth distribution is much more
concentrated at the top than the model is capable of explaining using a single
set of parameter values. This means that in practice one must pick
some portion of the population whom one wants to match well; since the
model has little hope of capturing the behavior of Bill Gates, but
might conceivably match the behavior of Homer Simpson, we choose to
match medians rather than means.

As explained in section \ref{sec:normalization}, it is convenient to work with the normalized version of the model, which can be written in Bellman form as:
  \begin{equation*}\begin{gathered}\begin{aligned}
      v_{t}(m_{t}) & = \max_{{c}_{t}}~~~ \uFunc(c_{t})+\beth\Alive_{t+1}\hat{\beta}_{t+1}
      \Ex_{t}[(\permShk_{t+1}G_{t+1})^{1-\rho}v_{t+1}(m_{t+1})] \\
      & \text{s.t.} \nonumber \\
      a_{t} & = m_{t}-c_{t} \nonumber
      \\ m_{t+1} & = a_{t}\underbrace{\left(\frac{R}{\permShk_{t+1}G_{t+1}}\right)}_{\equiv \mathcal{R}_{t+1}}+ ~\TranShkEmp_{t+1}
    \end{aligned}\end{gathered}\end{equation*}
with the first order condition:
  \begin{equation}\begin{gathered}\begin{aligned}
      \uFunc^{c}(c_{t}) & = \beth\Alive_{t+1}\hat{\beta}_{t+1}R \Ex_{t}\left[\uFunc^{c}\left(\permShk_{t+1}G_{t+1}\cFunc_{t+1}\left(a_{t}\mathcal{R}_{t+1}+\TranShkEmp_{t+1}\right)\right)\right]\label{eq:FOCLifeCycle}
      .
    \end{aligned}\end{gathered}\end{equation}

The first substantive {step} in this estimation procedure is
to solve for the consumption functions at each age. We need to
discretize the shock distribution and solve for the policy
functions by backward induction using equation (\ref{eq:FOCLifeCycle}),
following the procedure in sections \ref{sec:solving-the-next} and
`Recursion.' The latter routine
is slightly complicated by the fact that we are considering a
life-cycle model, and therefore the growth rate of permanent income,
the probability of death, the time-varying discount factor, and the
distribution of shocks will differ across the years. We thus
must ensure that at each backward iteration the right parameter
values are used.

Correspondingly, the first part of the \texttt{StructEstimation.py} file begins by defining the agent type by inheriting from the baseline agent type \texttt{IndShockConsumerType}, with the modification to include time-varying discount factors. Next, an instance of this ``life-cycle'' consumer is created for the estimation procedure.
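In sketch form (the actual class and argument names in \texttt{StructEstimation.py} may differ; \texttt{add\_to\_time\_vary} and \texttt{del\_from\_time\_inv} are the relevant HARK agent methods):
\begin{verbatim}
from HARK.ConsumptionSaving.ConsIndShockModel import \
    IndShockConsumerType

class LifeCycleConsumerType(IndShockConsumerType):
    # Baseline agent, except DiscFac varies with age.
    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.add_to_time_vary("DiscFac")   # one DiscFac per age
        self.del_from_time_inv("DiscFac")

# Hypothetical instantiation, with DiscFac the list of effective
# age-varying discount factors (beth * survival * Cagetti factors):
# agent = LifeCycleConsumerType(cycles=1, DiscFac=DiscFac_eff, ...)
\end{verbatim}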
The number of periods for the life cycle of a given agent is set and, following Cagetti~\citeyearpar{cagettiWprofiles}, we initialize the wealth to income ratio of agents at age $25$ by randomly assigning each agent one of the values $0.17$, $0.50$, and $0.83$ with equal probability. In particular, we consider a population of agents at age 25 and follow their consumption and wealth accumulation dynamics as they reach the age of $60$, using the appropriate age-specific consumption functions and the age-varying parameters. The simulated medians are obtained by taking the medians of the wealth to income ratio of the $7$ age groups.

To complete the creation of the consumer type needed for the simulation, a history of shocks is drawn for each agent across all periods by invoking the \texttt{make\_shock\_history} function. This involves discretizing the shock distribution with as many points as the number of agents we want to simulate and then randomly permuting this shock vector once for each period to be simulated. In this way, we obtain a time-varying shock for each agent. This is much more time-efficient than drawing a new shock for each agent from the shock distribution in every period, and it also ensures a stable distribution of shocks across the simulation periods even for a small number of agents. (Similarly, in order to speed up the process, at each backward iteration we compute the consumption function and other variables as a vector all at once.)

With the age-varying consumption functions derived from the life-cycle agent, we can proceed to generate simulated data and compute the corresponding medians. Estimating the model involves comparing these simulated medians with empirical medians, measuring the model's success by calculating the difference between the two. However, before performing the necessary steps of solving and simulating the model to generate simulated moments, it is important to note a difficulty in producing the target moments using the available data.

Specifically, defining $\xi$ as the set of parameters
to be estimated (in the current case $\xi =\{\rho ,\beth\}$), we could search for
the parameter values which solve
  \begin{equation}
    \begin{gathered}
      \begin{aligned}
        \min_{\xi} \sum_{\tau=1}^{7} |\Shr^{\tau} -\mathbf{s}^{\tau}(\xi)| \label{eq:naivePowell}
      \end{aligned}
    \end{gathered}
  \end{equation}
where $\Shr^{\tau }$ and $\mathbf{s}^{\tau}$ are respectively the empirical
and simulated medians of the wealth to permanent income ratio for age group $\tau$.
A drawback of proceeding in this way is that it treats the empirically
estimated medians as though they reflected perfect measurements of the
truth. Imagine, however, that one of the age groups happened to have
(in the consumer survey) four times as many data observations as
another age group; then we would expect the median to be more
precisely estimated for the age group with more observations; yet
\eqref{eq:naivePowell} assigns equal importance to a deviation between
the model and the data for all age groups.
We can get around this problem (and a variety of others) by instead minimizing a slightly more complex object:
  \begin{equation}
    \min_{\xi}\sum_{i=1}^{N}\weight _{i}\left|\Shr_{i}^{\tau }-\mathbf{s}^{\tau}(\xi )\right|\label{eq:StructEstim}
  \end{equation}
where $\weight_{i}$ is the weight of household $i$ in the entire
population,\footnote{The Survey of Consumer Finances includes many
  more high-wealth households than exist in the population as a whole;
  therefore if one wants to produce population-representative
  statistics, one must be careful to weight each observation by the
  factor that reflects its ``true'' weight in the population.} and
$\Shr_{i}^{\tau }$ is the empirical wealth to permanent income
ratio of household $i$ whose head belongs to age group
$\tau$. $\weight _{i}$ is needed because unequal weight is assigned to
each observation in the Survey of Consumer Finances (SCF). The
absolute value is used since the formula is based on the fact that the
median is the value that minimizes the sum of the absolute deviations
from itself.

% In the absence of observation specific weights, equation (\ref{eq:MinStructEstim}) can be simplified to require the minimization of the distance between the empirical and simulated medians.

With this in mind, we turn our attention to the computation
of the weighted median wealth target moments for each age cohort,
using data on household wealth from the 2004 Survey of Consumer Finances.
The objects necessary to accomplish this task are \texttt{weighted\_median} and
\texttt{get\_targeted\_moments}. The actual data are taken from several waves of the SCF, and the medians
and means for each age category are plotted in figure \ref{fig:MeanMedianSCF}.
More details on the SCF data are included in appendix \ref{app:scf-data}.

\hypertarget{PlotMeanMedianSCFcollegeGrads}{}
\begin{figure}
  % \includegraphics[width=6in]{./Figures/PlotMeanMedianSCF}} % weird mean value
  \includegraphics[width=6in]{./Figures/PlotMeanMedianSCFcollegeGrads}
  \caption{Wealth to Permanent Income Ratios from SCF (means (dashed) and medians (solid))}
  \label{fig:MeanMedianSCF}
\end{figure}

We now turn our attention to the two key functions in this section of the code file. The first, \texttt{simulate\_moments}, executes the solving (\texttt{solve}) and simulation (\texttt{simulation}) steps for the defined life-cycle agent. Subsequently, the function uses the agents' tracked levels of wealth based on their optimal consumption behavior to compute and store the simulated median wealth to income ratio for each age cohort. The second function, \texttt{smmObjectiveFxn}, calls the \texttt{simulate\_moments} function to create the objective function described in (\ref{eq:StructEstim}), which is necessary to perform the SMM estimation.
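Hedged sketches of the corresponding logic (the real definitions live in the code file; only the weighted-median rule and the form of \eqref{eq:StructEstim} are intended to carry over):
\begin{verbatim}
import numpy as np

def weighted_median(values, weights):
    # Value at which the cumulative normalized weight reaches 1/2.
    order = np.argsort(values)
    v, w = np.asarray(values)[order], np.asarray(weights)[order]
    cum = np.cumsum(w) / np.sum(w)
    return v[np.searchsorted(cum, 0.5)]

def smm_objective(params, ratios, weights, groups, simulate_medians):
    # Weighted sum of absolute deviations between each household's
    # wealth-to-income ratio and the simulated median of its age
    # group, as in equation (eq:StructEstim).
    sim = simulate_medians(params)  # age group -> simulated median
    return sum(w * abs(x - sim[tau])
               for x, w, tau in zip(ratios, weights, groups))
\end{verbatim}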
% \begin{equation}\begin{gathered}\begin{aligned}
%  \lefteqn{ \texttt{GapEmpiricalSimulatedMedians$[\CRRA,\beth]$:=}} \nonumber \\
%  &[&\texttt{ConstructcFuncLife$[\CRRA,\beth]$;}\nonumber\\
%  &\texttt{Simulate;}\nonumber\\
%  &\sum\limits_{i}^{N}\weight _{i}\left|\Shr_{i}^{\tau }-\mathbf{s}^{\tau}(\xi )\right| \nonumber\\
%  &];&\nonumber
% \end{aligned}\end{gathered}\end{equation}

Thus, for a given pair of the parameters to be estimated, a single
call to the function \texttt{smmObjectiveFxn} executes the following:
\begin{enumerate}
\item solves for the consumption functions for the life-cycle agent
\item simulates the data and computes the simulated medians
\item returns the value of equation (\ref{eq:StructEstim})
\end{enumerate}

We delegate the task of finding the coefficients that minimize the \texttt{smmObjectiveFxn} function to the \texttt{minimize\_nelder\_mead} function, which is defined elsewhere and called in the second part of this file. This task can be quite slow and rather problematic if the \texttt{smmObjectiveFxn} function has very flat regions or sharp features. It is thus wise to verify the accuracy of the solution, for example by experimenting with a variety of alternative starting values for the parameter search.

The final object defined in this first part of the \texttt{StructEstimation.py}
file is \texttt{calculateStandardErrorsByBootstrap}. As the name suggests, the
purpose of this function is to compute the standard errors by bootstrap.\footnote{For a
  treatment of the advantages of the bootstrap see
  Horowitz~\citeyearpar{horowitzBootstrap}.} This involves:
\begin{enumerate}
\item drawing new shocks for the simulation
\item drawing a random sample (with replacement) of actual data from the SCF
\item obtaining new estimates for $\rho$ and $\beth$
\end{enumerate}
We repeat the above procedure several times (\texttt{Bootstrap}) and
take the standard deviation for each of the estimated parameters across the various bootstrap iterations.

\subsubsection{An Aside on Computing Sensitivity Measures}\label{subsubsec:sensmeas}


A common drawback of structural estimation procedures is a lack of transparency in their estimates. As \cite{andrews2017measuring} note, a researcher employing such structural empirical methods may be interested in how alternative assumptions (such as misspecification or measurement bias in the data) would ``change the moments of the data that the estimator uses as inputs, and how changes in these moments affect the estimates.'' The authors provide a measure of sensitivity for a given estimator that makes it easy to map the effects of different assumptions about the moments into predictable bias in the estimates for non-linear models.

In the language of \cite{andrews2017measuring}, section \ref{sec:structural-estimation} is aimed at providing an estimate of $\xi =\{\rho ,\beth\}$, which has some true value $\xi_0$ by assumption. Under the researcher's maintained assumption $a_0$, the empirical targets computed from the SCF are measured accurately. These moments of the data are precisely what determine our estimate $\hat{\xi}$, which minimizes (\ref{eq:StructEstim}). Under alternative assumptions $a$, such that a given cohort is mismeasured in the survey, a different estimate would be computed.
Using the plug-in estimate provided by the authors, we can see quantitatively how our estimates would change under these alternative assumptions $a$, which correspond to mismeasurement in the median wealth to income ratio for a given age cohort.

\subsection{Results}
The second part of the file \texttt{StructEstimation.py}
defines a function \texttt{main} which produces our $\rho$ and
$\beth$ estimates with standard errors using 10,000 simulated
agents by setting the positional arguments \texttt{estimate\_model} and
\texttt{compute\_standard\_errors} to true.\footnote{The procedure is: First we calculate the $\rho$ and
  $\beth$ estimates as the minimizer of equation
  (\ref{eq:StructEstim}) using the actual SCF data. Then, we apply the
  \texttt{Bootstrap} function several times to obtain the standard
  error of our estimates.} Results are reported in Table
\ref{tab:EstResults}.\footnote{Unlike Cagetti~\citeyearpar{cagettiWprofiles}, who estimates a separate set of
  parameters for college graduates, high school graduates, and high
  school dropouts, we perform the structural estimation on
  the full population.}



  \begin{table}[h]
    \caption{Estimation Results}\label{tab:EstResults}
    \center
    \begin{tabular}{cc}
      \hline
      $\rho $ & $\beth$\\
      \hline
      $3.69$ & $0.88$\\
      $(0.047)$ & $(0.002)$\\
      \hline
    \end{tabular}
  \end{table}

The literature on consumption and saving behavior over the life cycle in the presence of labor income uncertainty\footnote{For example, see \cite{gpLifecycle} for an exposition of this.} warns us to be careful in disentangling the effects of time preference and risk aversion when describing the optimal behavior of households in this setting. Since the precautionary saving motive dominates in the early stages of life, the coefficient of relative risk aversion (along with expected labor income growth) has a larger effect on optimal consumption and saving behavior early in life, through its magnitude relative to the interest rate. Over time, life-cycle considerations (such as saving for retirement) become more important, and the time preference factor plays a larger role in determining optimal behavior for the older cohorts.

Using the positional argument \texttt{compute\_sensitivity}, Figure \ref{fig:PlotSensitivityMeasure} provides a plot of the plug-in estimate of the sensitivity measure described in section \ref{subsubsec:sensmeas}. As you can see from the figure, the inverse relationship between $\rho$ and $\beth$ over the life cycle is retained by the sensitivity measure. Specifically, under the alternative assumption that \textit{a particular cohort is mismeasured in the SCF dataset}, the figure shows that our estimates of $\rho$ and $\beth$ change in a predictable way.

Suppose that there are not enough observations of the oldest cohort of households in the sample. Suppose further that the researcher predicts that correcting this mismeasurement by adding more observations of these households would correspond to a higher median wealth to income ratio for this cohort. In this case, our estimate of the time preference factor should increase: the behavior of these older households is driven by their time preference, so a higher value of $\beth$ is required to match the affected wealth to income targets under this alternative assumption. Since risk aversion is less important in explaining the behavior of this cohort, a lower value of $\rho$ is required to match the affected empirical moments.
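For concreteness, the plug-in measure itself is straightforward to compute once the Jacobian of the simulated moments with respect to the parameters is in hand (for example, by finite differences around the estimates); a sketch:
\begin{verbatim}
import numpy as np

def sensitivity(G, W=None):
    # Andrews-Gentzkow-Shapiro plug-in sensitivity
    #   Lambda = -(G'WG)^{-1} G'W
    # G: k-by-p Jacobian of the k simulated moments with respect
    #    to the p parameters, evaluated at the estimates.
    # Column j of Lambda gives the first-order change in the
    # estimates from perturbing moment j (e.g., the median
    # wealth-to-income ratio of one mismeasured age cohort).
    k, p = G.shape
    W = np.eye(k) if W is None else W
    return -np.linalg.solve(G.T @ W @ G, G.T @ W)
\end{verbatim}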
-
-To recap, the sensitivity measure not only matches our intuition about the inverse relationship between $\rho$ and $\beth$ over the life-cycle, but also provides a quantitative estimate of what would happen to our estimates of these parameters under the alternative assumption that the data are mismeasured in some way.
-
-\hypertarget{PlotSensitivityMeasure}{}
-\begin{figure}
- \includegraphics[width=6in]{./Figures/Sensitivity.pdf}
- \caption{Sensitivity of the $\{\rho,\beth\}$ Estimates to Alternative Mismeasurement Assumptions.}
- \label{fig:PlotSensitivityMeasure}
-\end{figure}
-
-By setting the positional argument \texttt{make\_contour\_plot} to true, Figure \ref{fig:PlotContourMedianStrEst} shows the contour plot of the \texttt{smmObjectiveFxn} function and the parameter estimates. The contour plot shows equally spaced isoquants of the \texttt{smmObjectiveFxn} function, i.e.\ the pairs of $\rho$ and $\beth$ which lead to the same deviations between simulated and empirical medians (equivalent values of equation (\ref{eq:StructEstim})). Interestingly, there is a large, rather flat region; more formally, a broad set of parameter pairs leads to similar simulated wealth to income ratios. Intuitively, the flatter and larger this region, the harder it is for the structural estimation procedure to precisely identify the parameters.
-
-
-\hypertarget{PlotContourMedianStrEst}{}
-\begin{figure}
- \includegraphics[width=6in]{./Figures/SMMcontour.pdf}
- \caption{Contour Plot (larger values are shown lighter) with $\{\rho,\beth\}$ Estimates (red dot).}
- \label{fig:PlotContourMedianStrEst}
-\end{figure}
-
-
-
-\clearpage\vfill\eject
-
-\centerline{\LARGE Appendices}\vspace{0.2in}
-
-\appendix
-
-
-\hypertarget{scf-data}{}
-\section{SCF Data}\label{app:scf-data}
-
-Data used in the estimation are constructed using the SCF 1992, 1995, 1998, 2001, and 2004 waves. The definition of wealth is net worth including housing wealth, but excluding pensions and Social Security. The data set contains only households whose heads are aged 26--60 and excludes singles, following Cagetti~\citeyearpar{cagettiWprofiles}.\footnote{Cagetti~\citeyearpar{cagettiWprofiles}\ argues that younger households should be dropped since educational choice is not modeled. Also, he drops singles, since they include a large number of single mothers whose saving behavior is influenced by welfare.} Furthermore, the data set contains only households whose heads are college graduates. The total sample size is 4,774.
-
-In the 1995 through 2004 waves of the SCF, levels of \textit{normal} income are reported. The question in the questionnaire is ``About what would your income have been if it had been a normal year?'' We consider the level of normal income as corresponding to the model's theoretical object $P$, permanent noncapital income. Levels of normal income are not reported in the 1992 wave. Instead, in this wave there is a variable which reports whether the level of income is normal or not. For the 1992 wave, only observations which report that the level of income is normal are used, and the income levels of these retained observations are interpreted as levels of permanent income.
-
-Normal income levels in the SCF are before-tax figures. These before-tax permanent income figures must be rescaled so that the median of the rescaled permanent income of each age group matches the median of each age group's income assumed in the simulation. This rescaled permanent income is interpreted as after-tax permanent income. Rescaling is crucial since, in the estimation, empirical profiles are matched with simulated ones which are generated using after-tax permanent income (recall the income process assumed in the main text). The wealth to permanent income ratio is computed by dividing the level of wealth by the level of (after-tax) permanent income, and this ratio is used in the estimation.\footnote{Please refer to the archive code for details of how these after-tax measures of $P$ are constructed.}
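To make the rescaling step concrete, the sketch below shows one way it could be implemented in Python with \texttt{pandas}; the function and column names (\texttt{age\_group}, \texttt{networth}, \texttt{normal\_inc}) are hypothetical illustrations, not the repository's actual variables.

\begin{verbatim}
import pandas as pd

def rescale_to_after_tax(scf, target_median):
    # scf: one row per household, with columns 'age_group', 'networth',
    #      and before-tax 'normal_inc'
    # target_median: Series mapping each age group to the after-tax
    #      median income assumed in the simulation
    out = scf.copy()
    group_median = out.groupby("age_group")["normal_inc"].transform("median")
    target = out["age_group"].map(target_median)
    # Scale incomes so each age group's median matches its target.
    out["perm_inc_aftertax"] = out["normal_inc"] * target / group_median
    # Wealth-to-permanent-income ratio: the object matched in estimation.
    out["wealth_ratio"] = out["networth"] / out["perm_inc_aftertax"]
    return out
\end{verbatim}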
-
-
-\vfill\clearpage
-
-% Allows two (optional) supplements to hard-wired \texname.bib bibfile:
-% economics.bib is a default bibfile that supplies anything missing elsewhere
-% Add-Refs.bib is an override bibfile that supplants anything in \texfile.bib or economics.bib
-\provideboolean{AddRefsExists}
-\provideboolean{economicsExists}
-\provideboolean{BothExist}
-\provideboolean{NeitherExists}
-\setboolean{BothExist}{true}
-\setboolean{NeitherExists}{true}
-
-\IfFileExists{\econtexRoot/\texname-Add-Refs.bib}{
- % then
- \typeout{References in Add-Refs.bib will take precedence over those elsewhere}
- \setboolean{AddRefsExists}{true}
- \setboolean{NeitherExists}{false} % Default is true
-}{
- % else
- \setboolean{AddRefsExists}{false} % No added refs exist so defaults will be used
- \setboolean{BothExist}{false} % Default is that Add-Refs and economics.bib both exist
-}
-
-% Deal with case where economics.bib is found by kpsewhich
-\IfFileExists{/usr/local/texlive/texmf-local/bibtex/bib/economics.bib}{
- % then
- \typeout{References in default global economics.bib will be used for items not found elsewhere}
- \setboolean{economicsExists}{true}
- \setboolean{NeitherExists}{false}
-}{
- % else
- \typeout{Found no global database file}
- \setboolean{economicsExists}{false}
- \setboolean{BothExist}{false}
-}
-
-\ifthenelse{\boolean{showPageHead}}{ %then
- \clearpairofpagestyles % No header for references pages
- }{} % No head has been set to clear
-
-\ifthenelse{\boolean{BothExist}}{
- % then use both
- \typeout{bibliography{\econtexRoot/\texname-Add-Refs,\econtexRoot/\texname,economics}}
- \bibliography{\econtexRoot/\texname-Add-Refs,\econtexRoot/\texname,economics}
- % else both do not exist
-}{ % maybe neither does?
- \ifthenelse{\boolean{NeitherExists}}{ - \typeout{bibliography{\texname}} - \bibliography{\texname}}{ - % no -- at least one exists - \ifthenelse{\boolean{AddRefsExists}}{% yes - \typeout{\bibliography{\econtexRoot/\texname-Add-Refs,\econtexRoot/\texname}} - \bibliography{\econtexRoot/\texname-Add-Refs,\econtexRoot/\texname}} - {% else \texname-Add-Refs does not exist - \typeout{\bibliography{\econtexRoot/\texname,economics}} - \bibliography{ \econtexRoot/\texname,economics}} - } % end of picking the one that exists -} % end of testing whether neither exists - - -\trp{ - \pagebreak - \hypertarget{Appendices}{} % Allows link to [url-of-paper]#Appendices - \ifthenelse{\boolean{Web}}{}{% Web version has no page headers - \chead[Appendices]{Appendices} % but PDF version does - \appendixpage % Reset formatting for appendices - } - \appendix - \addcontentsline{toc}{section}{Appendices} % Say "Appendices" - - \subfile{TRP_aInU} -}{} - - -\end{document}\endinput % \endinput prevents any processing of subsequent stuff - -% Local Variables: -% TeX-master-file: t -% eval: (setq TeX-command-list (assq-delete-all (car (assoc "BibTeX" TeX-command-list)) TeX-command-list)) -% eval: (setq TeX-command-list (assq-delete-all (car (assoc "Biber" TeX-command-list)) TeX-command-list)) -% eval: (setq TeX-command-list (remove '("BibTeX" "%(bibtex) %s" TeX-run-BibTeX nil t :help "Run BibTeX") TeX-command-list)) -% eval: (setq TeX-command-list (remove '("BibTeX" "bibtex %s" TeX-run-BibTeX nil (plain-tex-mode latex-mode doctex-mode ams-tex-mode texinfo-mode context-mode) :help "Run BibTeX") TeX-command-list)) -% eval: (setq TeX-command-list (remove '("BibTeX" "bibtex %s" TeX-run-BibTeX nil t :help "Run BibTeX") TeX-command-list)) -% eval: (add-to-list 'TeX-command-list '("BibTeX" "bibtex %s" TeX-run-BibTeX nil t :help "Run BibTeX") t) -% eval: (add-to-list 'TeX-command-list '("BibTeX" "bibtex %s" TeX-run-BibTeX nil (plain-tex-mode latex-mode doctex-mode ams-tex-mode texinfo-mode context-mode) :help "Run BibTeX") t) -% TeX-PDF-mode: t -% TeX-file-line-error: t -% TeX-debug-warnings: t -% LaTeX-command-style: (("" "pdflatex -output-format=PDF %(file-line-error) %(extraopts) %S%(PDFout)")) -% TeX-source-correlate-mode: t -% TeX-parse-self: t -% TeX-parse-all-errors: t -% eval: (cond ((string-equal system-type "darwin") (progn (setq TeX-view-program-list '(("Skim" "/Applications/Skim.app/Contents/SharedSupport/displayline -b %n %o %b")))))) -% eval: (cond ((string-equal system-type "gnu/linux") (progn (setq TeX-view-program-list '(("Evince" "evince --page-index=%(outpage) %o")))))) -% eval: (cond ((string-equal system-type "gnu/linux") (progn (setq TeX-view-program-selection '((output-pdf "Evince")))))) -% eval: (add-hook 'LaTeX-mode-hook 'turn-on-reftex) -% eval: (setq reftex-plug-into-AUCTeX t) -% coding: utf-8 -% eval: (setq TeX-fold-macro-spec-list t) -% eval: (setq global-prettify-symbols-mode t) -% eval: (add-to-list 'TeX-fold-macro-spec-list '("[f]" ("figure"))) -% eval: (add-to-list 'TeX-fold-macro-spec-list '("[t]" ("table"))) -% eval: (add-to-list 'TeX-fold-env-spec-list '("[comment]" ("comment"))) -% eval: (add-to-list 'TeX-fold-math-spec-list '("[eq]" ("equation"))) -% eval: (add-to-list 'TeX-fold-math-spec-list '("[inline]" ("\\(" "\\)"))) -% eval: (setq prettify-symbols-unprettify-at-point 'right-edge) -% eval: (TeX-fold-buffer) -% End: - diff --git a/docs/SolvingMicroDSOPs-options-clean.tex b/docs/SolvingMicroDSOPs-options-clean.tex deleted file mode 100644 index 2ed0019b2..000000000 --- 
a/docs/SolvingMicroDSOPs-options-clean.tex +++ /dev/null @@ -1,63 +0,0 @@ -% Controls for which of various variant versions to create - -\provideboolean{ctwVersion}\setboolean{ctwVersion}{false}\newcommand{\ctw}{\ifthenelse{\boolean{ctwVersion}}} % {cctw} -\provideboolean{trpVersion}\setboolean{trpVersion}{false}\newcommand{\trp}{\ifthenelse{\boolean{trpVersion}}} % {trp} -% \setboolean{trpVersion}{true} % {trp} -\setboolean{trpVersion}{false} % {trp} - -% Draft mode puts \labels of figs, tables, eqns in margin -\provideboolean{draftmode}\setboolean{draftmode}{true} -% \setboolean{draftmode}{false} -\newcommand{\Draft}{\ifthenelse{\boolean{draftmode}}} -\Draft{\usepackage{showlabels} - \renewcommand{\showlabelsetlabel}[1]{\tiny #1} -}{} - -% Include or exclude Method of Moderation material -\provideboolean{MoMVersion}\setboolean{MoMVersion}{true} -%\setboolean{MoMVersion}{false} -\newcommand{\MoM}{\ifthenelse{\boolean{MoMVersion}}} - -% Get extra style stuff for cctwMoM -\MoM{ % {cctw} - \usepackage{\LaTeXInputs/cctwMoM} % {cctw} -}{} % {cctw} - -% Versions with or without permanent shocks -% Seems to be defunct - remove -\provideboolean{PermShkVersion}\setboolean{PermShkVersion}{true} -\setboolean{PermShkVersion}{false} -\newcommand{\PermShkOn}{\ifthenelse{\boolean{PermShkVersion}}} - -% MPCMatch version does Hermite polynomials for the interpolation -% that match both the slope and the intercept at the gridpoints -\provideboolean{MPCMatchVersion}\setboolean{MPCMatchVersion}{true} -\newcommand{\MPCMatch}{\ifthenelse{\boolean{MPCMatchVersion}}} - -% margin notes -- to be deleted -\provideboolean{MyNotes}\setboolean{MyNotes}{true} -\setboolean{MyNotes}{false} - -% Show things that need fixing -\provideboolean{ToFix}\setboolean{ToFix}{true} -% \setboolean{ToFix}{false} -\newcommand{\Fix}{\ifthenelse{\boolean{ToFix}}} - -% Show or hide the time subscripts for -\provideboolean{hidetime}\setboolean{hidetime}{true} -% \setboolean{hidetime}{false} -\newcommand{\timehide}{\ifthenelse{\boolean{hidetime}}} - -\provideboolean{verbon}\setboolean{verbon}{true} -\newcommand{\onverb}{\ifthenelse{\boolean{verbon}}} - -\setboolean{showPageHead}{true} -% \econtexSetup sets boolean variable 'Web' to true if making html not pdf -\ifthenelse{\boolean{Web}}{ % then - \setboolean{showPageHead}{false} % no pages, so no page head, on web -}{ % else not for web - \usepackage{scrlayer-scrpage} % Package for page headers if PDF - \automark[section]{section} - \usepackage{caption} % allow suppression of appendix figures in NoAppendix PDF -} - diff --git a/docs/SolvingMicroDSOPs.db b/docs/SolvingMicroDSOPs.db deleted file mode 100644 index b764dde1f..000000000 Binary files a/docs/SolvingMicroDSOPs.db and /dev/null differ diff --git a/docs/SolvingMicroDSOPs.tmp b/docs/SolvingMicroDSOPs.tmp deleted file mode 100644 index 41b58e2cf..000000000 --- a/docs/SolvingMicroDSOPs.tmp +++ /dev/null @@ -1,2 +0,0 @@ - -/* css.sty */ \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs28.html b/docs/SolvingMicroDSOPs28.html deleted file mode 100644 index 5cd9cac0f..000000000 --- a/docs/SolvingMicroDSOPs28.html +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - - - - -
27 Cagetti (2003) argues that younger households should be dropped since educational choice is not - modeled. Also, he drops singles, since they include a large number of single mothers whose saving behavior is - influenced by welfare.
- - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs29.html b/docs/SolvingMicroDSOPs29.html deleted file mode 100644 index d2e3adcc8..000000000 --- a/docs/SolvingMicroDSOPs29.html +++ /dev/null @@ -1,20 +0,0 @@ - - - - - - - - - -
28 Please refer to the archive code for details of how these after-tax measures of P are constructed.
- - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs590x.svg b/docs/SolvingMicroDSOPs590x.svg deleted file mode 100644 index 8e1526d5d..000000000 --- a/docs/SolvingMicroDSOPs590x.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs591x.svg b/docs/SolvingMicroDSOPs591x.svg deleted file mode 100644 index f6e4e0ff9..000000000 --- a/docs/SolvingMicroDSOPs591x.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs592x.svg b/docs/SolvingMicroDSOPs592x.svg deleted file mode 100644 index 063d4e9be..000000000 --- a/docs/SolvingMicroDSOPs592x.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs593x.svg b/docs/SolvingMicroDSOPs593x.svg deleted file mode 100644 index 0c9bbfb2f..000000000 --- a/docs/SolvingMicroDSOPs593x.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs594x.svg b/docs/SolvingMicroDSOPs594x.svg deleted file mode 100644 index c8cd1dbc4..000000000 --- a/docs/SolvingMicroDSOPs594x.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs595x.svg b/docs/SolvingMicroDSOPs595x.svg deleted file mode 100644 index 8c18abf43..000000000 --- a/docs/SolvingMicroDSOPs595x.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs596x.svg b/docs/SolvingMicroDSOPs596x.svg deleted file mode 100644 index 9be3bbd60..000000000 --- a/docs/SolvingMicroDSOPs596x.svg +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs597x.svg b/docs/SolvingMicroDSOPs597x.svg deleted file mode 100644 index 7240b525c..000000000 --- a/docs/SolvingMicroDSOPs597x.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs598x.svg b/docs/SolvingMicroDSOPs598x.svg deleted file mode 100644 index 18ccee702..000000000 --- a/docs/SolvingMicroDSOPs598x.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs599x.svg b/docs/SolvingMicroDSOPs599x.svg deleted file mode 100644 index 8990dcc02..000000000 --- a/docs/SolvingMicroDSOPs599x.svg +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs600x.svg b/docs/SolvingMicroDSOPs600x.svg deleted file mode 100644 index 84edbf45b..000000000 --- a/docs/SolvingMicroDSOPs600x.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/docs/SolvingMicroDSOPs601x.svg b/docs/SolvingMicroDSOPs601x.svg deleted file mode 100644 index 6476b326f..000000000 --- a/docs/SolvingMicroDSOPs601x.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/docs/apndx_scf-data-input-clean.tex b/docs/apndx_scf-data-input-clean.tex deleted file mode 100644 index 7c16ccd22..000000000 --- a/docs/apndx_scf-data-input-clean.tex +++ /dev/null @@ -1,10 +0,0 @@ - -\hypertarget{scf-data}{} -\section{SCF Data}\label{app:scf-data} - -Data used in the estimation are constructed using the SCF 1992, 1995, 1998, 2001, and 2004 waves. The definition of wealth is net worth including housing wealth, but excluding pensions and Social Security.
The data set contains only households whose heads are aged 26--60 and excludes singles, following Cagetti~\citeyearpar{cagettiWprofiles}.\footnote{Cagetti~\citeyearpar{cagettiWprofiles}\ argues that younger households should be dropped since educational choice is not modeled. Also, he drops singles, since they include a large number of single mothers whose saving behavior is influenced by welfare.} Furthermore, the data set contains only households whose heads are college graduates. The total sample size is 4,774.
-
-In the 1995 through 2004 waves of the SCF, levels of \textit{normal} income are reported. The question in the questionnaire is ``About what would your income have been if it had been a normal year?'' We consider the level of normal income as corresponding to the model's theoretical object $P$, permanent noncapital income. Levels of normal income are not reported in the 1992 wave. Instead, in this wave there is a variable which reports whether the level of income is normal or not. For the 1992 wave, only observations which report that the level of income is normal are used, and the income levels of these retained observations are interpreted as levels of permanent income.
-
-Normal income levels in the SCF are before-tax figures. These before-tax permanent income figures must be rescaled so that the median of the rescaled permanent income of each age group matches the median of each age group's income assumed in the simulation. This rescaled permanent income is interpreted as after-tax permanent income. Rescaling is crucial since, in the estimation, empirical profiles are matched with simulated ones which are generated using after-tax permanent income (recall the income process assumed in the main text). The wealth to permanent income ratio is computed by dividing the level of wealth by the level of (after-tax) permanent income, and this ratio is used in the estimation.\footnote{Please refer to the archive code for details of how these after-tax measures of $P$ are constructed.}
- diff --git a/docs/apndx_scf-data-input.tex b/docs/apndx_scf-data-input.tex deleted file mode 100644 index 7c16ccd22..000000000 --- a/docs/apndx_scf-data-input.tex +++ /dev/null @@ -1,10 +0,0 @@ -
-\hypertarget{scf-data}{}
-\section{SCF Data}\label{app:scf-data}
-
-Data used in the estimation are constructed using the SCF 1992, 1995, 1998, 2001, and 2004 waves. The definition of wealth is net worth including housing wealth, but excluding pensions and Social Security. The data set contains only households whose heads are aged 26--60 and excludes singles, following Cagetti~\citeyearpar{cagettiWprofiles}.\footnote{Cagetti~\citeyearpar{cagettiWprofiles}\ argues that younger households should be dropped since educational choice is not modeled. Also, he drops singles, since they include a large number of single mothers whose saving behavior is influenced by welfare.} Furthermore, the data set contains only households whose heads are college graduates. The total sample size is 4,774.
-
-In the 1995 through 2004 waves of the SCF, levels of \textit{normal} income are reported. The question in the questionnaire is ``About what would your income have been if it had been a normal year?'' We consider the level of normal income as corresponding to the model's theoretical object $P$, permanent noncapital income. Levels of normal income are not reported in the 1992 wave. Instead, in this wave there is a variable which reports whether the level of income is normal or not.
For the 1992 wave, only observations which report that the level of income is normal are used, and the income levels of these retained observations are interpreted as levels of permanent income.
-
-Normal income levels in the SCF are before-tax figures. These before-tax permanent income figures must be rescaled so that the median of the rescaled permanent income of each age group matches the median of each age group's income assumed in the simulation. This rescaled permanent income is interpreted as after-tax permanent income. Rescaling is crucial since, in the estimation, empirical profiles are matched with simulated ones which are generated using after-tax permanent income (recall the income process assumed in the main text). The wealth to permanent income ratio is computed by dividing the level of wealth by the level of (after-tax) permanent income, and this ratio is used in the estimation.\footnote{Please refer to the archive code for details of how these after-tax measures of $P$ are constructed.}
- diff --git a/docs/de-macro-python3.py b/docs/de-macro-python3.py deleted file mode 100755 index e81f7ecad..000000000 --- a/docs/de-macro-python3.py +++ /dev/null @@ -1,1109 +0,0 @@ -#!/usr/bin/env python3
-# coding:utf-8
-
-# de-macro-python3.py
-
-# MIT License
-
-# Copyright (c) 2017 Ash Suzuki
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-# PURPOSE
-
-# This program can eliminate most private macros from a LaTeX file.
-# Applications:
-# - your publisher has difficulty dealing with many private macros
-# - you cooperate with colleagues who do not understand your macros
-# - preprocessing before a system like latex2html, which is somewhat
-# unpredictable with private macros.
-
-# DIFFERENCE FROM THE ORIGINAL
-
-# This program is compatible with Python 3, though the original is not.
-# This program is applicable to multibyte characters if encoded with UTF-8.
-
-# USAGE
-
-# de-macro-python3 [--defs <defs-db>] <tex-file-1>[.tex] [<tex-file-2>[.tex] ...]
-
-# Simplest example: de-macro-python3 testament
-
-# (As you see, the <> is used only in the notation of this documentation,
-# you should not type it.)
-
-# If <tex-file>.tex contains a command \usepackage{<defs>-private}
-# then the file <defs>-private.sty will be read, and its macros will be
-# replaced in <tex-file> with their definitions.
-# The result is in <tex-file>-clean.tex.
-
-# Only newcommand, renewcommand, newenvironment, and renewenvironment are
-# understood (it does not matter, whether you write new or renew).
-# These can be nested but do not be too clever, since I do not
-# guarantee the same expansion order as in TeX.
-
-# FILES
-
-# <defs-db>.db
-# <tex-file>-clean.tex
-# <defs>-private.sty
-
-# For speed, a macro database file called <defs-db>.db is created.
-# If such a file exists already then it is used.
-# If <defs>-private.sty is older than <defs-db>.db then it will not
-# be used.
-
-# It is possible to specify another database filename via --defs <defs-db>.
-# Then <defs-db>.db will be used.
-
-# For each <tex-file>, a file <tex-file>-clean.tex will be produced.
-# If <tex-file>-clean.tex is newer than <tex-file>.tex then it stays.
-
-# INPUT COMMAND
-
-# If a tex file contains a command \input{<file>} or \input <file>
-# then <file>.tex is processed recursively, and <file>-clean.tex
-# will be inserted into the final output.
-# For speed, if <file>-clean.tex is newer than <file>.tex
-# then <file>.tex will not be reprocessed.
-
-# The dependency checking is not sophisticated, so if you rewrite some macros
-# then remove all *-clean.tex files!
-
-import sys, os, re, shelve
-
-# Utilities
-
-class No_detail:
- strerror = ""
-
-no_detail = No_detail()
-
-
-class Error(Exception):
- # """Base class for exceptions in this module."""
- pass
-
-class Empty_text_error(Error):
- # """Exception raised for errors in the input.
-
- # Attributes:
- # data -- data that was found empty
- # message
- # """
-
- def __init__(self, data, message):
- self.data = data
- self.message = message
-
-def warn(error_message, detail = no_detail):
- sys.stderr.write(error_message + "\n")
- if no_detail != detail:
- sys.stderr.write(detail.strerror + "\n")
-
-def die(error_message, detail = no_detail):
- warn(error_message, detail)
- sys.exit(1)
-
-def getopt_map(one_letter_opts, long_optlist):
- "Turns long options into an option map, using getopt."
- import getopt
- optlist, args = getopt.getopt(sys.argv[1:],
- one_letter_opts, long_optlist)
- opt_map = {}
- for pair in optlist: opt_map[pair[0]] = pair[1] or 1
- return opt_map, args
-
-def newer(file1, file2):
-
- if not os.path.isfile(file1):
- return False
-
- try:
- stat_return = os.lstat(file1)
- except OSError as detail:
- die("lstat " + file1 + " failed:", detail)
- time1 = stat_return.st_mtime
-
- try:
- stat_return = os.lstat(file2)
- except OSError as detail:
- die("lstat " + file2 + " failed:", detail)
- time2 = stat_return.st_mtime
-
- return time1 > time2
-
-def cut_extension(filename, ext):
- # """
- # If filename has extension ext (including the possible dot),
- # it will be cut off.
- # """
- file = filename
- index = filename.rfind(ext)
- if 0 <= index and len(file)-len(ext) == index:
- file = file[:index]
- return file
-
-
-class Stream:
- data = None
- pos = None
- item = None
-
- def legal(self):
- return 0 <= self.pos and self.pos < len(self.data)
-
- def uplegal(self):
- return self.pos < len(self.data)
-
- def __init__(self, data_v = None):
- self.data = data_v
- if self.data:
- self.pos = 0
- self.item = self.data[self.pos]
-
- def next(self):
- self.pos += 1
- if self.pos < len(self.data):
- self.item = self.data[self.pos]
- return self.item
-
- def reset(self):
- if self.data and 0 < len(self.data):
- self.pos = 0
- self.item = self.data[0]
- return self.item
-
-
-# Basic classes
-
-blank_re = re.compile(r"\s")
-blanked_filename_re = re.compile(r"^\s+(\w*)\s+")
-braced_filename_re = re.compile(r"^\s*{\s*(\w*)\s*}")
-blank_or_rbrace_re = re.compile(r"[\s}]")
-pos_digit_re = re.compile(r"[1-9]")
-
-def isletter(c, isatletter=False):
- if "@" == c:
- return isatletter
- else:
- return c.isalpha()
-
-class Token:
- # """Type 0 means ordinary character, type 1 means escape symbol,
- # type 2 means escape string (without the \ ), type 3 means comment.
- # """
- simple_ty = 0
- esc_symb_ty = 1
- esc_str_ty = 2
- comment_ty = 3
-
- type = simple_ty
- val = " "
-
- def __init__(self, type_v=simple_ty, val_v=" "):
- self.type = type_v
- self.val = val_v
-
- def show(self, isatletter=False):
- out = ""
- if simple_ty == self.type or comment_ty == self.type:
- out = self.val
- else:
- out = "\\" + self.val
- return out
-
-
-# Constants
-
-g_token = Token(0," ") # generic token
-simple_ty = g_token.simple_ty
-comment_ty = g_token.comment_ty
-esc_symb_ty = g_token.esc_symb_ty
-esc_str_ty = g_token.esc_str_ty
-
-
-
-def detokenize(text):
- # """
- # Input is a list of tokens.
- # Output is a string.
- # """
- out = ""
- if 0 == len(text):
- return ""
- pos = 0
- out += text[pos].show()
- pos += 1
- while pos < len(text):
- previtem = text[pos-1]
- item = text[pos]
- # """Insert a separating space after an escape sequence if it is a
- # string and is followed by a letter."""
- if (esc_str_ty == previtem.type
- and simple_ty == item.type and isletter(item.val[0], False)):
- out += " "
- out += item.show()
- pos += 1
- return out
-
-def strip_comments(text):
- # """
- # Input is a list of tokens.
- # Output is the same list except the comment tokens.
- # """
- out = []
- for token in text:
- if not comment_ty == token.type:
- out.append(token)
- return out
-
-class Group:
- # """type 0 means a token, type 1 means contents of a group within {}
- # """
- token_ty = 0
- group_ty = 1
- type = token_ty
- val = [] # Value is a token list.
-
- def __init__(self, type_v, val_v):
- self.type = type_v
- self.val = val_v
-
- def show(self):
- if token_ty == self.type:
- return self.val.show()
- else:
- return "{%s}" % detokenize(self.val)
-
-# Constants
-
-g_group = Group(0, [])
-token_ty = g_group.token_ty
-group_ty = g_group.group_ty
-
-
-def tokenize(in_str):
- # """Returns a list of tokens.
- # """ - text = [] - isatletter=False - cs = Char_stream(in_str) - cs.reset() - if not cs.legal(): - raise Error("No string to tokenize.") - while cs.uplegal(): - if "%" == cs.item: - comment = cs.scan_comment_token() - text.append(Token(comment_ty, comment)) - elif "\\" != cs.item: - text.append(Token(simple_ty, cs.item)) - cs.next() - else: - cs.next() - name = cs.scan_escape_token(isatletter) - if isletter(name[0], isatletter): - token = Token(esc_str_ty, name) - else: - token = Token(esc_symb_ty, name) - text.append(token) - if "makeatletter" == name: - isatletter=True - elif "makeatother" == name: - isatletter=False - return text - - - -class Command_def: - name = "1" - numargs = 0 - body= "" - - def __init__(self, name_v, numargs_v, body_v): - self.name = name_v - self.numargs = numargs_v - self.body = body_v - - def show(self): - out = "\\newcommand{\\%s}" % (self.name) - if 0 < self.numargs: - out += "[%d]" % self.numargs - out += "{%s}" % detokenize(self.body) - return out - - -class Env_def: - name = "1" - numargs = 0 - begin = "" - end = "" - - def __init__(self, name_v, numargs_v, begin_v, end_v): - self.name = name_v - self.numargs = numargs_v - self.begin = begin_v - self.end = end_v - - def show(self): - out = "\\newenvironment{%s}" % self.name - if 0 < self.numargs: - out += "[%d]" % self.numargs - out += "{%s}" % detokenize(self.begin) - out += "{%s}" % detokenize(self.end) - return out - - -class Command_instance: - name = "1" - args = [] - - def __init__(self, name_v, args_v): - self.name = name_v - self.args = args_v - - def show(self): - out = "\\"+self.name - for arg in self.args: - out += "{%s}" % detokenize(arg) - return out - - -class Env_instance: - name = "1" - args = [] - - def __init__(self, name_v, args_v, body_v): - self.name = name_v - self.args = args_v - self.body = body_v - - def show(self): - out = "\\begin{%s}" % self.name - for arg in self.args: - out += "{%s}" % detokenize(arg) - out += detokenize(self.body) - out += "\\end{%s}" % self.name - return out - -class Char_stream(Stream): - - def scan_escape_token(self, isatletter=False): - # """ - # Starts after the escape sign, assumes that it is scanning a symbol. - # Returns a token-string. - # """ - out = self.item # Continue only if this is a letter. - item = self.next() - if isletter(out, isatletter): - while self.uplegal() and isletter(item, isatletter): - out += item - item = self.next() - return out - - def scan_comment_token(self): - # """ - # Starts at the comment sign %, assumes that it is scanning a comment. - # Returns the whole comment string, - # including the % and all empty space after it. - # """ - comment = "" - while "\n" != self .item: - comment += self.item - self.next() - while self.uplegal() and blank_re.match(self.item): - comment += self.item - self.next() - return comment - - def scan_input_filename(self): - # """We just read an \input token. The next group or word will be - # interpreted as a filename (possibly without .tex). - # Return the filename. - # """ - item = self.item - while self.uplegal() and blank_re.match(self.item): - item = self.next() - if "{" == item: - item = self.next() - file = "" - while self.uplegal() and not blank_or_rbrace_re.match(item): - file += item - item = self.next() - self.next() - return file - - def scan_package_filenames(self): - # """We just read a \usepackage token. The next group will be - # interpreted as a list of filenames (without .sty) separated by commas. - # Return the list. 
- # """ - item = self.item - while self.uplegal() and blank_re.match(item): - item = self.next() - file = "" - if not "{" == item: - raise Error("\\usepackage not followed by brace.") - item = self.next() - while self.uplegal() and not blank_or_rbrace_re.match(item): - file += item - item = self.next() - self.next() - return file.split(",") - - -class Tex_stream(Stream): - - defs = ({}, {}) - defs_db = "x" - defs_db_file = "x.db" - debug = False - - def smart_tokenize(self, in_str, handle_inputs=False): - # """Returns a list of tokens. - # It may interpret and carry out all \input commands. - # """ - self.data = [] - text = self.data - isatletter=False - cs = Char_stream(in_str) - cs.reset() - if not cs.legal(): - raise Error("No string to tokenize.") - while cs.uplegal(): - if "%" == cs.item: - comment = cs.scan_comment_token() - text.append(Token(comment_ty, comment)) - elif "\\" != cs.item: - text.append(Token(simple_ty, cs.item)) - cs.next() - else: - cs.next() - name = cs.scan_escape_token(isatletter) - if "input" == name and handle_inputs: - file = cs.scan_input_filename() - to_add = self.process_if_newer(file) - text.extend(to_add) - elif "usepackage" == name: - while cs.uplegal() and blank_re.match(cs.item): - cs.next() - if "[" == cs.item: # private packages have no options - text.extend([Token(esc_str_ty, "usepackage"), - Token(simple_ty, "[")]) - cs.next() - continue - files = cs.scan_package_filenames() - i = 0 - while i < len(files): # process private packages - file = files[i] - p = file.rfind("-private") - if p < 0 or not len(file) - len("-private") == p: - i += 1 - continue - defs_db_file = file+".db" - self.add_defs(file) - del files[i:(i+1)] - if files: # non-private packages left - group_content = ",".join(files) - to_add_str = "\\usepackage{%s}" % (group_content) - to_add = tokenize(to_add_str) - text.extend(to_add) - else: - if isletter(name[0], isatletter): - token = Token(esc_str_ty, name) - else: - token = Token(esc_symb_ty, name) - text.append(token) - if "makeatletter" == name: - isatletter=True - elif "makeatother" == name: - isatletter=False - self.reset() - return self.data - - def smart_detokenize(self): - # """ - # Output is a string. - # If the list contains an \input{file} then the content of file - # file-clean.tex replaces it in the output. - # """ - self.reset() - if not self.legal(): - return "" - out = "" - previtem = None - while self.uplegal(): - item = self.item - # """Insert a separating space after an escape sequence if it is a - # string and is followed by a letter.""" - if (None != previtem and esc_str_ty == previtem.type - and simple_ty == item.type and isletter(item.val[0], False)): - out += " " - previtem = item - if not (esc_str_ty == item.type and "input" == item.val): - out += item.show() - self.next() - else: - self.next() - group = self.scan_group() - file = detokenize(group.val) - clean_file = "%s-clean.tex" % (file) - print("Reading file %s" % (clean_file)) - fp = open(clean_file,"r", encoding="utf-8") - content = fp.read() - fp.close() - out += content - return out - - # Basic tex scanning - - def skip_blank_tokens(self): # we also skip comment tokens. - item = self.item - while (self.uplegal() and - (comment_ty == item.type or - (simple_ty == item.type and blank_re.match(item.val)))): - item = self.next() - return item - - def scan_group(self): - """Returns group. 
- """ - if not self.legal(): - raise Error("No group to scan.") - item = self.item - if not (simple_ty == item.type and "{" == item.val): - return Group(token_ty, [self.item]) - count = 1 - group = [] - item = self.next() - while count and self.uplegal(): - if simple_ty == item.type: - if "{" == item.val: - count += 1 - elif "}" == item.val: - count -= 1 - if count != 0: - group.append(item) - item = self.next() - return Group(group_ty, group) - - # Command and environment definitions - - def scan_command_name(self): - # """Returns name. - # """ - if not self.legal(): - raise Error("No command name to scan.") - item = self.item - name = "" - if item.type in [esc_symb_ty, esc_str_ty]: - name = item.val - else: - if not "{" == item.val: - raise Error("Command definition misses first {.") - self.next() - item = self.skip_blank_tokens() - if not item.type in [esc_symb_ty, esc_str_ty]: - raise Error("Command definition does not begin with control sequence.") - name = item.val - self.next() - item = self.skip_blank_tokens() - if not "}" == item.val: - raise ("Definition for commmand %s misses first }., %s" % - (name, item.val)) - self.next() - self.skip_blank_tokens() - return name - - def scan_numargs(self, name): - # """ - # name is the name of the command or environment definition being - # scanned. - # Starts on a nonblank token. - # Returns numargs - # where numargs is the number of arguments in a command or environment - # definition, - # """ - if not self.legal(): - raise Error("No numargs to scan.") - item = self.item - numargs = 0 - if not simple_ty == item.type: - raise Error("Illegal command or environment definition: ")+name - if "[" == item.val: - if not 4 < len(self.data): - raise Error("Command or environment definition is illegal: ")+name - item = self.next() - if not simple_ty == item.type: - raise Error("Illegal command or environment definition: ")+name - numargs = item.val - if not pos_digit_re.match(numargs): - raise Error("%s must be argument number after %s") % (numargs, name) - numargs = int(numargs) - self.next() - item = self.skip_blank_tokens() - if not simple_ty == item.type: - raise Error("Illegal command definition: ")+name - if "]" != item.val: - raise Error("Illegal command definition: ")+name - self.next() - self.skip_blank_tokens() - return numargs - - def scan_command_def(self): - # """Scan a command definition. - # Return command_def. - # Assumes that the number of arguments is at most 9. - # """ - if not self.legal(): - raise Error("No command definition to scan.") - item = self.item - if not 2 < len(self.data): - raise Error("Command definition is illegal.") - # newcommand or renewcommand - if not item.type in [esc_symb_ty, esc_str_ty]: - raise Error("Command definition should begin with control sequence: ")+item.val - if item.val not in ["newcommand", "renewcommand"]: - raise Error("Command definition should begin with control sequence.") - self.next() - self.skip_blank_tokens() - - cmd_name = self.scan_command_name() - numargs = self.scan_numargs(cmd_name) - - body_group = self.scan_group() - if group_ty != body_group.type: - raise Error("Command body missing: ")+cmd_name - body_val = strip_comments(body_group.val) - return Command_def(cmd_name, numargs, body_val) - - def scan_env_name(self): - # """Starts on a {. - # Returns name. - # """ - if not self.legal(): - raise Error("No environment name to scan.") - item = self.item - if not "{" == item.val: - raise Error("Env. 
definition begins with %s, not with {") % (item.val) - self.next() - item = self.skip_blank_tokens() - name = "" - if not simple_ty == item.type: - raise ("1. Env. def. begins with cont. seq. %s, not with env.name." - % (item.val)) - while self.uplegal() and not blank_or_rbrace_re.match(item.val): - name += item.val - item = self.next() - if not simple_ty == item.type: - raise ("2. Env. def. begins with cont. seq. %s, not with env.name." - % (item.val)) - item = self.skip_blank_tokens() - if not "}" == item.val: - raise Error("Command definition does not begin with control sequence.") - self.next() - self.skip_blank_tokens() - return name - - def scan_env_def(self): - # """Scan an environment definition. - # Return env_def - # Assumes that the number of arguments is at most 9. - # """ - if not self.legal(): - raise Error("No environment definition to scan.") - item = self.item - if not 7 < len(self.data): - raise Error("Environment definition is illegal.") - pos = 0 - - if not item.type in [esc_symb_ty, esc_str_ty]: - raise ("Env. definition does not begin with control sequence:"+ - item.val) - if item.val not in ["newenvironment", "renewenvironment"]: - raise Error("Env. definition does not begin with control sequence.") - self.next() - self.skip_blank_tokens() - - env_name = self.scan_env_name() - numargs = self.scan_numargs(env_name) - self.skip_blank_tokens() - - begin_group = self.scan_group() - if group_ty != begin_group.type: - raise Error("Begin body missing: ")+env_name - begin_val = strip_comments(begin_group.val) - - self.skip_blank_tokens() - - end_group = self.scan_group() - if group_ty != end_group.type: - raise Error("End body missing:")+env_name - end_val = strip_comments(end_group.val) - - return Env_def(env_name, numargs, begin_val, end_val) - - def scan_defs(self): - if not self.legal(): - raise Error("No definitions to scan.") - self.reset() - command_defs, env_defs = self.defs - while self.uplegal(): - if (esc_str_ty == self.item.type - and self.item.val in ["newcommand", "renewcommand"]): - command_def = self.scan_command_def() - command_defs[command_def.name] = command_def - elif (esc_str_ty == self.item.type and self.item.val - in ["newenvironment", "renewenvironment"]): - env_def = self.scan_env_def() - env_defs[env_def.name] = env_def - else: - self.next() - - # Instances - - def scan_args(self, command_or_env_def): - # """Scan the arguments of a command or environment. - # Return [args]. - # """ - if not self.legal(): - raise Error("No arguments to scan.") - numargs = command_or_env_def.numargs - name = command_or_env_def.name - - args = [] - for i in range(numargs): - arg = [] - if not (simple_ty == self.item.type and "{" == self.item.val): - arg = [self.item] - self.next() - else: - group = self.scan_group() - arg = group.val - args.append(arg) - return args - - def scan_command(self, command_def): - # """Scan the arguments of a command. - # Return command_instance - # """ - if not self.legal(): - raise Error("No command to scan.") - if not self.item.type in [esc_symb_ty, esc_str_ty]: - raise Error("Command does not begin with control sequence.") - name = self.item.val - self.next() - if 0 < command_def.numargs: - self.skip_blank_tokens() - args = self.scan_args(command_def) - else: - args = [] - return Command_instance(name, args) - - def test_env_boundary(self, item): - # """Check whether an environment begin or end follows. - # Return 1 if \begin, -1 if \end, 0 otherwise. 
- # """ - d = 0 - if esc_str_ty == item.type: - if "begin"==item.val: - d = 1 - elif "end"==item.val: - d = -1 - return d - - def scan_env_begin(self): - # """Scan an environment name. - # Return env_name. - # """ - if not self.legal(): - raise Error("No environment begin to scan.") - item = self.item - if not (esc_str_ty == item.type and "begin" == item.val): - raise Error("Environment does not begin with begin.") - self.next() - name_group = self.scan_group() - name = detokenize(name_group.val) - return name - - def scan_env_end(self): - # """Scan an environment end. - # Return env_name. - # """ - if not self.legal(): - raise Error("No environment end to scan.") - item = self.item - if not (esc_str_ty == item.type and "end" == item.val): - raise Error("Environment does not end with end.") - self.next() - name_group = self.scan_group() - name = detokenize(name_group.val) - return name - - def scan_env_rest(self, env_def): - # """Scanning starts after \begin{envname}. - # Returns env_instance. - # """ - if not self.legal(): - raise Error("No environment rest to scan.") - count = 1 # We are already within a boundary. - args = self.scan_args(env_def) - body = [] - while count and self.uplegal(): - old_pos = self.pos - d = self.test_env_boundary(self.item) - count += d - if 1 == d: - self.scan_env_begin() - elif -1 == d: - self.scan_env_end() - else: - self.next() - if 0 < count: - body.extend(self.data[old_pos : self.pos]) - return Env_instance(env_def.name, args, body) - - # Definitions - - def restore_defs(self): - if os.path.isfile(self.defs_db_file): - print("Using defs db %s" % (self.defs_db_file)) - db_h = shelve.open(self.defs_db) - self.defs = db_h["defs"] - db_h.close() - - def save_defs(self): - db_h = shelve.open(self.defs_db) - if "defs" in db_h: - del db_h["defs"] - db_h["defs"] = self.defs - db_h.close() - - def add_defs(self, defs_file): - defs_file_compl = defs_file + ".sty" - if not os.path.isfile(defs_file_compl): - raise Error("%s does not exist") % (defs_file_compl) - - defs_db_file = self.defs_db_file - if newer(defs_db_file, defs_file_compl): - print("Using defs db %s for %s" % (defs_db_file, defs_file)) - else: - defs_fp = open(defs_file_compl, "r", encoding="utf-8") - defs_str = defs_fp.read() - defs_fp.close() - ds = Tex_stream() - ds.defs = self.defs - defs_text = ds.smart_tokenize(defs_str) - # changing ds.defs will change self.defs - if self.debug: - defs_seen_file = "%s-seen.sty" % (defs_file) - defs_seen_fp = open(defs_seen_file, "w", encoding="utf-8") - out = detokenize(defs_text) - defs_seen_fp.write(out) - defs_seen_fp.close() - ds.scan_defs() - if self.debug: - out = "" - command_defs, env_defs = self.defs - for def_name in command_defs.keys(): - out += command_defs[def_name].show() + "\n" - for def_name in env_defs.keys(): - out += env_defs[def_name].show() +"\n" - print("Definitions after reading %s:" % (defs_file)) - print(out) - - # Applying definitions, recursively - # (maybe not quite in Knuth order, so avoid tricks!) 
-
- def subst_args(self, body, args):
- out = []
- pos = 0
- while pos < len(body):
- item = body[pos]
- if not (simple_ty == item.type and "#" == item.val):
- out.append(item)
- pos += 1
- continue
- pos += 1
- token = body[pos]
- argnum = token.val
- if not pos_digit_re.match(argnum):
- raise Error("# is not followed by number.")
- argnum = int(argnum)
- if argnum > len(args):
- raise Error("Too large argument number.")
- arg = args[argnum-1]
- out += arg
- pos += 1
- return out
-
- def apply_command_recur(self, command_instance):
- command_defs, env_defs = self.defs
- name = command_instance.name
- command_def = command_defs[name]
-
- args = command_instance.args
- body = command_def.body
- result = self.subst_args(body, args)
- try:
- result = self.apply_all_recur(result)
- except Empty_text_error as e:
- raise Error("apply_all_recur fails on command instance %s: %s, %s" %
- (command_instance.show(), detokenize(e.data), e.message))
- return result
-
- def apply_env_recur(self, env_instance):
- command_defs, env_defs = self.defs
- name = env_instance.name
- env_def = env_defs[name]
-
- begin, end = env_def.begin, env_def.end
- body, args = env_instance.body, env_instance.args
- out = self.subst_args(begin, args) + body + self.subst_args(end, args)
- return self.apply_all_recur(out)
-
-
- def apply_all_recur(self, data, report=False):
- ts = Tex_stream(data)
- ts.defs = self.defs
- command_defs, env_defs = self.defs
- out = []
- progress_step = 10000
- progress = progress_step
- if not ts.legal():
- raise Empty_text_error(data, "No text to process.")
- while ts.uplegal():
- if ts.pos > progress:
- if report:
- print(ts.pos)
- progress += progress_step
- if not ts.item.type in [esc_symb_ty, esc_str_ty]:
- out.append(ts.item)
- ts.next()
- continue
- if 1 == ts.test_env_boundary(ts.item):
- old_pos = ts.pos
- env_name = ts.scan_env_begin()
- if not env_name in env_defs:
- out.extend(ts.data[old_pos : ts.pos])
- continue
- else:
- env_def = env_defs[env_name]
- env_instance = ts.scan_env_rest(env_def)
- result = ts.apply_env_recur(env_instance)
- out.extend(result)
- elif not ts.item.val in command_defs:
- out.append(ts.item)
- ts.next()
- continue
- else:
- command_def = command_defs[ts.item.val]
- command_inst = ts.scan_command(command_def)
- result = ts.apply_command_recur(command_inst)
- out.extend(result)
- return out
-
-
- # Processing files
-
- def process_file(self, file):
- # """Returns the new defs.
- # """
- file = cut_extension(file, ".tex")
- source_file = "%s.tex" % (file)
- print("File %s [" % (source_file))
- source_fp = open(source_file, "r", encoding="utf-8")
- text_str = source_fp.read()
- source_fp.close()
-
- self.smart_tokenize(text_str, handle_inputs=True)
- if not self.data:
- raise Error("Empty tokenization result.")
- self.reset()
-
- if self.debug:
- source_seen_fname = "%s-seen.tex" % (file)
- source_seen_fp = open(source_seen_fname, "w", encoding="utf-8")
- source_seen_fp.write(detokenize(self.data))
- source_seen_fp.close()
-
- self.data = self.apply_all_recur(self.data, report=True)
-
- result_fname = "%s-clean.tex" % (file)
- print("Writing %s [" % (result_fname))
- result_fp = open(result_fname, "w", encoding="utf-8")
- result_fp.write(self.smart_detokenize())
- result_fp.close()
- print("] file %s" % (result_fname))
- print("] file %s" % (source_file))
-
- def process_if_newer(self, file):
- # """
- # \input{file} is added to the token list.
- # If the input file is newer it is processed.
- # Returns tokenized \input{file}.
- # """ - file = cut_extension(file, ".tex") - tex_file = file+".tex" - clean_tex_file = file+"-clean.tex" - if newer(clean_tex_file, tex_file): - print("Using %s." % (clean_tex_file)) - else: - ts = Tex_stream() - ts.data = [] - ts.defs = self.defs - ts.process_file(file) - to_add = "\\input{%s}" % (file) - return tokenize(to_add) - - -# Main - -long_optlist = ["debug","defs="] -options, restargs = getopt_map("x", long_optlist) - -debug = False -if "--debug" in options: - debug = True - -root = restargs[0] -root = cut_extension(root, ".tex") -if "--defs" in options: - defs_root = options["--defs"] -else: - defs_root = "%s" % (root) -defs_db = defs_root -defs_db_file = defs_root+".db" - -ts = Tex_stream() -ts.defs_db = defs_db -ts.defs_db_file = defs_db_file -ts.debug = debug - -ts.restore_defs() -for root in restargs: - ts.process_file(root) - -print("(Re)creating defs db %s" % (defs_db)) -ts.save_defs() diff --git a/docs/econark-clean-clean.sty b/docs/econark-clean-clean.sty new file mode 100644 index 000000000..cec9e0e53 --- /dev/null +++ b/docs/econark-clean-clean.sty @@ -0,0 +1,367 @@ +% Style file with stuff that should be available for any latex doc in Econ-ARK +% \renewcommand{[macroName]}{[value]} does not overwrite [macroName] +% so if used after macroName is already defined, it leaves the macro unchanged + +% Get references right whether compiled as subfile or main file +% https://tex.stackexchange.com/questions/463699/proper-reference-numbers-with-subfiles +\newcommand\labelprefix{} +\newcommand\localorexternallabel[1]{% Removing this comment breaks the command + \expandafter\ifx\csname r@#1\endcsname\relax + \labelprefix + \fi #1% Removing this comment breaks the command +} + +% Appendices and body are subfiles +\RequirePackage{subfiles} +\RequirePackage{xmpincl} % mathstat.dal..ca/~sellinger/pdfa + +% Get all the packages from the American Mathematical Society +\RequirePackage{amsmath,amsfonts,amsmath,amsthm,amssymb} + +% Command to define a label only if it does not yet exist (suppresses misleading +% warnings when material created in subfiles is read in while references already +% exist from master document) +\makeatletter +\renewcommand{\iflabelexists}[3]{\@ifundefined{r@#1}{\G@refundefinedtrue{#3}}{#2}} +\makeatother + +% Home of Econ-ARK +\renewcommand{\ARKurl}{\href{https://econ-ark.org}{{Econ-ARK}}} + +% Define various generically useful terms and items +\renewcommand{\avg}{\bar} +\renewcommand{\cov}{}\renewcommand{\cov}{\textup{cov}} +\renewcommand{\Abve}{\bar} +\renewcommand{\Belw}{\underline} +\renewcommand{\CDF}{\mathcal{F}} +\renewcommand{\GroFac}{\Omega} % Generic for growth factor +\renewcommand{\GroRte}{\omega} % Generic for growth rate +\renewcommand{\Lvl}{\mathbf} % Levels of variables are bold + +% Constrained +\renewcommand{\cnstr}[1]{\grave{#1}} + +\renewcommand{\BalGroFac}{\check} % Balanced growth factor +\renewcommand{\BalGroRte}{\tilde} % Balanced growth rate (log change) +\renewcommand{\TargetNrm}{\hat} % Target +\renewcommand{\ABalLvl}{\BalGroFac{\ALvl}} % m where ALvl grows by PermGroFac +\renewcommand{\MBalLvl}{\BalGroFac{\MNrm}} % m where MLvl grows by PermGroFac +\renewcommand{\mBalLog}{\BalGroRte{\mNrm}} % m where mLog grows by PermGroRte +\renewcommand{\mTrgNrm}{\TargetNrm{\mNrm}} % m where E[m_{t+1}]=m_{t} + +% Levels are boldface +\renewcommand{\aLvl}{\mathbf{a}} +\renewcommand{\bLvl}{\mathbf{b}} +\renewcommand{\cLvl}{\mathbf{c}} +\renewcommand{\dLvl}{\mathbf{d}} +\renewcommand{\eLvl}{\mathbf{e}} +\renewcommand{\fLvl}{\mathbf{f}} 
+\renewcommand{\gLvl}{\mathbf{g}} +\renewcommand{\hLvl}{\mathbf{h}} +\renewcommand{\iLvl}{\mathbf{i}} +\renewcommand{\jLvl}{\mathbf{j}} +\renewcommand{\kLvl}{\mathbf{k}} +\renewcommand{\mLvl}{\mathbf{m}} +\renewcommand{\nLvl}{\mathbf{n}} +\renewcommand{\pLvl}{\mathbf{p}} +\renewcommand{\qLvl}{\mathbf{q}} +\renewcommand{\rLvl}{\mathbf{r}} +\renewcommand{\sLvl}{\mathbf{s}} +\renewcommand{\tLvl}{\mathbf{t}} +\renewcommand{\uLvl}{\mathbf{u}} +\renewcommand{\vLvl}{\mathbf{v}} +\renewcommand{\wLvl}{\mathbf{w}} +\renewcommand{\xLvl}{\mathbf{x}} +\renewcommand{\yLvl}{\mathbf{y}} +\renewcommand{\zLvl}{\mathbf{z}} + +\renewcommand{\ALvl}{\mathbf{A}} +\renewcommand{\BLvl}{\mathbf{B}} +\renewcommand{\CLvl}{\mathbf{C}} +\renewcommand{\DLvl}{\mathbf{D}} +\renewcommand{\ELvl}{\mathbf{E}} +\renewcommand{\FLvl}{\mathbf{F}} +\renewcommand{\GLvl}{\mathbf{G}} +\renewcommand{\HLvl}{\mathbf{H}} +\renewcommand{\ILvl}{\mathbf{I}} +\renewcommand{\JLvl}{\mathbf{J}} +\renewcommand{\KLvl}{\mathbf{K}} +\renewcommand{\LLvl}{\mathbf{L}} +\renewcommand{\MLvl}{\mathbf{M}} +\renewcommand{\NLvl}{\mathbf{N}} +\renewcommand{\OLvl}{\mathbf{O}} +\renewcommand{\PLvl}{\mathbf{P}} +\renewcommand{\QLvl}{\mathbf{Q}} +\renewcommand{\RLvl}{\mathbf{R}} +\renewcommand{\SLvl}{\mathbf{S}} +\renewcommand{\TLvl}{\mathbf{T}} +\renewcommand{\ULvl}{\mathbf{U}} +\renewcommand{\VLvl}{\mathbf{V}} +\renewcommand{\WLvl}{\mathbf{W}} +\renewcommand{\XLvl}{\mathbf{X}} +\renewcommand{\YLvl}{\mathbf{Y}} +\renewcommand{\ZLvl}{\mathbf{Z}} + +% Functions are Roman not italicized +\renewcommand{\aFunc}{\mathrm{a}} +\renewcommand{\bFunc}{\mathrm{b}} +\renewcommand{\cFunc}{\mathrm{c}} +\renewcommand{\dFunc}{\mathrm{d}} +\renewcommand{\eFunc}{\mathrm{e}} +\renewcommand{\fFunc}{\mathrm{f}} +\renewcommand{\hFunc}{\mathrm{h}} +\renewcommand{\iFunc}{\mathrm{i}} +\renewcommand{\jFunc}{\mathrm{j}} +\renewcommand{\kFunc}{\mathrm{k}} +\renewcommand{\mFunc}{\mathrm{m}} +\renewcommand{\nFunc}{\mathrm{n}} +\renewcommand{\pFunc}{\mathrm{p}} +\renewcommand{\sFunc}{\mathrm{s}} +\renewcommand{\rFunc}{\mathrm{r}} +\renewcommand{\uFunc}{\mathrm{u}} +\renewcommand{\vFunc}{\mathrm{v}} +\renewcommand{\wFunc}{\mathrm{w}} +\renewcommand{\xFunc}{\mathrm{x}} +\renewcommand{\yFunc}{\mathrm{y}} +\renewcommand{\zFunc}{\mathrm{z}} + +\renewcommand{\AFunc}{\mathrm{A}} +\renewcommand{\BFunc}{\mathrm{B}} +\renewcommand{\CFunc}{\mathrm{C}} +\renewcommand{\DFunc}{\mathrm{D}} +\renewcommand{\EFunc}{\mathrm{E}} +\renewcommand{\FFunc}{\mathrm{F}} +\renewcommand{\GFunc}{\mathrm{G}} +\renewcommand{\HFunc}{\mathrm{H}} +\renewcommand{\IFunc}{\mathrm{I}} +\renewcommand{\JFunc}{\mathrm{J}} +\renewcommand{\KFunc}{\mathrm{K}} +\renewcommand{\LFunc}{\mathrm{L}} +\renewcommand{\MFunc}{\mathrm{M}} +\renewcommand{\NFunc}{\mathrm{N}} +\renewcommand{\OFunc}{\mathrm{O}} +\renewcommand{\PFunc}{\mathrm{P}} +\renewcommand{\QFunc}{\mathrm{Q}} +\renewcommand{\RFunc}{\mathrm{R}} +\renewcommand{\SFunc}{\mathrm{S}} +\renewcommand{\TFunc}{\mathrm{T}} +\renewcommand{\UFunc}{\mathrm{U}} +\renewcommand{\VFunc}{\mathrm{V}} +\renewcommand{\WFunc}{\mathrm{W}} +\renewcommand{\XFunc}{\mathrm{X}} +\renewcommand{\YFunc}{\mathrm{Y}} +\renewcommand{\ZFunc}{\mathrm{Z}} +% Ratios to permanent income are normal face +\renewcommand{\aNrm}{a} +\renewcommand{\bNrm}{b} +\renewcommand{\cNrm}{c} +\renewcommand{\dNrm}{d} +\renewcommand{\eNrm}{e} +\renewcommand{\fNrm}{f} +\renewcommand{\hNrm}{h} +\renewcommand{\iNrm}{i} +\renewcommand{\jNrm}{j} +\renewcommand{\kNrm}{k} +\renewcommand{\mNrm}{m} +\renewcommand{\pNrm}{p} 
+\renewcommand{\rNrm}{s} +\renewcommand{\sNrm}{s} +\renewcommand{\vNrm}{v} +\renewcommand{\yNrm}{y} +\renewcommand{\zNrm}{z} + +\renewcommand{\ANrm}{A} +\renewcommand{\BNrm}{B} +\renewcommand{\CNrm}{C} +\renewcommand{\DNrm}{D} +\renewcommand{\ENrm}{E} +\renewcommand{\FNrm}{F} +\renewcommand{\HNrm}{H} +\renewcommand{\INrm}{I} +\renewcommand{\JNrm}{J} +\renewcommand{\KNrm}{K} +\renewcommand{\MNrm}{M} +\renewcommand{\PNrm}{P} +\renewcommand{\SNrm}{S} +\renewcommand{\VNrm}{V} +\renewcommand{\YNrm}{Y} +\renewcommand{\ZNrm}{Z} + +\renewcommand{\RNrm}{\mathcal{R}} +% Ind and Agg varaibles begin with lower case +\renewcommand{\tranShkInd}{\theta} % +\renewcommand{\tranShk}{\tranShkInd} % +\renewcommand{\tranShkAgg}{\Theta} % +\renewcommand{\permShkInd}{\psi} % +\renewcommand{\permShk}{\permShkInd} % +\renewcommand{\PermShkAgg}{\Psi} % +\renewcommand{\tranShkAgg}{\Theta} % +\renewcommand{\std}{\sigma} +\renewcommand{\tranShkIndStd}{\std_{\tranShkInd}} % +\renewcommand{\tranShkIndVar}{\std^{2}_{\tranShkInd}} % +\renewcommand{\tranShkAggStd}{\std_{\tranShkAgg}} % +\renewcommand{\tranShkAggVar}{\std^{2}_{\tranShkAgg}} % + +% Combo variables (combining Ind and Agg) +\renewcommand{\PermShk}{\mathbf{\Psi}} +\renewcommand{\PermShkStd}{\std_{\PermShk}} +\renewcommand{\PermShkVar}{\std^{2}_{\PermShk}} +\renewcommand{\PermLvl}{\pLvl} +\renewcommand{\PermLvlAgg}{\PLvl} + +% More specialized variables +\renewcommand{\TranShkAll}{\pmb{\xi}} +\renewcommand{\TranShkMin}{\underline{\xi}} +\renewcommand{\TranShkMax}{\overline{\xi}} +\renewcommand{\TranShkStd}{\std_{\TranShk}} +\renewcommand{\TranShkVar}{\std^{2}_{\TranShk}} +\renewcommand{\TranShkEmp}{\pmb{\theta}} +\renewcommand{\TranShkEmpMin}{}\renewcommand{\TranShkEmpMin}{\underline{\TranShkEmp}} +\renewcommand{\TranShkEmpMax}{}\renewcommand{\TranShkEmpMax}{\overline{\TranShkEmp}} +\renewcommand{\IncUnemp}{\mu} % Income in unemployment + +\renewcommand{\permLvlAgg}{\mathrm{P}} % +\renewcommand{\permLvlInd}{\mathrm{p}} % + +\renewcommand{\MPCmin}{{\uline{\kappa}}} +\renewcommand{\MPCmax}{{\bar{\kappa}}} +\renewcommand{\MPCmaxmax}{{\bar{\bar{\kappa}}}} +\renewcommand{\MPCmaxmin}{{\hat{\underline{\kappa}}}} +\renewcommand{\MPCminmin}{{\underline{\kappa}}} +\renewcommand{\Opt}{\tilde} +\renewcommand{\permGroFacAgg}{\mathrm{G}} +\renewcommand{\permGroFacInd}{\mathsf{G}} +\renewcommand{\PermGroFac}{\mathcal{G}} +\renewcommand{\PermGroFacAdj}{\underline{\PermGroFac}} +\renewcommand{\PermGroFacuAdj}{\underline{\underline{\PermGroFac}}} +\renewcommand{\PermGroRte}{g} + +\renewcommand{\Alive}{\mathcal{L}}\renewcommand{\Alive}{\mathcal{L}} +\renewcommand{\RfreeAgg}{}\renewcommand{\RfreeAgg}{\Agg{\Rfree}} + +\renewcommand{\DeprFac}{\daleth} +\renewcommand{\deprRte}{\delta} % +\renewcommand{\DiscFac}{\beta} +\renewcommand{\DiscFacAlt}{\beth} +\renewcommand{\DiscAltuAdj}{{\underline{\underline{\beth}}}} +\renewcommand{\DiscAlt}{}\renewcommand{\DiscAlt}{\beth} +\renewcommand{\DiscFacRaw}{\beta} +\renewcommand{\DiscFacLiv}{\underline{\DiscFacRaw}} +\renewcommand{\discRte}{\vartheta} % + +\renewcommand{\APFac}{\text{\pmb{\Thorn}}} % Former \Pat +\renewcommand{\APFacDefn}{\hyperlink{APFacDefn}{\textrm{APF}}} + +\renewcommand{\GPFacRaw}{\APFac_{\PermGroFac}} +\renewcommand{\GPFacNrm}{\APFac_{\PermGroFacAdj}} +\renewcommand{\RPFac}{\APFac_{\Rfree}} + +\renewcommand{\RPRte}{\text{\thorn}_{\rfree}} +\renewcommand{\GPRte}{\text{\thorn}_{\PermGroRte}} +\renewcommand{\APRte}{\text{\thorn}} + + +\renewcommand{\EPermShkInv}{\Ex[\PermShk^{-1}]} % Formerly EpShkInv 
+\renewcommand{\InvEPermShkInv}{\underline{\PermShk}} % Formerly InvEpShkInv +\renewcommand{\uInvEuPermShk}{\underline{\underline{\PermShk}}} % Formerly {\uInvEpShkuInv} + +\renewcommand{\RfreeEff}{\bar{\Rfree}} % Blanchard-adjusted interest rate + +\renewcommand{\PopnGroFac}{\Xi} +\renewcommand{\PopnGroRte}{\xi} +\renewcommand{\PopnLvl}{\pmb{\mathrm{N}}} + +\renewcommand{\LivPrb}{\Alive} +\renewcommand{\livPrb}{\ell} + +\renewcommand{\cncl}{} +\renewcommand\cncl[1]{{\cancel{#1}}} + +\renewcommand{\pNotZero}{(1-\pZero)} + +\renewcommand{\CARA}{{\alpha}} +\renewcommand{\CRRA}{\rho} +\renewcommand{\diePrb}{{\mathsf{d}}} % Continuous time death rate +\renewcommand{\DiePrb}{{\mathsf{D}}} % Discrete-time one-period death rate +\renewcommand{\Ex}{{\mathbb{E}}} % Expectations operator defined in econtex.cls +\renewcommand{\Mean}{{\mathbb{M}}} % Mean +\renewcommand{\MPC}{{\kappa}} +\renewcommand{\MPCFunc}{\pmb{\kappa}} +\renewcommand{\pZero}{\wp} + +\renewcommand{\rfree}{\mathsf{r}} % The net return rate on the safe asset +\renewcommand{\Rfree}{\mathsf{R}} % The return factor on the safe asset +\renewcommand{\RSave}{{\underline{\Rfree}}} +\renewcommand{\rsave}{{\underline{\rfree}}} +\renewcommand{\RBoro}{{\bar{\Rfree}}} +\renewcommand{\rboro}{{\bar{\rfree}}} + +\renewcommand{\Risky}{{\mathbf{R}}} % The return factor on the risky asset +\renewcommand{\risky}{{\mathbf{r}}} % The arithmetic return rate E[\Risky] - 1 +\renewcommand{\riskyELog}{\risky} % The geometric return rate \log \Risky +\renewcommand{\riskyELev}{\boldsymbol{r}} % The arithmetic return rate \Risky - 1 +\renewcommand{\riskyshare}{{\varsigma}} +\renewcommand{\riskyvar}{\std^{2}_{\risky}} +\renewcommand{\Rport}{\mathfrak{R}} % Portfolio-weighted return +\renewcommand{\rport}{\mathfrak{r}} + +\renewcommand{\uPPP}{{{\mathrm{u}^{\prime\prime\prime}}}} +\renewcommand{\uPP}{{{\mathrm{u}^{\prime\prime}}}} +\renewcommand{\uP}{{{\mathrm{u}^{\prime}}}} +\renewcommand{\util}{u} + +\renewcommand{\Kap}{{K}} +\renewcommand{\kap}{k} + +\renewcommand{\leiShare}{\zeta} % + +\renewcommand{\MPSmin}{\pZero^{1/\CRRA} \RPFac} +\renewcommand{\MPSmax}{\RPFac} + +\renewcommand{\PDV}{{\mathbb{P}}} % +\renewcommand{\Wage}{{\mathsf{W}}} +\renewcommand{\wage}{{\mathsf{w}}} + +\renewcommand{\TaxLev}{T} +\renewcommand{\Tax}{} +\renewcommand{\TaxFree}{{\cancel{\Tax}}} + +\renewcommand{\Alt}{\grave} + +\renewcommand{\urate}{{\mho}} +\renewcommand{\erate}{{\cancel{\mho}}} +\renewcommand{\unins}{\upsilon} + +\renewcommand{\Labor}{}\renewcommand{\Labor}{\mathrm{L}} +\renewcommand{\labor}{}\renewcommand{\labor}{\ell} + +\renewcommand{\EEndMap}{{\mathsf{E}}} +\renewcommand{\TMap}{\mathscr{T}} + +\renewcommand{\CEndFunc}{{\mathfrak{C}}} +\renewcommand{\cEndFunc}{{\mathfrak{c}}} + +\renewcommand{\uFuncInv}{\rotatebox{180}{$\uFunc$}} +\renewcommand{\muFuncInv}{\rotatebox{180}{$\uFunc$}} + +\renewcommand{\Hi}{\overline} +\renewcommand{\Lo}{\underline} + +\renewcommand{\Rnorm}{{\mathcal{R}}} % Normalized version of riskless return factor +\renewcommand{\rnorm}{{\mathit{r}}} % Normalized version of riskless rate of return + +\renewcommand{\EpremLog}{\varphi} % Not using regular \eprem because want to distinguish between \varphi = log E_{t}[\Phi_{t+1}] and \phi_{t} = E[\log \Phi_{t}] +\renewcommand{\EPrem}{\pmb{\varphi}} % equity premium +\renewcommand{\eprem}{\phi} % log equity premium + +\renewcommand{\weight}{\omega} + +\renewcommand{\FDist}{{\mathcal{F}}} +\renewcommand{\fDist}{{\mathcal{f}}} + +\renewcommand{\aMin}{{\underline{\aNrm}}} +
+\renewcommand{\FDist}{}\renewcommand{\FDist}{\mathcal{F}} +\renewcommand{\fDist}{}\renewcommand{\fDist}{\mathcal{f}} + +\renewcommand{\Nrml}{}\renewcommand{\Nrml}{\mathcal{N}} diff --git a/Resources/texmf-local/tex/latex/econark-replace.sty b/docs/econark-clean.sty similarity index 93% rename from Resources/texmf-local/tex/latex/econark-replace.sty rename to docs/econark-clean.sty index c287401a2..da0671170 100644 --- a/Resources/texmf-local/tex/latex/econark-replace.sty +++ b/docs/econark-clean.sty @@ -1,3 +1,4 @@ +% Those things that can be replaced by regular letters should be % Ratios to permanent income are normal face \renewcommand{\aNrm}{a} \renewcommand{\bNrm}{b} @@ -66,4 +67,5 @@ \newcommand{\prdT}{t} \newcommand{\prd}{t} \newcommand{\trmT}{T} - +\newcommand{\prdLsT}{t-1} +\newcommand{\prdLst}{t-1} diff --git a/docs/sec_introduction-input-clean.tex b/docs/sec_introduction-input-clean.tex deleted file mode 100644 index 4a7b24af8..000000000 --- a/docs/sec_introduction-input-clean.tex +++ /dev/null @@ -1,4 +0,0 @@ -\hypertarget{introduction}{} -\section{Introduction}\label{sec:introduction} - - These lecture notes provide a gentle introduction to a particular set of solution tools for the canonical consumption-saving/portfolio allocation problem. Specifically, the notes describe and solve optimization problems for a consumer facing uninsurable idiosyncratic risk to nonfinancial income (e.g., labor or transfer income), first without and then with optimal portfolio choice,\footnote{See \cite{merton:restat} and \cite{samuelson:portfolio} for a solution to the problem of a consumer whose only risk is rate-of-return risk on a financial asset; the combined case (both financial and nonfinancial risk) is solved below, and much more closely resembles the case with only nonfinancial risk than it does the case with only financial risk.} with detailed intuitive discussion of various mathematical and computational techniques that, together, speed the solution by many orders of magnitude. The problem is solved with and without liquidity constraints, and the infinite horizon solution is obtained as the limit of the finite horizon solution. After the basic consumption/saving problem with a deterministic interest rate is described and solved, an extension with portfolio choice between a riskless and a risky asset is also solved. Finally, a simple example shows how to use these methods (via the statistical `method of simulated moments' (MSM for short)) to estimate structural parameters like the coefficient of relative risk aversion (\textit{a la} Gourinchas and Parker~\citeyearpar{gpLifecycle} and Cagetti~\citeyearpar{cagettiWprofiles}). diff --git a/docs/sec_introduction-input.tex b/docs/sec_introduction-input.tex deleted file mode 100644 index 4a7b24af8..000000000 --- a/docs/sec_introduction-input.tex +++ /dev/null @@ -1,4 +0,0 @@ -\hypertarget{introduction}{} -\section{Introduction}\label{sec:introduction} - - These lecture notes provide a gentle introduction to a particular set of solution tools for the canonical consumption-saving/portfolio allocation problem.
Specifically, the notes describe and solve optimization problems for a consumer facing uninsurable idiosyncratic risk to nonfinancial income (e.g., labor or transfer income), first without and then with optimal portfolio choice,\footnote{See \cite{merton:restat} and \cite{samuelson:portfolio} for a solution to the problem of a consumer whose only risk is rate-of-return risk on a financial asset; the combined case (both financial and nonfinancial risk) is solved below, and much more closely resembles the case with only nonfinancial risk than it does the case with only financial risk.} with detailed intuitive discussion of various mathematical and computational techniques that, together, speed the solution by many orders of magnitude. The problem is solved with and without liquidity constraints, and the infinite horizon solution is obtained as the limit of the finite horizon solution. After the basic consumption/saving problem with a deterministic interest rate is described and solved, an extension with portfolio choice between a riskless and a risky asset is also solved. Finally, a simple example shows how to use these methods (via the statistical `method of simulated moments' (MSM for short)) to estimate structural parameters like the coefficient of relative risk aversion (\textit{a la} Gourinchas and Parker~\citeyearpar{gpLifecycle} and Cagetti~\citeyearpar{cagettiWprofiles}). diff --git a/docs/sec_method-of-moderation-input.tex b/docs/sec_method-of-moderation-input.tex deleted file mode 100644 index 4832cced4..000000000 --- a/docs/sec_method-of-moderation-input.tex +++ /dev/null @@ -1,668 +0,0 @@ -\hypertarget{the-method-of-moderation}{} -\subsection{The Method of Moderation}\label{sec:method-of-moderation} - - Unfortunately, this endogenous gridpoints solution is not very - well-behaved outside the original range of gridpoints targeted by - the solution method. (Though other common solution methods are no - better outside their own predefined ranges). - Figure~\ref{fig:ExtrapProblem} demonstrates the point by plotting - the amount of precautionary saving implied by a linear extrapolation - of our approximated consumption rule (the consumption of the perfect - foresight consumer $\cFuncAbove_{\prd-1}$ minus our approximation to - optimal consumption under uncertainty, $\Aprx{\cFunc}_{\prd-1}$). - Although theory proves that precautionary saving is always positive, - the linearly extrapolated numerical approximation eventually - predicts negative precautionary saving (at the point in the figure - where the extrapolated locus crosses the horizontal axis). - - \hypertarget{ExtrapProblemPlot}{} - \begin{figure} - \includegraphics[width=6in]{./Figures/ExtrapProblemPlot} - \caption{For Large Enough $m_{\prd-1}$, Predicted Precautionary Saving is Negative (Oops!)} - \label{fig:ExtrapProblem} - \end{figure} - - This error cannot be fixed by extending the upper gridpoint; in the presence of serious uncertainty, the consumption rule will need to be evaluated outside of \textit{any} prespecified grid (because starting from the top gridpoint, a large enough realization of the uncertain variable will push next period's realization of assets above that top; a similar argument applies below the bottom gridpoint). While a judicious extrapolation technique can prevent this problem from being fatal (for example by carefully excluding negative precautionary saving), the problem is often dealt with using inelegant methods whose implications for the accuracy of the solution are difficult to gauge. 
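The mechanics of the failure are easy to reproduce. The following toy sketch is illustrative only -- the functional form of the `true' rule is invented for the purpose, not taken from the model -- but it mimics a concave consumption rule approaching the perfect-foresight rule from below, and shows the linearly extrapolated `precautionary saving' turning negative beyond the grid:
\begin{verbatim}
import numpy as np

# Toy stand-ins (NOT model output): a linear perfect-foresight rule and a
# concave "true" rule that approaches it from below as m grows
kappaMin, hTop = 0.05, 10.0
cPF = lambda m: kappaMin * (m + hTop)
cTrue = lambda m: cPF(m) - 0.5 * np.exp(-m / 4)

mTop = 8.0                                    # largest gridpoint of the solution
slope = (cTrue(mTop) - cTrue(mTop - 0.5)) / 0.5
cExtrap = lambda m: cTrue(mTop) + slope * (m - mTop)

for m in (10.0, 30.0, 60.0):                  # beyond the grid, extrapolated
    print(m, cPF(m) - cExtrap(m))             # precautionary saving eventually
                                              # turns negative
\end{verbatim}
Because a concave function is everywhere steeper than its asymptotic slope, the last linear segment has slope above $\MPCmin$ and the extrapolated rule must eventually cross the perfect-foresight rule -- which is exactly the pathology in the figure above.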
- - -%\renewcommand{\prd}{t} % For the rest of the doc, use generic t vs t+1 - - As a preliminary to our solution, define $\hNrm_{\EndStp}$ as end-of-period human wealth (the present discounted value of future labor income) for a perfect foresight version of the problem of a `risk optimist:' a period-$t$ consumer who believes with perfect confidence that the shocks will always take their expected value of \permShkOn {1, $\TranShkEmp_{t+n} = \Ex[\TranShkEmp]=1~\forall~n>0$ and $\permShk_{t+n} = \Ex[\permShk]=1~\forall~n>0$.} {1, $\TranShkEmp_{t+n} = \Ex[\TranShkEmp]=1~\forall~n>0$.} The solution to a perfect foresight problem of this kind takes the form\footnote{For a derivation, see \cite{BufferStockTheory}; $\MPCmin_{\prd}$ is defined therein as the MPC of the perfect foresight consumer with horizon $\trmT-t$.} - \begin{equation}\begin{gathered}\begin{aligned} - \cFuncAbove_{\prd}(\mNrm_{\prd}) & = (\mNrm_{\prd} + \hNrm_{\EndStp})\MPCmin_{\prd} \label{eq:cFuncAbove} - \end{aligned}\end{gathered}\end{equation} - for a constant minimal marginal propensity to consume $\MPCmin_{\prd}$ given below. - - We similarly define $\hEndMin_{\EndStp}$ as `minimal human wealth,' the - present discounted value of labor income if the shocks were to take on - their worst possible value in every future period \permShkOn - {$\TranShkEmp_{t+n} = \TranShkEmpMin ~\forall~n>0$ and $\permShk_{t+n} = - \permShkMin ~\forall~n>0$} {$\TranShkEmp_{t+n} = \TranShkEmpMin - ~\forall~n>0$} (which we define as corresponding to the beliefs of a - `pessimist'). - - \ctw{}{We will call a `realist' the consumer who correctly perceives the true - probabilities of the future risks and optimizes accordingly.} - - A first useful point is that, for the realist, a lower bound for the - level of market resources is $\ushort{m}_{\prd} = -\hEndMin_{\EndStp}$, because - if $m_{\prd}$ equalled this value then there would be a positive finite - chance (however small) of receiving \permShkOn - {$\TranShkEmp_{t+n}=\TranShkEmpMin$ and $\permShk_{t+n}=\permShkMin$} - {$\TranShkEmp_{t+n}=\TranShkEmpMin$} - in - every future period, which would require the consumer to set $c_{\prd}$ - to zero in order to guarantee that the intertemporal budget constraint - holds\ctw{.}{~(this is the multiperiod generalization of the discussion in - section \ref{subsec:LiqConstrSelfImposed} explaining the derivation of the `natural borrowing constraint' for period $\trmT-1$, - $\ushort{a}_{\prd-1}$).} Since consumption of zero yields negative - infinite utility, the solution to the realist consumer's problem is not well - defined for values of $m_{\prd} < \ushort{m}_{\prd}$, and the limiting - value of the realist's $c_{\prd}$ is zero as $m_{\prd} \downarrow \ushort{m}_{\prd}$. - - Given this result, it will be convenient to define `excess' market - resources as the amount by which actual resources exceed the lower - bound, and `excess' human wealth as the amount by which mean expected human wealth - exceeds guaranteed minimum human wealth: - \begin{equation*}\begin{gathered}\begin{aligned} - \aboveMin \mNrm_{\prd} & = m_{\prd}+\overbrace{\hEndMin_{\EndStp}}^{=-\ushort{m}_{\prd}} - \\ \aboveMin \hNrm_{\EndStp} & = \hNrm_{\EndStp}-\hEndMin_{\EndStp}. - \end{aligned}\end{gathered}\end{equation*} - - We can now transparently define the optimal - consumption rules for the two perfect foresight problems, those of the - `optimist' and the `pessimist.'
The `pessimist' perceives human - wealth to be equal to its minimum feasible value $\hEndMin_{\EndStp}$ with certainty, so - consumption is given by the perfect foresight solution - \begin{equation*}\begin{gathered}\begin{aligned} - \cFuncBelow_{\prd}(m_{\prd}) & = (m_{\prd}+\hEndMin_{\EndStp})\MPCmin_{\prd} - \\ & = \aboveMin \mNrm_{\prd}\MPCmin_{\prd} - . - \end{aligned}\end{gathered}\end{equation*} - - The `optimist,' on the other hand, pretends that there is no uncertainty - about future income, and therefore consumes - \begin{equation*}\begin{gathered}\begin{aligned} - \cFuncAbove_{\prd}(m_{\prd}) & = (m_{\prd} +\hEndMin_{\EndStp} - \hEndMin_{\EndStp} + \hNrm_{\EndStp} )\MPCmin_{\prd} - \\ & = (\aboveMin \mNrm_{\prd} + \aboveMin \hNrm_{\EndStp})\MPCmin_{\prd} - \\ & = \cFuncBelow_{\prd}(m_{\prd})+\aboveMin \hNrm_{\EndStp} \MPCmin_{\prd} - . - \end{aligned}\end{gathered}\end{equation*} - - It seems obvious that the spending of the realist will be strictly greater - than that of the pessimist and strictly less than that of the - optimist. Figure~\ref{fig:IntExpFOCInvPesReaOptNeedHiPlot} illustrates the proposition for the consumption rule in period $\trmT-1$. - \hypertarget{IntExpFOCInvPesReaOptNeedHiPlot}{} - \begin{figure} - \includegraphics[width=6in]{./Figures/IntExpFOCInvPesReaOptNeedHiPlot} - \caption{Moderation Illustrated: $\underline{\cFunc}_{\prd-1} < \Aprx{\cFunc}_{\prd-1} < \bar{\cFunc}_{\prd-1}$} - \label{fig:IntExpFOCInvPesReaOptNeedHiPlot} - \end{figure} - - \indent The proof is more difficult than might be imagined, but - the necessary work is done in \cite{BufferStockTheory}, so we will take - the proposition as a fact and proceed by manipulating the inequality: - - - \begin{center} - \begin{tabular}{rcl} - $ \aboveMin \mNrm_{\prd} \MPCmin_{\prd} < $ & $ \cFunc_{\prd}(\ushort{m}_{\prd}+\aboveMin \mNrm_{\prd}) $ & $< (\aboveMin \mNrm_{\prd}+\aboveMin \hNrm_{\EndStp})\MPCmin_{\prd} $ - \\ $- \aboveMin \mNrm_{\prd} \MPCmin_{\prd} > $ & $ -\cFunc_{\prd}(\ushort{m}_{\prd}+\aboveMin \mNrm_{\prd}) $ & $> -(\aboveMin \mNrm_{\prd}+\aboveMin \hNrm_{\EndStp})\MPCmin_{\prd} $ - \\ $ \aboveMin \hNrm_{\EndStp} \MPCmin_{\prd} > $ & $ \bar{\cFunc}_{\prd}(\ushort{m}_{\prd}+\aboveMin \mNrm_{\prd})-\cFunc_{\prd}(\ushort{m}_{\prd}+\aboveMin \mNrm_{\prd}) $ & $> 0$ - \\ $1 > $ & $ \underbrace{\left(\frac{\bar{\cFunc}_{\prd}(\ushort{m}_{\prd}+\aboveMin \mNrm_{\prd})-\cFunc_{\prd}(\ushort{m}_{\prd}+\aboveMin \mNrm_{\prd})}{\aboveMin \hNrm_{\EndStp} \MPCmin_{\prd}}\right)}_{\equiv \Hi{\koppa}_{\prd}} $ & $> 0$ - \end{tabular} - \end{center} - - \noindent where the fraction in the middle of the last inequality is the ratio - of actual precautionary saving (the numerator is the difference - between perfect-foresight consumption and optimal consumption in the - presence of uncertainty) to the maximum conceivable amount of - precautionary saving (the amount that would be undertaken by the - pessimist who consumes nothing out of any future income beyond the perfectly certain component).
- - Defining $\mu_{\prd} = - \log \aboveMin \mNrm_{\prd}$ (which can range from $-\infty$ to $\infty$), the object in the middle of the last inequality is - \begin{equation}\begin{gathered}\begin{aligned} - \Hi{\koppa}_{\prd}(\mu_{\prd}) & \equiv \left(\frac{\bar{\cFunc}_{\prd}(\ushort{m}_{\prd}+e^{\mu_{\prd}})-\cFunc_{\prd}(\ushort{m}_{\prd}+e^{\mu_{\prd}})}{\aboveMin \hNrm_{\EndStp} \MPCmin_{\prd}}\right), \label{eq:koppa} - \end{aligned}\end{gathered}\end{equation} - and we now define - \begin{equation}\begin{gathered}\begin{aligned} - \Hi{\chiFunc}_{\prd}(\mu_{\prd}) & = \log \left(\frac{1-\Hi{\koppa}_{\prd}(\mu_{\prd})}{\Hi{\koppa}_{\prd}(\mu_{\prd})}\right) - \\ & = \log \left(1/\Hi{\koppa}_{\prd}(\mu_{\prd})-1\right) \label{eq:chi} - \end{aligned}\end{gathered}\end{equation} - which has the virtue that it is linear in the limit as $\mu_{\prd}$ approaches $+\infty$. - - Given $\Hi{\chiFunc}$, the consumption function can be recovered from - \begin{equation}\begin{gathered}\begin{aligned} - \Hi{\cFunc}_{\prd} & = \bar{\cFunc}_{\prd}-\overbrace{\left(\frac{1}{1+\exp(\Hi{\chiFunc}_{\prd})}\right)}^{=\Hi{\koppa}_{\prd}} \aboveMin \hNrm_{\EndStp} \MPCmin_{\prd}. \label{eq:cFuncHi} - \end{aligned}\end{gathered}\end{equation} - - Thus, the procedure is to calculate $\Hi{\chiFunc}_{\prd}$ at the points - $\vctr{\mu}_{\prd}$ corresponding to the log of the $\aboveMin - \vctr{m}_{\prd}$ points defined above, and then to use these to construct an - interpolating approximation $\Aprx{\Hi{\chiFunc}}_{\prd}$ from which we indirectly obtain our - approximated consumption rule $\Aprx{\Hi{\cFunc}}_{\prd}$ by substituting $\Aprx{\Hi{\chiFunc}}_{\prd}$ for $\Hi{\chiFunc}$ in equation \eqref{eq:cFuncHi}. - - Because this method relies upon the fact that the problem is easy to - solve if the decision maker has unreasonable views (either in the - optimistic or the pessimistic direction), and because the correct - solution is always between these immoderate extremes, we call our - solution procedure the `method of moderation.' - - Results are shown in Figure~\ref{fig:ExtrapProblemSolved}; a reader - with very good eyesight might be able to detect the barest hint of a - discrepancy between the Truth and the Approximation at the far - right-hand edge of the figure\ctw{.}{ -- a stark contrast with the calamitous - divergence evident in Figure~\ref{fig:ExtrapProblem}.} - \hypertarget{ExtrapProblemSolvedPlot}{} - \begin{figure} - \includegraphics[width=6in]{./Figures/ExtrapProblemSolvedPlot} - \caption{Extrapolated $\Aprx{\Hi{\cFunc}}_{\prd-1}$ Constructed Using the Method of Moderation} - \label{fig:ExtrapProblemSolved} - \end{figure} - -\hypertarget{approximating-the-slope-too}{} -\subsection{Approximating the Slope Too} - - -Until now, we have calculated the level of consumption at various different gridpoints and used linear interpolation\ctw{.}{ (either directly for $\cFunc_{\prd-1}$ or indirectly for, say, $\Hi{\chiFunc}_{\prd-1}$).} But the resulting piecewise linear approximations have the unattractive feature that they are not differentiable at the `kink points' that correspond to the gridpoints where the slope of the function changes discretely. - - - -\cite{BufferStockTheory} proves that the true consumption function for -this problem -is `smooth:' It -exhibits a well-defined unique marginal propensity to consume at every -positive value of $m$.
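In code, the level-only version of this recipe is only a few lines. The sketch below uses made-up inputs standing in for the gridpoints and realist consumption values an actual solution would deliver; it computes $\Hi{\koppa}$ and $\Hi{\chiFunc}$ at the gridpoints, interpolates $\Hi{\chiFunc}$ linearly in $\mu$, and recovers consumption from \eqref{eq:cFuncHi}:
\begin{verbatim}
import numpy as np
from scipy.interpolate import interp1d

# Hypothetical inputs (toy numbers, not model output):
mBot, hTop, kapMin = -2.0, 12.0, 0.05      # ushort{m}, excess h, MPCmin
mGrid = mBot + np.array([0.5, 1.0, 2.0, 4.0, 8.0])
cReal = np.array([0.30, 0.42, 0.55, 0.70, 0.92])   # realist c at gridpoints

cOpt = lambda m: (m - mBot + hTop) * kapMin        # optimist rule, eq (cFuncAbove)
mu = np.log(mGrid - mBot)                          # mu = log of excess m
koppa = (cOpt(mGrid) - cReal) / (hTop * kapMin)    # eq (koppa), lies in (0, 1)
chi = np.log(1.0 / koppa - 1.0)                    # eq (chi)
chiAprx = interp1d(mu, chi, fill_value="extrapolate")  # chi is asymptotically
                                                       # linear, so extrapolation
def cAprx(m):                                          # is well behaved
    kop = 1.0 / (1.0 + np.exp(chiAprx(np.log(m - mBot))))
    return cOpt(m) - kop * hTop * kapMin               # eq (cFuncHi)
\end{verbatim}
Because the recovered $\Hi{\koppa}$ stays strictly between $0$ and $1$ wherever $\Aprx{\Hi{\chiFunc}}$ is finite, the rule produced this way respects both the optimist's and the pessimist's bounds even far beyond the last gridpoint.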
This suggests that we should calculate, not -just the level of consumption, but also the marginal propensity to -consume (henceforth $\MPC$) at each gridpoint, and then find an -interpolating approximation that smoothly matches both the level and the slope -at those points. - -This requires us to differentiate \eqref{eq:koppa} and \eqref{eq:chi}, yielding -\begin{equation}\begin{gathered}\begin{aligned} - \Hi{\koppa}_{\prd}^{\mu}(\mu_{\prd}) & = (\aboveMin \hNrm_{\EndStp} \MPCmin_{\prd})^{-1}e^{\mu_{\prd}}\left(\MPCmin_{\prd}-\overbrace{\cFunc^{\mNrm}_{\prd}(\ushort{m}_{\prd}+e^{\mu_{\prd}})}^{\equiv \MPCFunc_{\prd}(\mNrm_{\prd})}\right) \label{eq:koppaPrime} - \\ \Hi{\chiFunc}_{\prd}^{\mu}(\mu_{\prd}) & = \left(\frac{-\Hi{\koppa}_{\prd}^{\mu}(\mu_{\prd})/\Hi{\koppa}_{\prd}^{2}}{1/\Hi{\koppa}_{\prd}(\mu_{\prd})-1}\right) - \end{aligned}\end{gathered}\end{equation} -and (dropping arguments) with some algebra these can be combined to yield -\begin{equation}\begin{gathered}\begin{aligned} - \Hi{\chiFunc}_{\prd}^{\mu} & = \left(\frac{\MPCmin_{\prd} \aboveMin \mNrm_{\prd} \aboveMin \hNrm_{\EndStp} (\MPCmin_{\prd}-\MPC_{\prd})} - {(\cFuncAbove_{\prd}-\cFunc_{\prd})(\cFuncAbove_{\prd}-\cFunc_{\prd} - \MPCmin_{\prd} \aboveMin \hNrm_{\EndStp})}\right). - \end{aligned}\end{gathered}\end{equation} - -To compute the vector of values of \eqref{eq:koppaPrime} corresponding -to the points in $\vctr{\mu}_{\prd}$, we need the marginal propensities to -consume (designated $\MPC$) at each of the gridpoints, -$\cFunc^{\mNrm}_{\prd}$ (the vector of such values is -$\vctr{\MPC}_{\prd}$). These can be obtained by differentiating the -Euler equation \eqref{eq:upEqbetaOp} (where we define -$\mFunc_{\EndStp}(a) \equiv \cFunc_{\EndStp}(a)+{a}$, and drop the (a) arguments to reduce clutter): -\begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(\cFunc_{\EndStp}) & = \hat{\vFunc}_{\EndStp}^{\aNrm}(\mFunc_{\EndStp}-\cFunc_{\EndStp}), - \end{aligned}\end{gathered}\end{equation} -yielding a marginal propensity to -\textit{have consumed} $\cFunc_{\EndStp}^{\aNrm}$ at each gridpoint: -\begin{equation}\begin{gathered}\begin{aligned} - \uPP(\cEndStp)\cEndStp^\aNrm & = \hat{\vFunc}_{\EndStp}^{\aNrm\aNrm}(\mFunc_{\EndStp}-\cFunc_{\EndStp}) - \\ \cEndStp^\aNrm & = \hat{\vFunc}_{\EndStp}^{\aNrm\aNrm}(\mFunc_{\EndStp}-\cFunc_{\EndStp})/\uPP(\cEndStp) - \end{aligned}\end{gathered}\end{equation} -and the marginal propensity to consume at the beginning of the period is obtained from the marginal propensity to have consumed by differentiating the identity with respect to $\aNrm$: -\begin{equation*}\begin{gathered}\begin{aligned} - \cEndStp & = \mFunc_{\EndStp} - \aNrm - \\ \cEndStp^{\aNrm}+1 & = \mFunc_{\EndStp}^{\aNrm} - \end{aligned}\end{gathered}\end{equation*} -which, together with the chain rule $\cEndStp^\aNrm = \cFunc^{\mNrm}_{\MidStp}\mFunc_{\EndStp}^{\aNrm}$, yields the MPC from -\begin{equation}\begin{gathered}\begin{aligned} - \cFunc^{\mNrm}(\overbrace{\cEndStp^{\aNrm}+1}^{=\mFunc_{\EndStp}^{\aNrm}}) & = \cEndStp^{\aNrm} - \\ \cFunc^\mNrm & = \cEndStp^{\aNrm}/(1+\cEndStp^{\aNrm}) \label{eq:MPCfromMPTHC}.
- \end{aligned}\end{gathered}\end{equation} - - -Designating $\Aprx{\Hi{\cFunc}}_{\prd-1}$ as the approximated consumption rule obtained using an interpolating polynomial approximation to $\Hi{\chiFunc}$ that matches both the level and the first derivative at the gridpoints, Figure~\ref{fig:IntExpFOCInvPesReaOptGapPlot} plots the difference between this latest approximation and the true consumption rule for period $\trmT-1$ up to the same large value (far beyond the largest gridpoint) used in prior figures. Of course, at the gridpoints the approximation will exactly match the true function; but this figure illustrates that the approximation is quite accurate far beyond the last gridpoint (which is the last point at which the difference touches the horizontal axis). (We plot here the difference between the two functions rather than the level plotted in previous figures, because in levels the difference between the approximate and the exact function would not be detectable even to the most eagle-eyed reader.) - - - -\hypertarget{IntExpFOCInvPesReaOptGapPlot}{} -\begin{figure} - \includegraphics[width=6in]{./Figures/IntExpFOCInvPesReaOptGapPlot} - \caption{Difference Between True $\cFunc_{\prd-1}$ and $\Aprx{\Hi{\cFunc}}_{\prd-1}$ Is Minuscule} - \label{fig:IntExpFOCInvPesReaOptGapPlot} -\end{figure} - - - - -\hypertarget{value}{} -\subsection{Value} - - - Often it is useful to know the value function as well as the consumption rule. Fortunately, many of the tricks used when solving for the consumption rule have a direct analogue in approximation of the value function. - - Consider the perfect foresight (or ``optimist's'') problem in period $\trmT-1$. Using the fact that in a perfect foresight model the growth factor for consumption is $(\Rfree \DiscFac)^{1/\CRRA}$, so that $\cNrm_{\prd} = (\Rfree \DiscFac)^{1/\CRRA} \cNrm_{\prd-1}$, we can calculate the value function in period $\trmT-1$: - \begin{equation*}\begin{gathered}\begin{aligned} - \bar{\vFunc}_{\prd-1}(m_{\prd-1}) & \equiv \uFunc(\cNrm_{\prd-1})+\DiscFac \uFunc(\cNrm_{\prd}) - \\ & = \uFunc(\cNrm_{\prd-1})\left(1+\DiscFac ((\DiscFac\Rfree)^{1/\CRRA})^{1-\CRRA}\right) - % \\ & = \uFunc(\cNrm_{\prd-1})\left(1+\DiscFac (\DiscFac\Rfree)^{1/\CRRA-1}\right) - \\ & = \uFunc(\cNrm_{\prd-1})\left(1+(\DiscFac\Rfree)^{1/\CRRA}/\Rfree\right) - \\ & = \uFunc(\cNrm_{\prd-1})\underbrace{\mbox{PDV}_{\prd}^{T}(\cNrm)/\cNrm_{\prd-1}}_{\equiv \PDVCoverc_{\prd-1}^{T}} - \end{aligned}\end{gathered}\end{equation*} - where $\PDVCoverc_{\prd}^{T}=\mbox{PDV}_{\prd}^{T}(\cNrm)$ is the present discounted value of consumption, normalized by current consumption.
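As a quick check on the fact invoked next: in period $\trmT-1$ the perfect-foresight intertemporal budget constraint requires the PDV of consumption to equal total (market plus human) wealth, so dividing that constraint by $\cNrm_{\prd-1}$ gives
\begin{equation*}\begin{gathered}\begin{aligned}
 \PDVCoverc_{\prd-1}^{T} & = 1+(\DiscFac\Rfree)^{1/\CRRA}/\Rfree = \MPCmin_{\prd-1}^{-1}.
\end{aligned}\end{gathered}\end{equation*}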
Using the fact demonstrated in \cite{BufferStockTheory} that $\PDVCoverc_{\prd}=\MPC^{-1}_{\prd}$, a similar function can be constructed recursively for earlier periods, yielding the general expression \hypertarget{vFuncPF}{} - \begin{equation}\begin{gathered}\begin{aligned} - \bar{\vFunc}_{\prd}(m_{\prd}) & = \uFunc(\bar{\cNrm}_{\prd})\PDVCoverc_{\prd}^{T}\label{eq:vFuncPF} - \\ & = \uFunc(\bar{c}_{\prd}) \MPCmin_{\prd}^{-1} % 20190820 - \\ & = \uFunc((\aboveMin \mNrm_{\prd}+\aboveMin \hNrm_{\EndStp})\MPCmin_{\prd}) \MPCmin_{\prd}^{-1} % 20190820 - \\ & = \uFunc(\aboveMin \mNrm_{\prd}+\aboveMin \hNrm_{\EndStp})\MPCmin_{\prd}^{1-\CRRA} \MPCmin_{\prd}^{-1} % 20190820 - \\ & = \uFunc(\aboveMin \mNrm_{\prd}+\aboveMin \hNrm_{\EndStp})\MPCmin_{\prd}^{-\CRRA} % 20190820 - \end{aligned}\end{gathered}\end{equation} - - This can be transformed as - \begin{equation*}\begin{gathered}\begin{aligned} - \bar{\vInv}_{\prd} & \equiv \left((1-\CRRA)\bar{\vFunc}_{\prd}\right)^{1/(1-\CRRA)} - \\ & = \cNrm_{\prd}(\PDVCoverc_{\prd}^{T})^{1/(1-\CRRA)} - \\ & = (\aboveMin \mNrm_{\prd}+\aboveMin \hNrm_{\EndStp})\MPCmin_{\prd}^{-\CRRA/(1-\CRRA)} % 20190820 - \end{aligned}\end{gathered}\end{equation*} - \MPCMatch{with derivative - \begin{equation*}\begin{gathered}\begin{aligned} - \bar{\vInv}_{\prd}^m & = (\mathbb{C}_{\prd}^{T})^{1/(1-\CRRA)}\MPCmin_{\prd}, - \\ & = \MPCmin_{\prd}^{-\CRRA/(1-\CRRA)} % 20190820 - \end{aligned}\end{gathered}\end{equation*}}{} - and since $\PDVCoverc_{\prd}^{T}$ is a constant while the consumption - function is linear, $\bar{\vInv}_{\prd}$ will also be linear. - - We apply the same transformation to the value function for the problem with uncertainty (the ``realist's'' problem)\MPCMatch{ and differentiate}: - \begin{equation*}\begin{gathered}\begin{aligned} - {\vInv}_{\prd} & = \left((1-\CRRA){\vFunc}_{\prd}(m_{\prd})\right)^{1/(1-\CRRA)} - \MPCMatch{\\ {\vInv}^{m}_{\prd} & = \left((1-\CRRA){\vFunc}_{\prd}(m_{\prd})\right)^{-1+1/(1-\CRRA)}{\vFunc}_{\prd}^{m}(m_{\prd})}{} - \end{aligned}\end{gathered}\end{equation*} - and an excellent approximation to the value function can be obtained by - calculating the values of ${\vInv}$ at the same gridpoints used by the - consumption function approximation, and interpolating among those points. - - However, as with the consumption approximation, we can do even better if we - realize that the $\bar{\vInv}$ function for the optimist's problem is - an upper bound for the ${\vInv}$ function in the presence of uncertainty, and the value function - for the pessimist is a lower bound.
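Spelling the two bounds out explicitly (applying the same steps as in \eqref{eq:vFuncPF} to the pessimist's and optimist's linear consumption rules) gives
\begin{equation*}\begin{gathered}\begin{aligned}
 \Lo{\vInv}_{\prd} & = \aboveMin \mNrm_{\prd}\MPCmin_{\prd}(\PDVCoverc_{\prd}^{T})^{1/(1-\CRRA)}
 \\ \bar{\vInv}_{\prd} & = (\aboveMin \mNrm_{\prd}+\aboveMin \hNrm_{\EndStp})\MPCmin_{\prd}(\PDVCoverc_{\prd}^{T})^{1/(1-\CRRA)},
\end{aligned}\end{gathered}\end{equation*}
so the gap between the two bounds is the constant $\aboveMin \hNrm_{\EndStp} \MPCmin_{\prd} (\PDVCoverc_{\prd}^{T})^{1/(1-\CRRA)}$ -- exactly the normalizing denominator used next.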
Analogously to \eqref{eq:koppa}, define an upper-case - \begin{equation}\begin{gathered}\begin{aligned} - \hat{\Koppa}_{\prd}(\mu_{\prd}) & = \left(\frac{\bar{\vInv}_{\prd}(\ushort{m}_{\prd}+e^{\mu_{\prd}})-\vInv_{\prd}(\ushort{m}_{\prd}+e^{\mu_{\prd}})}{\aboveMin \hNrm_{\EndStp} \MPCmin_{\prd} (\PDVCoverc_{\prd}^{T})^{1/(1-\CRRA)}}\right) \label{eq:Koppa} - \end{aligned}\end{gathered}\end{equation} - \MPCMatch{with derivative (dropping arguments) - \begin{equation}\begin{gathered}\begin{aligned} - \hat{\Koppa}_{\prd}^{\mu} & = (\aboveMin \hNrm_{\EndStp} \MPCmin_{\prd} (\PDVCoverc_{\prd}^{T})^{1/(1-\CRRA)})^{-1}e^{\mu_{\prd}}\left(\bar{\vInv}^{m}_{\prd}-\vInv^{m}_{\prd}\right) \label{eq:KoppaPrime} - % \\ & = (\aboveMin \hNrm_{\EndStp} \MPCmin_{\prd})^{-1}e^{\mu_{\prd}}\left((\PDVCoverc_{\prd}^{T})^{1/(1-\CRRA)}\MPCmin_{\prd}-\left((1-\CRRA)\vFunc_{\prd}(m_{\prd})\right)^{-1+1/(1-\CRRA)}\vFunc_{\prd}^{m}(m_{\prd})\right) \notag - \end{aligned}\end{gathered}\end{equation}}{} - and an upper-case version of the $\chiFunc$ equation in \eqref{eq:chi}: - \begin{equation}\begin{gathered}\begin{aligned} - \hat{\Chi}_{\prd}(\mu_{\prd}) & = \log \left(\frac{1-\hat{\Koppa}_{\prd}(\mu_{\prd})}{\hat{\Koppa}_{\prd}(\mu_{\prd})}\right) - \\ & = \log \left(1/\hat{\Koppa}_{\prd}(\mu_{\prd})-1\right) \label{eq:Chi} - \end{aligned}\end{gathered}\end{equation} - \MPCMatch{with corresponding derivative - \begin{equation}\begin{gathered}\begin{aligned} - \hat{\Chi}_{\prd}^{\mu} & = \left(\frac{-\hat{\Koppa}_{\prd}^{\mu}/\hat{\Koppa}_{\prd}^{2}}{1/\hat{\Koppa}_{\prd}-1}\right) - \end{aligned}\end{gathered}\end{equation}}{} - and if we approximate these objects then invert them (as above with - the $\Hi{\koppa}$ and $\Hi{\chiFunc}$ functions) we obtain a very high-quality - approximation to our inverted value function at the same points for - which we have our approximated value function: - \begin{equation}\begin{gathered}\begin{aligned} - \hat{\vInv}_{\prd} & = \bar{\vInv}_{\prd}-\overbrace{\left(\frac{1}{1+\exp(\hat{\Chi}_{\prd})}\right)}^{=\hat{\Koppa}_{\prd}} \aboveMin \hNrm_{\EndStp} \MPCmin_{\prd} (\PDVCoverc_{\prd}^{T})^{1/(1-\CRRA) } - \end{aligned}\end{gathered}\end{equation} - from which we obtain our approximation to the value function\MPCMatch{ and its derivatives~}~as \hypertarget{vHatFunc}{} - \begin{equation}\begin{gathered}\begin{aligned} - \hat{\vFunc}_{\prd} & = \uFunc(\hat{\vInv}_{\prd}) - \\ \hat{\vFunc}^{m}_{\prd} & = \uFunc^{c}(\hat{\vInv}_{\prd}) \hat{\vInv}^{m} - \MPCMatch{\\ \hat{\vFunc}^{mm}_{\prd} & = \uFunc^{c{c}}(\hat{\vInv}_{\prd}) (\hat{\vInv}^{m})^{2} + \uFunc^{c}(\hat{\vInv}_{\prd})\hat{\vInv}^{mm}}{} - . - \end{aligned}\end{gathered}\end{equation} - - Although a linear interpolation that matches the level of $\vInv$ at the gridpoints is simple, a Hermite interpolation that matches both the level and the derivative of the $\hat{\vInv}_{\prd}$ function at the gridpoints has the considerable virtue that the $\hat{\vFunc}_{\prd}$ derived from it numerically satisfies the envelope theorem at each of the gridpoints for which the problem has been solved.
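A sketch of the value-side recovery, continuing the toy inputs from the consumption sketch above (the numbers in \texttt{vInvReal}, standing in for the realist's inverted value at the gridpoints, are again made up for illustration):
\begin{verbatim}
import numpy as np
from scipy.interpolate import interp1d

rho, CoverC = 2.0, 20.0                    # CRRA and PDVCoverc (illustrative)
mBot, hTop, kapMin = -2.0, 12.0, 0.05
mGrid = mBot + np.array([0.5, 1.0, 2.0, 4.0, 8.0])
vInvReal = np.array([0.015, 0.018, 0.022, 0.028, 0.039])

scale = CoverC ** (1.0 / (1.0 - rho))
vInvOpt = lambda m: (m - mBot + hTop) * kapMin * scale         # optimist bound
Koppa = (vInvOpt(mGrid) - vInvReal) / (hTop * kapMin * scale)  # eq (Koppa)
Chi = np.log(1.0 / Koppa - 1.0)                                # eq (Chi)
ChiAprx = interp1d(np.log(mGrid - mBot), Chi, fill_value="extrapolate")

def vAprx(m):                                  # recover vInv, then v = u(vInv)
    Kop = 1.0 / (1.0 + np.exp(ChiAprx(np.log(m - mBot))))
    vInv = vInvOpt(m) - Kop * hTop * kapMin * scale
    return vInv ** (1.0 - rho) / (1.0 - rho)
\end{verbatim}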
- - \MPCMatch{If we use the double-derivative calculated above to produce a higher-order Hermite polynomial, our approximation will also match - marginal propensity to consume at the gridpoints; this would - guarantee that the consumption function generated from the value - function would match both the level of consumption and the - marginal propensity to consume at the gridpoints; the numerical - differences between the newly constructed consumption function and - the highly accurate one constructed earlier would be negligible - within the grid.}{} - - -\hypertarget{refinement-a-tighter-upper-bound}{} -\subsection{Refinement: A Tighter Upper Bound} - \cite{BufferStockTheory} derives an upper limit $\MPCmax_{\prd}$ for the MPC as $m_{\prd}$ - approaches its lower bound. Using this - fact plus the strict concavity of the consumption function yields the - proposition that - \begin{equation}\begin{gathered}\begin{aligned} - \cFunc_{\prd}(\ushort{m}_{\prd}+\aboveMin \mNrm_{\prd}) & < \MPCmax_{\prd} \aboveMin \mNrm_{\prd}. - \end{aligned}\end{gathered}\end{equation} - - The solution method described above does not guarantee that - approximated consumption will respect this constraint between gridpoints, and a failure to - respect the constraint can occasionally cause computational problems in solving - or simulating the model. Here, we - describe a method for constructing an approximation that always - satisfies the constraint. - - \begin{comment} % Old text needs to be revised or eliminated - That is, the realist's consumption function is bounded from above by both - the \textit{unconstrained} optimist's problem already treated, as well as - by the \textit{constrained} optimist's problem, which is a 45 degree line - originating from $\ushort{m}_{\prd}$ on the $m$-axis, as shown in - Figure~\ref{fig:IntExpFOCInvPesReaOptNeed45Plot}. The same is true for - the value function, as illustrated in Figure - \ref{fig:IntExpFOCInvPesReaOptNeed45ValuePlot}. 
- - \hypertarget{IntExpFOCInvPesReaOptNeed45Plot}{} - \begin{figure} - \includegraphics[width=6in]{./Figures/IntExpFOCInvPesReaOptNeed45Plot} - \caption{45 Degree Line as Another Upper Bound} - \label{fig:IntExpFOCInvPesReaOptNeed45Plot} - \end{figure} - - \hypertarget{IntExpFOCInvPesReaOptNeed45ValuePlot}{} - \begin{figure} - \includegraphics[width=6in]{./Figures/IntExpFOCInvPesReaOptNeed45ValuePlot} - \caption{A Constrained Optimist's Value Function as Another Upper Bound} - \label{fig:IntExpFOCInvPesReaOptNeed45ValuePlot} - \end{figure} - - \end{comment} - - \newcommand{\mtCusp}{\ensuremath{\mNrm_{\prd}^{\#}}} - % \newcommand{\aboveMin \mtCusp}{\ensuremath{\aboveMin \mNrm_{\prd}^{\#}}} - - Defining $\mtCusp$ as the `cusp' point where the two upper bounds - intersect: - \begin{equation*}\begin{gathered}\begin{aligned} - \left(\aboveMin \mtCusp+\aboveMin \hNrm_{\EndStp}\right)\MPCmin_{\prd} & = \MPCmax_{\prd} \aboveMin \mtCusp \\ - \aboveMin \mtCusp & = \frac{\MPCmin_{\prd}\aboveMin \hNrm_{\EndStp}}{\MPCmax_{\prd}-\MPCmin_{\prd}} \\ - \mtCusp & = \frac{\MPCmin_{\prd}\aboveMin \hNrm_{\EndStp}}{\MPCmax_{\prd}-\MPCmin_{\prd}}-\hEndMin_{\EndStp}, - \end{aligned}\end{gathered}\end{equation*} - we want to construct a consumption function for $m_{\prd} \in (\ushort{m}_{\prd}, \mtCusp]$ that respects the - tighter upper bound: - \begin{center} - \begin{tabular}{rcl} - $ \aboveMin \mNrm_{\prd} \MPCmin_{\prd} < $ & $ \cFunc_{\prd}(\ushort{m}_{\prd}+\aboveMin \mNrm_{\prd}) $ & $< \MPCmax_{\prd} \aboveMin \mNrm_{\prd} $ - % \\ $-\aboveMin \mNrm_{\prd} \MPCmin_{\prd} > $ & $ -\cFunc_{\prd}(\ushort{m}_{\prd}+\aboveMin \mNrm_{\prd}) $ & $> -\aboveMin \mNrm_{\prd} $ - \\ $ \aboveMin \mNrm_{\prd}(\MPCmax_{\prd}- \MPCmin_{\prd}) > $ & $ \MPCmax_{\prd} \aboveMin \mNrm_{\prd}-\cFunc_{\prd}(\ushort{m}_{\prd}+\aboveMin \mNrm_{\prd}) $ & $> 0$ - \\ $1 > $ & $ \left(\frac{\MPCmax_{\prd} \aboveMin \mNrm_{\prd}-\cFunc_{\prd}(\ushort{m}_{\prd}+\aboveMin \mNrm_{\prd})}{\aboveMin \mNrm_{\prd}(\MPCmax_{\prd}- \MPCmin_{\prd})}\right) $ & $> 0$. - \end{tabular} - \end{center} - - Again defining $\mu_{\prd} =\log \aboveMin \mNrm_{\prd}$, the object in the middle of the inequality is - \begin{equation*}\begin{gathered}\begin{aligned} - \Lo{\koppa}_{\prd}(\mu_{\prd}) & \equiv \frac{\MPCmax_{\prd}-\cFunc_{\prd}(\ushort{m}_{\prd}+e^{\mu_{\prd}})e^{-\mu_{\prd}}}{\MPCmax_{\prd}-\MPCmin_{\prd}} \label{eq:koppaL} - \MPCMatch{\\ \Lo{\koppa}^{\mu}_{\prd}(\mu_{\prd}) & = \frac{\cFunc_{\prd}(\ushort{m}_{\prd}+e^{\mu_{\prd}})e^{-\mu_{\prd}}-\MPCFunc_{\prd}(\ushort{m}_{\prd}+e^{\mu_{\prd}})}{\MPCmax_{\prd}-\MPCmin_{\prd}}}{} . - \end{aligned}\end{gathered}\end{equation*} - - As $m_{\prd}$ approaches - $\ushort{m}_{\prd}$, $\Lo{\koppa}_{\prd}(\mu_{\prd})$ converges to zero, while as $m_{\prd}$ - approaches $+\infty$, $\Lo{\koppa}_{\prd}(\mu_{\prd})$ approaches $1$. - - As before, we can derive an approximated consumption function; call it $\Aprx{\Lo{\cFunc}}_{\prd}$. This function will clearly do a better job approximating the consumption function for low values of $\mNrm_{\prd}$ while the previous approximation will perform better for high values of $\mNrm_{\prd}$. - - For middling values of $\mNrm$ it is not clear which of these functions will perform better. However, an alternative is available which performs well. Define the highest gridpoint below $\mtCusp$ as $\bar{\check{\mNrm}}_{\prd}^{\#}$ and the lowest gridpoint above $\mtCusp$ as $\ushort{\hat{\mNrm}}_{\prd}^{\#}$.
Then there will be a unique interpolating polynomial that matches the level and slope of the consumption function at these two points. Call this function $\tilde{\cFunc}_{\prd}(\mNrm)$. - - Using indicator functions that are zero everywhere except for specified intervals, - \begin{equation*}\begin{gathered}\begin{aligned} - \vctr{1}_{\text{Lo}}(\mNrm) & = 1 \text{~if $ \mNrm \leq \bar{\check{\mNrm}}_{\prd}^{\#} \phantom{< \mNrm < \ushort{\hat{\mNrm}}_{\prd}^{\#} \leq \mNrm}$} - \\ \vctr{1}_{\text{Mid}}(\mNrm) & = 1 \text{~if $\phantom{ \mNrm \leq}~ \bar{\check{\mNrm}}_{\prd}^{\#} < \mNrm < \ushort{\hat{\mNrm}}_{\prd}^{\#} \phantom{\leq \mNrm}$} - \\ \vctr{1}_{\text{Hi}}(\mNrm) & = 1 \text{~if $\phantom{ \mNrm \leq ~\bar{\check{\mNrm}}_{\prd}^{\#} < \mNrm < } \ushort{\hat{\mNrm}}_{\prd}^{\#} \leq \mNrm$} - \end{aligned}\end{gathered}\end{equation*} - we can define a well-behaved approximating consumption function - \begin{equation}\begin{gathered}\begin{aligned} - \Aprx{\cFunc}_{\prd} & = \vctr{1}_{\text{Lo}} \Aprx{\Lo{\cFunc}}_{\prd} + \vctr{1}_{\text{Mid}} \Aprx{\tilde{\cFunc}}_{\prd}+\vctr{1}_{\text{Hi}} \Aprx{\Hi{\cFunc}}_{\prd}. - \end{aligned}\end{gathered}\end{equation} - - This just says that, for each interval, we use the approximation that - is most appropriate. The function is continuous and - once-differentiable everywhere, and is therefore well behaved for - computational purposes. - \begin{comment} - In practice, in our problem the difference due to this refinement is displayed in Figure \ref{fig:IntExpFOCInvPesReaOpt45GapPlot}. - \hypertarget{IntExpFOCInvPesReaOpt45GapPlot}{} - \begin{figure} - \includegraphics[width=6in]{./Figures/IntExpFOCInvPesReaOpt45GapPlot} - \caption{Difference Between $\Aprx{\Hi{\cFunc}}_{L, T-1}$ and $\Aprx{\Hi{\cFunc}}_{H,T-1}$ is Small} - \label{fig:IntExpFOCInvPesReaOpt45GapPlot} - \end{figure} - \end{comment} - - We now construct an upper-bound value function implied for a consumer whose spending behavior is consistent with the refined upper-bound consumption rule. - - For $\mNrm_{\prd} \geq \mNrm_{\prd}^{\#}$, this consumption rule is the same as before, - so the constructed upper-bound value function is also the same. However, for - values $\mNrm_{\prd} < \mNrm_{\prd}^{\#}$ matters are slightly more complicated. - - Start with the fact that at the cusp point, - \begin{equation*}\begin{gathered}\begin{aligned} - \bar{\vFunc}_{\prd}(\mtCusp) & = \uFunc(\bar{\cNrm}_{\prd}(\mtCusp))\PDVCoverc_{\prd}^T \\ - & = \uFunc(\aboveMin \mtCusp \MPCmax_{\prd})\PDVCoverc_{\prd}^{T} - . - \end{aligned}\end{gathered}\end{equation*} - - But for \textit{all} $\mNrm_{\prd}$, - \begin{equation*}\begin{gathered}\begin{aligned} - \bar{\vFunc}_{\prd}(\mNrm) & = \uFunc(\bar{\cNrm}_{\prd}(\mNrm))+ \bar{\vEnd}(\mNrm-\bar{\cNrm}_{\prd}(\mNrm)), - \end{aligned}\end{gathered}\end{equation*} - and we assume that for the consumer below the cusp point consumption is given by $\MPCmax \aboveMin \mNrm_{\prd}$ so for $\mNrm_{\prd}< \mtCusp$ - \begin{equation*}\begin{gathered}\begin{aligned} - \bar{\vFunc}_{\prd}(\mNrm) & = \uFunc( \MPCmax_{\prd} \aboveMin \mNrm)+ \bar{\vEnd}((1-\MPCmax_{\prd})\aboveMin \mNrm), - \end{aligned}\end{gathered}\end{equation*} - which is easy to compute because $\bar{\vEnd}(\aNrm_{\prd}) = \DiscFac \bar{\vFunc}_{\prd+1}(\aNrm_{\prd}\RNrm+1)$ where $\bar{\vFunc}_{\prd}$ is as defined above because a consumer who ends the current period with assets exceeding the lower bound will not expect to be constrained next period. 
(Recall again that we are merely constructing an object that is guaranteed to be an \textit{upper bound} for the value that the `realist' consumer will experience.) At the gridpoints defined by the solution of the consumption problem we can then construct - \begin{equation*}\begin{gathered}\begin{aligned} - \bar{\vInv}_{\prd}(\mNrm) & = ((1-\CRRA)\bar{\vFunc}_{\prd}(\mNrm))^{1/(1-\CRRA)} - \end{aligned}\end{gathered}\end{equation*} -\MPCMatch{and its derivatives}{} which yields the appropriate vector for constructing $\check{\Chi}$ and $\check{\Koppa}$. The rest of the procedure is analogous to that performed for the consumption rule and is thus omitted for brevity. - - -\hypertarget{extension-a-stochastic-interest-factor}{} -\subsection{Extension: A Stochastic Interest Factor} - - -Thus far we have assumed that the interest factor is constant at $\Rfree$. Extending the -previous derivations to allow for a perfectly forecastable time-varying interest factor $\Rfree_{\prd}$ -would be trivial. Allowing for a stochastic interest factor is less trivial. - - -The easiest case is where the interest factor is i.i.d., - \begin{equation}\begin{gathered}\begin{aligned} - \log \Risky_{t+n} & \sim \Nrml(\rfree + \eprem - \sigma^{2}_{\risky}/2,\sigma^{2}_{\risky}) ~\forall~n>0 \label{eq:distRisky} - \end{aligned}\end{gathered}\end{equation} -where $\eprem$ is the risk premium and the $\sigma^{2}_{\risky}/2$ adjustment to the mean log return -guarantees that an increase in $\sigma^{2}_{\risky}$ constitutes a mean-preserving spread in the level of the return. - -This case is reasonably straightforward because \cite{merton:restat} and \cite{samuelson:portfolio} showed -that for a consumer without labor income (or with perfectly forecastable labor income) the consumption -function is linear, with an infinite-horizon MPC\footnote{See \handoutC{CRRA-RateRisk} for a derivation.} -\begin{equation}\begin{gathered}\begin{aligned} - \MPC & = 1- \left(\DiscFac \Ex_{\BegStp}[\Risky_{\prd+1}^{1-\CRRA}]\right)^{1/\CRRA} \label{eq:MPCExact} - \end{aligned}\end{gathered}\end{equation} -and in this case the previous analysis applies once we substitute this MPC for the one that characterizes -the perfect foresight problem without rate-of-return risk. - -The more realistic case where the interest factor has some serial correlation is more complex. We consider -the simplest case that captures the main features of empirical interest rate dynamics: An AR(1) process. Thus -the specification is -\begin{equation}\begin{gathered}\begin{aligned} - \risky_{\prd+1}-\risky & = (\risky_{\prd}-\risky) \gamma + \epsilon_{\prd+1} - \end{aligned}\end{gathered}\end{equation} -where $\risky$ is the long-run mean log interest factor, $0 < \gamma < 1$ is the AR(1) serial correlation -coefficient, and $\epsilon_{\prd+1}$ is the stochastic shock.
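Before turning to the serially correlated case: for the i.i.d. case, the MPC in \eqref{eq:MPCExact} is a one-line computation once the expectation is taken by quadrature (or, for the lognormal specification \eqref{eq:distRisky}, in closed form). A minimal sketch with illustrative parameter values:
\begin{verbatim}
import numpy as np

rho, beta = 2.0, 0.96                       # CRRA, discount factor (illustrative)
rfree, eprem, sigma = 0.02, 0.04, 0.15      # log riskfree rate, premium, std

muLog = rfree + eprem - sigma**2 / 2        # mean log return, eq (distRisky)
x, w = np.polynomial.hermite.hermgauss(21)  # Gauss-Hermite nodes and weights
R = np.exp(muLog + np.sqrt(2) * sigma * x)  # lognormal return nodes
ER = (w / np.sqrt(np.pi)) @ R**(1 - rho)    # E[Risky^(1 - rho)]
kappa = 1 - (beta * ER)**(1 / rho)          # eq (MPCExact)

# lognormal closed form as a cross-check:
ERexact = np.exp((1 - rho) * muLog + (1 - rho)**2 * sigma**2 / 2)
assert abs(ER - ERexact) < 1e-8
\end{verbatim}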
- -The consumer's problem in this case now has two state variables, $\mNrm_{\prd}$ and $\risky_{\prd}$, and -is described by -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{\prd}(m_{\prd},\risky_{\prd}) & = \max_{{c}_{\prd}} ~ \uFunc(c_{\prd})+ - \Ex_{\BegStp}[{\DiscFac}_{\prd+1}\PermGroFacAdjV{\vFunc}_{\prd+1}(m_{\prd+1},\risky_{\prd+1})] \label{vNormedRisky} - \\ & \text{s.t.} \nonumber \\ - a_{\prd} & = m_{\prd}-c_{\prd} \nonumber - \\ \risky_{\prd+1}-\risky & = (\risky_{\prd}-\risky)\gamma + \epsilon_{\prd+1} \notag - \\ \Risky_{\prd+1} & = \exp(\risky_{\prd+1}) \notag - \\ m_{\prd+1} & = \underbrace{\left(\Risky_{\prd+1}/\PermGroFac_{\prd+1}\right)}_{\equiv \Rprod_{\prd+1}}a_{\prd}+\TranShkEmp_{\prd+1} \nonumber. - \end{aligned}\end{gathered}\end{equation} - -We approximate the AR(1) process by a Markov transition matrix using standard techniques. The stochastic interest factor is allowed to take -on 11 values centered around the steady-state value $\risky$. Given this Markov transition matrix, \textit{conditional} on the Markov AR(1) state the consumption functions for the `optimist' and the `pessimist' will still be linear, -with identical MPC's that are computed numerically. Given these MPC's, the (conditional) realist's consumption function can be computed for each Markov state, and the converged consumption rules constitute the solution contingent on the dynamics of the stochastic -interest rate process. - -In principle, this refinement should be combined with the previous one; -further exposition of this combination is omitted here because no new -insights spring from the combination of the two techniques. - - - -\hypertarget{imposing-artificial-borrowing-constraints}{} -\subsection{Imposing `Artificial' Borrowing Constraints} - -Optimization problems often come with additional constraints that must -be satisfied. Particularly common is an `artificial' liquidity constraint that -prevents the consumer's net worth from falling below some value, often -zero.\footnote{The word artificial is chosen only because of its clarity in distinguishing - this from the case of the `natural' borrowing constraint examined above; no derogation is - intended -- constraints of this kind certainly exist in the real world.} The problem then becomes -\begin{equation*}\begin{gathered}\begin{aligned} - \vFunc_{\prd-1}(m_{\prd-1}) & = \max_{\cNrm_{\prd-1}} ~~ \uFunc(c_{\prd-1}) + \Ex_{\prd-1} [\DiscFac \PermGroFacAdjV{\vFunc}_{\cntn(T)}(m_{\prd})] \label{eq:ConstrArt} - \\ & \mbox{s.t.} \nonumber - \\ a_{\prd-1} & = m_{\prd-1} - c_{\prd-1} - \\ m_{\prd} & = \RNrm_{\prd} a_{\prd-1} + \TranShkEmp_{\prd} - \\ a_{\prd-1} & \geq 0 . - \end{aligned}\end{gathered}\end{equation*} - -\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Constraint binds whenever you would like to consume more than current resources.}}{} - -By definition, the constraint will bind if the unconstrained consumer -would choose a level of spending that would violate the constraint. -Here, that means that the constraint binds if the $c_{\prd-1}$ -that satisfies the unconstrained FOC -\begin{equation}\begin{gathered}\begin{aligned} - c_{\prd-1}^{-\CRRA} & = \vFunc^{a}_{({\prd-1})_\cntn}(m_{\prd-1}-c_{\prd-1}) \label{eq:cUnc} - \end{aligned}\end{gathered}\end{equation} -is greater than $m_{\prd-1}$.
Call $\grave{\cFunc}^{\ast}_{\prd-1}$ the approximated function -returning the level of $c_{\prd-1}$ that satisfies \eqref{eq:cUnc}. -Then the approximated constrained optimal consumption function will be - \begin{equation}\begin{gathered}\begin{aligned} - \grave{\cFunc}_{\prd-1}(m_{\prd-1}) & = \min[{m}_{\prd-1},\grave{\cFunc}^{\ast}_{\prd-1}(m_{\prd-1})] \label{eq:LiqCons}. - \end{aligned}\end{gathered}\end{equation} - -\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Read this carefully - before class. Intuition: consider discounted mv of saving zero. If - consume everything and get the same $\uFunc^{c}$, then happy. If - consumed $\TranShkEmp$ less, mv of saving would be $> \uFunc^{c}(c).$}}{} - -The introduction of the constraint also introduces a sharp -nonlinearity in all of the functions at the point where the constraint -begins to bind. As a result, to get solutions that are anywhere close -to numerically accurate it is useful to augment the grid of values of -the state variable to include the exact value at which the constraint -ceases to bind. Fortunately, this is easy to calculate. We know that -when the constraint is binding the consumer is saving nothing, which -yields marginal value of $\vFunc^{a}_{({\prd-1})_\cntn}(0)$. Further, when the -constraint is binding, $c_{\prd-1} = m_{\prd-1}$. Thus, the largest -value of consumption for which the constraint is binding will be the -point for which the marginal utility of consumption is exactly equal -to the (expected, discounted) marginal value of saving 0. We know -this because the marginal utility of consumption is a downward-sloping -function and so if the consumer were to consume $\tinyAmount$ more, -the marginal utility of that extra consumption would be \textit{below} -the (discounted, expected) marginal utility of saving, and thus the -consumer would engage in positive saving and the constraint would no -longer be binding. Thus the level of $m_{\prd-1}$ at which the -constraint stops binding is:\footnote{The logic here repeats an insight from \cite{deatonLiqConstr}.} -\begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(m_{\prd-1}) & = \vFunc^{a}_{({\prd-1})_\cntn}(0) \nonumber \\ - m_{\prd-1} & = (\vFunc^{a}_{({\prd-1})_\cntn}(0))^{(-1/\CRRA)} \nonumber - \\ & = \cFunc_{({\prd-1})_\cntn}(0). \label{eq:LCbindsTm1} - \end{aligned}\end{gathered}\end{equation} - -\hypertarget{cVScCon}{} -\begin{figure} - \includegraphics[width=6in]{./Figures/cVScCon} - \caption{Constrained (solid) and Unconstrained (dashed) Consumption} - \label{fig:cVScCon} -\end{figure} - -The constrained problem is solved in section ``Artificial Borrowing Constraint'' -of the notebook, where the variable -\texttt{constrained} is set to be a boolean-type object. If the value of \texttt{constrained} -is true, then the constraint is imposed and consumption behavior is computed to match -\eqref{eq:LiqCons}. The resulting constrained consumption rule is shown as the solid line in Figure \ref{fig:cVScCon}. For comparison purposes, -the approximate unconstrained consumption rule is -also plotted, as the dashed line; this is accomplished by setting the boolean value -of \texttt{constrained} to false.
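The notebook's implementation is straightforward to mimic. A minimal sketch -- in which \texttt{vP\_end\_at\_zero} and the unconstrained rule \texttt{cStar} are placeholders for objects an actual solution would supply:
\begin{verbatim}
import numpy as np

rho = 2.0                                # CRRA (illustrative)
vP_end_at_zero = 1.5                     # placeholder for v^a evaluated at a = 0
cStar = lambda m: 0.5 + 0.4 * m          # placeholder unconstrained rule

# eq (LCbindsTm1): the largest m at which the constraint binds
mKink = vP_end_at_zero ** (-1.0 / rho)

# augment the grid with the kink point and a point just above it
mVec = np.sort(np.append(np.linspace(0.1, 4.0, 20), [mKink, mKink + 1e-3]))

cConstrained = np.minimum(mVec, cStar(mVec))   # eq (LiqCons)
\end{verbatim}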
- -The presence of the liquidity -constraint requires three changes to the procedures outlined above: -\begin{enumerate} -\item We redefine - $\hEndMin_{\EndStp}$, which now is the PDV of receiving - $\TranShkEmp_{\prd+1}=\TranShkEmpMin$ next period and - $\TranShkEmp_{t+n}=0~\forall~n>1$ -- that is, the pessimist believes he - will receive nothing beyond period $t+1$ -\item We augment the end-of-period \code{aVec} with zero and with a point with a small positive value so that the generated - {\mVec} will include the binding point $\mNrm^{\#}$ and a point just above it (so that we can better capture the curvature - around that point) -\item We redefine the optimal consumption rule as - in equation (\ref{eq:LiqCons}). This ensures that the - liquidity-constrained `realist' will consume more than the redefined - `pessimist,' so that we will have $\koppa$ still between $0$ and $1$ - and the `method of moderation' will proceed smoothly. -\end{enumerate} - -As expected, the liquidity constraint causes a divergence between the two functions only at the point where the optimal unconstrained consumption rule runs into the 45 degree line. - -\hypertarget{recursion}{} -\section{Recursion}\label{sec:recursion} - -\hypertarget{theory}{} -\subsection{Theory} -Before we solve for periods earlier than $\trmT-1$, we assume for -convenience that in each such period a liquidity constraint exists of -the kind discussed above, preventing $c$ from exceeding $m$. This -simplifies things a bit because now we can always consider an -\code{aVec} that starts with zero as its smallest element. - -Recall now equations~(\ref{eq:vEndPrimeTm1}) and (\ref{eq:upEqbetaOp}): -\begin{equation*}\begin{gathered}\begin{aligned} - \vPEndStp(a_{\prd}) & = \Ex_{\BegStp}[\DiscFac \Rfree \PermGroFac_{\prd+1}^{-\CRRA} - \uFunc^{c}(\cFunc_{\prd+1}(\RNrm_{\prd+1} a_{\prd}+{\TranShkEmp}_{\prd+1}))] - \\\uFunc^{c}(c_{\prd}) & = \vEndStp^{a}(m_{\prd}-c_{\prd}). - \end{aligned}\end{gathered}\end{equation*} -Assuming that the problem has been solved up to period $t+1$ (and thus assuming that we have an approximated $\Aprx{\cFunc}_{\prd+1}(m_{\prd+1})$), our solution method essentially involves using these two equations in succession to work back progressively from period $\trmT-1$ to the beginning of life. Stated generally, the method is as follows. (Here, we use the original, rather than the ``refined,'' method for constructing consumption functions; the generalization of the algorithm below to use the refined method presents no difficulties.)
- -\begin{enumerate} - \ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Point out that we - are defining $\vEndStp^{a}$ here by the literal summation - operation \eqref{eq:vEndeq}.}}{} - -\item For the grid of values $a_{t,i}$ in \texttt{aVec\_eee}, numerically calculate the values - of $\cFunc_{\overline{t}}(a_{t,i})$ and $\cFunc_{\overline{t}}^{a}(a_{t,i})$, - \begin{equation}\begin{gathered}\begin{aligned} - \cFunc_{\overline{t},i} & = \left(\vEndStp^{a}(a_{t,i})\right)^{-1/\CRRA}, - \\ & = \left(\DiscFac \Ex_{\BegStp} \left[\Rfree \PermGroFac_{\prd+1}^{-\CRRA}(\grave{\cFunc}_{\prd+1}(\RNrm_{\prd+1} a_{t,i} + {\TranShkEmp}_{\prd+1}))^{-\CRRA}\right]\right)^{-1/\CRRA}, \label{eq:vEndeq} - \MPCMatch{\\ \cFunc^{a}_{\overline{t},i} & = -(1/\CRRA)\left(\vEndStp^{a}(a_{t,i})\right)^{-1-1/\CRRA} \vEndStp^{a{a}}(\aNrm_{t,i}),}{} - \end{aligned}\end{gathered}\end{equation} -generating vectors of values $\vctr{\cFunc}_{\prd}$\MPCMatch{ and $\vctr{\cFunc}^{a}_{\overline{t}}$.}{.} - -\item Construct a corresponding vector of values of $\vctr{m}_{\prd}=\vctr{\cNrm}_{\prd}+\vctr{\aNrm}_{\prd}$\MPCMatch{; similarly construct a corresponding list of MPC's $\vctr{\MPC}_{\prd}$ using equation \eqref{eq:MPCfromMPTHC}.}{.} - -\item Construct a corresponding vector $\vctr{\mu}_{\prd}$, the levels\MPCMatch{ and first derivatives}{} of $\vctr{\koppa}_{\prd}$, and the levels\MPCMatch{ and first derivatives}{} of $\vctr{\chi}_{\prd}$. - -\item Construct an interpolating approximation $\Aprx{\chi}_{\prd}$ that\MPCMatch{ smoothly matches both the level and the slope}{ matches the level} at those points. - -\item If we are to approximate the value function, construct a corresponding list of values of $\vctr{v}_{\prd}$, the levels\MPCMatch{ and first derivatives of $\vctr{\Koppa}_{\prd}$,}{ of $\vctr{\Koppa}_{\prd}$,} and the levels\MPCMatch{ and first derivatives}{} of $\hat{\vctr{\Chi}}_{\prd}$; and construct an interpolating approximation function $\hat{\Chi}_{\prd}$ that matches those points. -\end{enumerate} - -With $\Aprx{\chi}_{\prd}$ in hand, our approximate consumption function -is computed directly from the appropriate substitutions in \eqref{eq:cFuncHi} -and related equations. With this consumption -rule in hand, we can continue the backwards recursion to period $t-1$ -and so on back to the beginning of life. - -Note that this loop does not contain an item for constructing $\hat{\vFunc}_{\prd}^{m}(m_{\prd})$. This is because with $\Aprx{\Hi{\cFunc}}_{\prd}(m_{\prd})$ in hand, we simply \textit{define} $\hat{\vFunc}^{m}_{\prd}(m_{\prd}) = \uFunc^{c}(\Aprx{\Hi{\cFunc}}_{\prd}(m_{\prd}))$ so there is no need to construct interpolating approximations -- the function arises `free' (or nearly so) from our constructed $\Aprx{\Hi{\cFunc}}_{\prd}(m_{\prd})$ via the usual envelope result (cf.\ \eqref{eq:envelope}). - diff --git a/docs/sec_multiple-control-variables-input-clean.tex b/docs/sec_multiple-control-variables-input-clean.tex deleted file mode 100644 index d35c2b973..000000000 --- a/docs/sec_multiple-control-variables-input-clean.tex +++ /dev/null @@ -1,280 +0,0 @@ -\hypertarget{multiple-control-variables}{} -\section{Multiple Control Variables}\label{sec:multiple-control-variables} -We now consider how to solve problems with multiple control variables.
- -\subsection{Theory}\label{subsec:MCTheory} - -The new portfolio-share control variable is captured by the archaic Greek character \href{https://en.wikipedia.org/wiki/Stigma_(ligature)}{`stigma'}; it represents the share $\Shr$ of disposable assets that the agent invests in the risky asset (conventionally, the stock market). Designating the return factor for the risky asset as $\Risky$ and the share of the portfolio invested in $\Risky$ as $\Shr$, the realized portfolio rate of return $\Rport$ as a function of the share $\Shr$ is: -\begin{equation}\begin{gathered}\begin{aligned} - \Rport(\Shr) &= R+(\Risky-R)\Shr \label{eq:Shr}. - \end{aligned}\end{gathered}\end{equation} -If we imagine the portfolio share decision as being made simultaneously with the $c$ decision, the traditional way of writing the problem is (substituting the budget constraint): -\begin{equation}\begin{gathered}\begin{aligned} - v_{t}(m) & = \max_{\{c,\Shr\}} ~~ \uFunc(c) + \ExMidPrd[\beta v_{t+1}((m-c)\Rport(\Shr) + {\TranShkEmp}_{t+1})] \label{eq:Bellmanundated} - \end{aligned}\end{gathered}\end{equation} -where we have deliberately omitted the {period}-designating subscripts for $\Shr$ and the return factors to highlight the point that, once the consumption and $\Shr$ decisions have been made, it makes no difference to this equation whether the risky return factor $\Risky$ is revealed a nanosecond before the end of the current {period} or a nanosecond after the beginning of the successor {period}. - -%But as a notational choice, there is good reason to designate the realization as happening in $t+1$: A standard way of motivating stochastic returns and wages is to attribute them to ``productivity shocks'' and to assume that the productivity shock associated with a date is the one that affects the production function for that date.
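To see why solving \eqref{eq:Bellmanundated} simultaneously is costly, consider the brute-force approach: a two-dimensional search over $(c,\Shr)$ with a double quadrature over $\TranShkEmp$ and $\Risky$. A toy sketch (the continuation value and all parameter values are illustrative placeholders, not the solved model):
\begin{verbatim}
import numpy as np

rho, beta, Rfree = 2.0, 0.96, 1.02
u = lambda c: c**(1 - rho) / (1 - rho)
vNext = u                                    # placeholder for v_{t+1}

x, w = np.polynomial.hermite.hermgauss(7)
p = w / np.sqrt(np.pi)
theta = np.exp(-0.005 + np.sqrt(2) * 0.10 * x)   # transitory shock, E[theta]=1
Risky = np.exp(0.055 + np.sqrt(2) * 0.15 * x)    # lognormal risky return

def value(c, shr, m):
    Rport = Rfree + (Risky - Rfree) * shr        # eq (Shr)
    mNext = (m - c) * Rport[:, None] + theta[None, :]
    return u(c) + beta * p @ vNext(mNext) @ p    # expectation over R and theta

m = 3.0
cGrid = np.linspace(0.05, m - 0.05, 60)
sGrid = np.linspace(0.0, 1.0, 21)
vals = np.array([[value(c, s, m) for s in sGrid] for c in cGrid])
iC, iS = np.unravel_index(vals.argmax(), vals.shape)
cOpt, sOpt = cGrid[iC], sGrid[iS]   # 60 x 21 objective evaluations per m
\end{verbatim}
The stage-by-stage decomposition developed below replaces this joint search with two one-dimensional problems.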
- -%\renewcommand{\prd}{t} % For the rest of the doc, use generic t vs t+1 - -\begin{comment} - Designating the return factor for the risky asset as $\Risky_{t+1}$, and using $\Shr_{t}$ to represent the proportion of the portfolio invested in this asset before the return is realized after the beginning of $t+1$, corresponding to an assumption that the consumer cannot be `net short' and cannot issue net equity), the overall return on the consumer's portfolio between $t$ and $t+1$ will be: - \begin{equation}\begin{gathered}\begin{aligned} - \Rport_{t+1} & = R(1-\Shr_{t}) + \Risky_{t+1}\Shr_{t} \label{eq:return1} - \\ & = R + (\Risky_{t+1}-R) \Shr_{t} %\label{eq:return2} - \end{aligned}\end{gathered}\end{equation} - and the maximization problem is - \begin{equation*}\begin{gathered}\begin{aligned} - v_{t}(m_{t}) & = \max_{\{{c}_{t},\Shr_{t}\}} ~~ \uFunc(c_{t}) + \beta - \ExEndStp[{v}_{t+1}(m_{t+1})] - \\ & \text{s.t.} \nonumber - \\ \Rport_{t+1} & = R + (\Risky_{t+1}-R) \Shr_{t} - \\ m_{t+1} & = (m_{t}-c_{t})\Rport_{t+1} + \TranShkEmp_{t+1} - \\ 0 \leq & \Shr_{t} \leq 1, \label{eq:noshorts} - \end{aligned}\end{gathered}\end{equation*} - - The first order condition with respect to $c_{t}$ is almost identical to that in the single-control problem, equation (\ref{eq:upceqEvtp1}); the only difference is that the nonstochastic interest factor $R$ is now replaced by the portfolio return ${\Rport}_{t+1}$, - \begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(c_{t}) & = \beta \ExEndStp [{\Rport}_{t+1} v^{m}_{t+1}(m_{t+1})] \label{eq:valfuncFOCRtilde}, - \end{aligned}\end{gathered}\end{equation} - and the Envelope theorem derivation remains the same, yielding the Euler equation for consumption - \begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(c_{t}) & = \ExEndStp[\beta {\Rport}_{t+1} \uFunc^{c}(c_{t+1})]. \label{eq:EulercRiskyR} - \end{aligned}\end{gathered}\end{equation} - - The first order condition with respect to the risky portfolio share is - \begin{equation}\begin{gathered}\begin{aligned} - 0 & = \ExEndStp[{v}_{\MidStpNxt}^{m}(m_{t+1})(\Risky_{t+1}-R){a}_{t}] \notag - \\ & = \ExEndStp\left[\uFunc^{c}\left(\cFunc_{t+1}(m_{t+1})\right)(\Risky_{t+1}-R)\right]{a}_{t} - \\ & = \ExEndStp\left[\uFunc^{c}\left(\cFunc_{t+1}(m_{t+1})\right)(\Risky_{t+1}-R)\right], \label{eq:FOCw} - \end{aligned}\end{gathered}\end{equation} - where the last line follows because $0/a_{t}=0$. - - As before, we define $\vEnd$ as a function that yields the expected $t+1$ value of ending period $t$ with assets $a_{t}$. However, now that there are two control variables, the expectation must be defined as a function of the chosen values of both of those variables, because expected end-of-period value will depend not just on how much the agent saves, but also on how the saved assets are allocated between the risky and riskless assets. 
Thus we define - \begin{equation*}\begin{gathered}\begin{aligned} - \vMidStp(a_{t},\Shr_{t}) & = \beta v_{\arvlstepShr}(m_{t+1}) - \end{aligned}\end{gathered}\end{equation*} - which has derivatives - \begin{equation}\begin{gathered}\begin{aligned} - \vMidStp^a & = \ExEndStp[\beta {\Rport}_{t+1}v_{t+1}^{m}(m_{t+1})] = \ExEndStp[\beta {\Rport}_{t+1}{\uFunc}_{t+1}^{c}(\cFunc_{t+1}(m_{t+1}))] - \end{aligned}\end{gathered}\end{equation} - \begin{equation}\begin{gathered}\begin{aligned} - \vMidStp^{\Shr} & = \ExEndStp[\beta (\Risky_{t+1}-R){v}_{t+1}^{m}(m_{t+1}) ]a_{t} = \ExEndStp[\beta (\Risky_{t+1}-R){\uFunc}_{t+1}^{c}(\cFunc_{t+1}(m_{t+1})) ]a_{t} \notag - \end{aligned}\end{gathered}\end{equation} - implying that the first order conditions (\ref{eq:EulercRiskyR}) and - (\ref{eq:FOCw}) can be rewritten - \begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(c_{t}) & = \vMidStp^{a}(m_{t}-c_{t},\Shr_{t}) \label{eq:FOCc} - \end{aligned}\end{gathered}\end{equation} - and - \begin{equation}\begin{gathered}\begin{aligned} - 0 & = v^{\Shr}_{\vMidStpStgShr}(a_{t},\Shr_{t}). \label{eq:FOCShr} - \end{aligned}\end{gathered}\end{equation} -\end{comment} - -\hypertarget{stages-within-a-period}{} -\subsection{{Stage}s Within a {Period}}\label{subsec:stageswithin} - -Solving simultaneously for the two variables $\Shr$ and $c$ can be computationally challenging. Fortunately, there is a simple solution: Break the problem into two `{stage}s'\footnote{cite mnw and ael papers.} -which we will call the `consumption {stage} $\cFunc$' and the `portfolio {stage} $\Shr$.' These could come in either order in the {period}: We designate the `portfolio choice first, then consumption' version by $[\Shr,\cFunc]$ and the `consumption choice first, then portfolio' as $[\cFunc,\Shr]$. - -In a problem with multiple {stages}, if we want to refer to a sub-{step} of a particular {stage} -- say, the {\Arrival} {step} of the portfolio {stage} -- we simply add a {stage}-indicator subscript (in square brackets) to the notation we have been using until now. That is, the {\Arrival} {step} of the portfolio problem would be $v_{_\arvl[\Shr]}$. - -\hypertarget{revised-consumers-problem}{} -\subsubsection{The (Revised) Consumer's Problem}\label{subsubsec:revised-consumers-problem} - -A slight modification to the consumer's problem specified earlier is necessary to make the {stage}s of the problem completely modular. The difficulty with the earlier formulation is that it assumed that asset returns occurred in the middle {step} of the consumption problem. Our revised version of the consumption problem takes as its input state the amount of bank balances that have resulted from any prior portfolio decision. The problem is therefore: - \begin{equation}\begin{gathered}\begin{aligned} - v_{[\cFunc]}(m) & = \max_{c} ~~ \uFunc(c)+ v_{[\cFunc]_{_\cntn}}(\underbrace{m-c}_{a}) -\\ v_{_\arvl[\cFunc]}(b) & = \Ex_{_\arvl[\cFunc]}\left[v_{[\cFunc]}(\overbrace{b+\TranShkEmp}^{m})\right] \label{eq:vBalances} - \end{aligned}\end{gathered}\end{equation} - - -\hypertarget{subsubsec:investors-problem}{} -\subsubsection{The Investor's Problem}\label{subsubsec:investors-problem} - -Consider the standalone problem of an `investor' whose continuation-value function $v_{[\Shr]_\cntn}$ depends on how much wealth $\acute{w}$ they end up with after the realization of the stochastic $\Risky$ return.
The expected value that the investor will obtain from any initial $w$, given their optimal choice of the portfolio share $\Shr$, is the maximized expectation of the continuation-value function over the wealth that results from the portfolio choice: -\begin{equation}\begin{gathered}\begin{aligned} - v_{_\arvl[\Shr]}(w) = & \max_{\Shr}~ \Ex_{\BegStp[\Shr]}\left[v_{[\Shr]_{_\cntn}}\overbrace{\left(\Rport(\Shr){w}\right)}^{\acute{w}}\right] \label{eq:vMidStpShr} - \end{aligned}\end{gathered}\end{equation} -where we have omitted any {period} designator like $t$ for the {period} in which this problem is solved because, with the continuation-value function defined already as $v_{[\Shr]_\cntn}(\acute{w})$, the problem is self-contained. The solution to this problem will yield an optimal $\Shr$ decision rule $\optml{\Shr}(w).$ Finally, we can specify the value of an investor `arriving' with $w$ as the expected value that will be obtained when the investor invests optimally, generating the \textit{ex ante} optimal stochastic portfolio return factor $\optml{\Rport}(w)=\Rport(\optml{\Shr}(w))$: -\begin{equation}\begin{gathered}\begin{aligned} - v_{_\arvl[\Shr]}(w) = & \Ex_{_\arvl}\left[v_{[\Shr]_\cntn}(\overbrace{\optml{\Rport}(w)w}^{\acute{w}})\right]. -\end{aligned}\end{gathered}\end{equation} - -The reward for all this notational investment is that it is now clear that \emph{exactly the same code} for solving the portfolio share problem can be used in two distinct problems: a `beginning-of-period-returns' model and an `end-of-period-returns' model. - -\hypertarget{beginning-returns}{} -\subsubsection{The `beginning-of-period returns' Problem}\label{subsubsec:beginning-returns} -The beginning-returns problem effectively just inserts a portfolio choice that happens at a {stage} immediately before the consumption {stage} in the optimal consumption problem described in \eqref{eq:vBalances}, for which we had a beginning-of-{stage} value function $v_{_\arvl[\cFunc]}(b)$. The agent makes their portfolio share decision within the {stage} but (obviously) before the risky returns $\Risky$ for the {period} have been realized.
So the problem's portfolio-choice {stage} also takes $k$ as its initial state and solves the investor's problem outlined in section~\ref{subsubsec:investors-problem} above: -\begin{equation}\begin{gathered}\begin{aligned} - v_{[\Shr]_\arvl}(k) & = \Ex_{[\Shr]_\arvl}[v_{[\Shr]_{_\cntn}}(\underbrace{k\optml{\Rport}}_{b})] -\\v_{[\Shr]_\cntn}(b) & = v_{_\arvl[\cFunc]}(b) - \end{aligned}\end{gathered}\end{equation} - -Since in this setup bank balances have been determined before the consumption problem starts, we need to rewrite the consumption {stage} as a function of the bank balances $b$ that will have resulted from the portfolio investment, combined with the income shocks $\TranShkEmp$: -\begin{equation}\begin{gathered}\begin{aligned} - v_{_\arvl[\cFunc]}(b) = & \max_{\cFunc}~ \uFunc(c) + \Ex_{_\arvl[\cFunc]}[v_{[\cFunc]_\cntn}(\underbrace{\overbrace{b+\TranShkEmp}^{m}-c}_{a})] - \end{aligned}\end{gathered}\end{equation} -where, because the consumption {stage} is the last {stage} in the {period}, the continuation-value function for the $\cFunc$ {stage} is just the continuation-value function for the period as a whole: -\begin{equation}\begin{gathered}\begin{aligned} - v_{[\cFunc]_\cntn}(a) = & v_{t_\cntn}(a) - \end{aligned}\end{gathered}\end{equation} -(and recall that $v_{t_\cntn}(a)$ is exogenously provided as an input to the {period}'s problem via the transition equation assumed earlier: $v_{t_\cntn}(a)=\beta v_{_\arvl(t+1)}(a)$). - -\subsubsection{The `end-of-period-returns' Problem} - -If the portfolio share and risky returns are realized at the end of the {period}, we need to move the portfolio choice {stage} to immediately before the point at which returns are realized (and after the $\cFunc$ choice has been made). The problem is the same as the portfolio problem defined above, except that the input for the investment {stage} is the assets remaining after the consumption choice: $a$. So, the portfolio {stage} of the problem is -\begin{equation}\begin{gathered}\begin{aligned} - v_{_\arvl[\Shr]}(a) = & \Ex_{_\arvl[\Shr]}[v_{[\Shr]_{_\cntn}}(\underbrace{a\optml{\Rport}}_{k})] %= \Ex_{[\cFunc]_\arvl}[\vFunc_{}(\kNrm)] - \end{aligned}\end{gathered}\end{equation} -where we are designating the post-realization result of the investment as $k$, and since the $\Shr$-{stage} is the last {stage} of the problem, the end-of-{stage} $k$ becomes the end-of-{period} $k_{t}.$ - -The `state transition' equation between $t$ and $t+1$ is simply $b_{t+1} = k_{t}$ and the continuation-value function transition is $v_{t_\cntn}(k) \mapsto \beta v_{_\arvl(t+1)}(k)$, which reflects the above-mentioned point that there is no substantive difference between the two problems (their $v_{[\cFunc]}(m)$ value functions and $\cFunc(m)$ functions will be identical). - -(Note that we are assuming that there will be only one consumption function in the period, so no {stage} subscript is necessary to pick out `the consumption function'). - -\subsubsection{Numerical Solution} -While the investor's problem cannot be solved using the endogenous gridpoints method,\footnote{Because $\vShrEnd$ is not invertible with respect to $\Shr$, see [references to MNW and AEL's work].} -we can solve it numerically for the optimal $\Shr$ at a vector of $\vctr{a}$ ({\aVecCode} in the code) and then construct an approximated optimal portfolio share function $\Aprx{\optml{\Shr}}(a)$ as the interpolating function among the members of the $\{\vctr{a},\vctr{\Shr}\}$ mapping, as in the sketch below.
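A minimal sketch of that step, assuming equiprobable draws of the risky return and an already-computed continuation-value function (here called \texttt{v\_cntn} and assumed to accept arrays; all names are hypothetical stand-ins for the code's actual objects):
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize_scalar

def share_rule(aVec, v_cntn, R, Risky_draws):
    # For each gridpoint a, find the share s in [0,1] that maximizes
    # the expected continuation value E[ v_cntn(a * Rport(s)) ].
    shares = np.empty_like(aVec)
    for i, a in enumerate(aVec):
        neg_EV = lambda s: -np.mean(v_cntn(a * (R + (Risky_draws - R) * s)))
        shares[i] = minimize_scalar(neg_EV, bounds=(0.0, 1.0),
                                    method="bounded").x
    # Interpolate between gridpoints to get the approximate share rule.
    return lambda a: np.interp(a, aVec, shares)
\end{verbatim}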
Having done this, we can now calculate a vector of values and marginal values that correspond to $\aVec$: -\begin{equation}\begin{gathered}\begin{aligned} - \vctr{v} & = v_{_\arvl[\Shr]}(\vctr{a}) \label{eq:vShrEnd} -\\ \vctr{v}^a & = v^{a}_{_\arvl[\Shr]}(\vctr{a}). - \end{aligned}\end{gathered}\end{equation} - -With the $\vctr{v}^{a}$ vector just described in hand, we can construct our approximation to the consumption function using \emph{exactly the same EGM procedure} that we used in solving the problem \emph{without} a portfolio choice (see \eqref{eq:cGoth}): -\begin{equation}\begin{gathered}\begin{aligned} - \vctr{c} & \equiv \left(\vctr{v}^{a}\right)^{-1/\rho} \label{eq:cVecPort}, - \end{aligned}\end{gathered}\end{equation} -which, following a procedure identical to that in the EGM subsection \ref{subsec:egm}, yields an approximated consumption function $\Aprx{\cFunc}_{t}(m)$. Thus, again, we can construct the consumption function at nearly zero cost (once we have calculated $\vctr{v}^{a}$). - -\hypertarget{the-point}{} - -\subsubsection{The Point}\label{subsubsec:the-point} - -The upshot is that all we need to do is change some of the transition equations and we can use the same solution code (both for the $\Shr$-stage and the $\cFunc$-stage) to solve the problem with either assumption (beginning-of-period or end-of-period) about the timing of portfolio choice. There is even an obvious notation for the two problems: $v_{_\arvl t[\Shr{c}]}$ can be the {period}-arrival value function for the version where the portfolio share is chosen at the beginning of the period, and $v_{_\arvl t[{c}\Shr]}$ is the {period}-arrival value for the problem where the share choice is at the end. - -What is the benefit of writing effectively the identical problem in two different ways? There are several: -\begin{itemize} -\item It demonstrates that, if they are carefully constructed, Bellman problems can be ``modular'' - \begin{itemize} - \item In a life cycle model one might want to assume that at some ages agents have a portfolio choice and at other ages they do not. The consumption problem makes no assumption about whether there is a portfolio choice decision (before or after the consumption choice), so there would be zero cost of having an age-varying problem in which you drop in whatever choices are appropriate to the life cycle stage. - \end{itemize} -\item It emphasizes the flexibility a modeler has to date variables arbitrarily. In the specific example examined here, there is a strong case for preferring the beginning-returns specification because we typically think of productivity or other shocks at date $t$ affecting the agent's state variables before the agent makes that period's choices. It would be awkward and confusing to have a productivity shock dated $t-1$ effectively applying to the problem being solved at $t$ (as in the end-returns specification). -\item It may help to identify more efficient solution methods - \begin{itemize} - \item For example, under the traditional formulation in equation \eqref{eq:Bellmanundated} it might not occur to a modeler that the endogenous gridpoints solution method can be used: when portfolio choice and consumption choice are considered simultaneously, the EGM method breaks down because the portfolio choice part of the problem is not susceptible to EGM solution.
But when the problem is broken into two simpler problems, it becomes clear that EGM can still be applied to the consumption problem even though it cannot be applied to the portfolio choice problem. - \end{itemize} -\end{itemize} - -% the problem needs to be altered to bring the {step}s involving the realization of risky returns into {period} $\prd$; the variable with which the agent ends the period is now $\bNrm_{\prd}$ and to avoid confusion with the prior model in which we assumed $k_{\prd+1}={a}_{\prd}$ we will now define $\kappa_{\prd+1}={\bNrm}_{\prd}$. The continuation-value function for the $[\Shr]$ {stage} now becomes -% \begin{equation}\begin{gathered}\begin{aligned} -% \vFunc_{\prd[\Shr]_\cntn}(a_{\prd}) & = \DiscFac \vFunc_{[\cFunc]_\arvl(\prd+1)}({\kappa}_{\prd+1}) -% \end{aligned}\end{gathered}\end{equation} -% while the dynamic budget constraint for $m$ changes to -% \begin{equation}\begin{gathered}\begin{aligned} -% m_{\prd} & = {\kappa}_{\prd}+\TranShkEmp_{\prd} -% \end{aligned}\end{gathered}\end{equation} -% and the problem in the decision step is now -% \begin{equation}\begin{gathered}\begin{aligned} -% \vFunc_{\prd}(m) & = \max_{c}~~\uFunc(c)+\Ex_{\prd}[\vFunc_{\prd_\cntn}(m-c)] -% \end{aligned}\end{gathered}\end{equation} -% while value as a function of $\mNrm$ in the arrival step is now -% \begin{equation}\begin{gathered}\begin{aligned} -% \vFunc_{_{\arvl}\prd}({\kappa}_{\prd}) & = \Ex_{_\arvl\prd}[\vFunc_{\prd}(m)] -% \end{aligned}\end{gathered}\end{equation} -% which, \textit{mutatis mutandis}, is the same as in \eqref{eq:vNormed}. - - - - -% The second stage in the period will be the solution to the problem of a consumer solving an optimal portfolio choice problem before having made their consumption decision. - -% We continue to assume that the consumer enters period $t$ with the single state variable, $k_{\prd}.$ But (as before) the assumption is that this is before the $t$-dated shocks have been realized. It is at this stage that the consumer makes their portfolio choice, knowing the degree of riskiness of the rate of return but not its period-$t$ realization. Designating the `share-choice' stage by the control variable $\Shr$ which is the proportion of the portfolio to invest in the risky asset, %the problem's FOC in the new notation is (compare to \eqref{eq:FOCShr}): - -% It will be convenient to designate a stage within a period by naming a given stage in period $\prd$ after the control variable chosen in the middle step of the stage; in this case $\prd[\Shr]$. The consumer's problem at the $\Shr$ stage is -% \begin{equation}\begin{gathered}\begin{aligned} -% \vFunc_{\arvlstepShr}(a_{\prd}) & = \max_{\Shr}~\vMidStpStgShr(a_{\prd},\Shr_{\prd}) \label{eq:vMidStpShr} -% \end{aligned}\end{gathered}\end{equation} -% whose FOC in the new notation is (compare to \eqref{eq:FOCShr}): -% \begin{equation}\begin{gathered}\begin{aligned} -% 0 & = \vShrMid(a_{\prd},\Shr_{\prd}).
\label{eq:vShrEnd} -% \end{aligned}\end{gathered}\end{equation} - -\subsection{Application}\label{subsec:MCApplication} - - -In specifying the stochastic process for $\Risky_{t+1}$, we follow the common practice of assuming that returns are lognormally distributed, $\log \Risky \sim \Nrml(\eprem+r-\sigma^{2}_{\risky}/2,\sigma^{2}_{\risky})$ where $\eprem$ is the equity premium over the return $r$ available on the riskless asset.\footnote{This guarantees that $\Ex[\Risky] = \EPrem$ is invariant to the choice of $\sigma^{2}_{\risky}$; see \handoutM{LogELogNorm}.} - -As with labor income uncertainty, it is necessary to discretize the rate-of-return risk in order to have a problem that is soluble in a reasonable amount of time. We follow the same procedure as for labor income uncertainty, generating a set of $n_{\risky}$ equiprobable shocks to the rate of return; in a slight abuse of notation, we will designate the portfolio-weighted return (contingent on the chosen portfolio share in equity, and potentially contingent on any other aspect of the consumer's problem) simply as $\Rport_{i,j}$ (where dependence on $i$ is allowed to permit the possibility of nonzero correlation between the return on the risky asset and the $\TranShkEmp$ shock to labor income; for example, in recessions the stock market falls and labor income also declines). - - -The direct expressions for the derivatives of $\vEnd$ are -\begin{equation}\begin{gathered}\begin{aligned} - \vEndStp^{a}(a_{t},\Shr_{t}) & = \beta \left(\frac{1}{n_{\risky} n_{\TranShkEmp}}\right)\sum_{i=1}^{n_{\TranShkEmp}}\sum_{j=1}^{n_{\risky} }\Rport_{i,j} \left(\cFunc_{t+1}(\Rport_{i,j}a_{t}+\TranShkEmp_{i})\right)^{-\rho} - \\ \vEndStp^{\Shr}(a_{t},\Shr_{t}) & = \beta \left(\frac{1}{n_{\risky} n_{\TranShkEmp}}\right)\sum_{i=1}^{n_{\TranShkEmp}}\sum_{j=1}^{n_{\risky} }(\Risky_{i,j}-R)\left(\cFunc_{t+1}(\Rport_{i,j}a_{t}+\TranShkEmp_{i})\right)^{-\rho}. - \end{aligned}\end{gathered}\end{equation} - -Writing these equations out explicitly makes a problem very apparent: For every different combination of $\{{a}_{t},\Shr_{t}\}$ that the routine wishes to consider, it must perform two double-summations of $n_{\risky} \times n_{\TranShkEmp}$ terms. Once again, there is an inefficiency if it must perform these same calculations many times for the same or nearby values of $\{{a}_{t},\Shr_{t}\}$, and again the solution is to construct an approximation to the (inverses of the) derivatives of the $\vEnd$ function. - -Details of the construction of the interpolating approximations are given below; assume for the moment that we have the approximations $\Aprx{v}_{\EndStp}^{a}$ and $\Aprx{v}_{\EndStp}^{\Shr}$ in hand and we want to proceed. As noted above in the discussion of \eqref{eq:Bellmanundated}, nonlinear equation solvers can find the solution to a set of simultaneous equations. Thus we could ask one to solve -\begin{equation}\begin{gathered}\begin{aligned} - c_{t}^{-\rho} & = \Aprx{v}^{a}_{{t_\cntn}}(m_{t}-c_{t},\Shr_{t}) %\label{eq:FOCwrtcMultContr} - \\ 0 & = \Aprx{v}^{\Shr}_{{t_\cntn}}(m_{t}-c_{t},\Shr_{t}) \label{eq:FOCwrtw} - \end{aligned}\end{gathered}\end{equation} -simultaneously for $c$ and $\Shr$ at the set of potential $m_{t}$ values defined in {\mVec}. However, as noted above, multidimensional constrained -maximization problems are difficult and sometimes quite slow to -solve. - -There is a better way.
Define the problem -%\providecommand{\Opt}{} -%\renewcommand{\Opt}{\tilde} -%\providecommand{\vOpt}{} -%\renewcommand{\vOpt}{\overset{*}{\vFunc}} -\begin{equation}\begin{gathered}\begin{aligned} - \Opt{v}_{{t_\cntn}}(a_{t}) & = \max_{\Shr_{t}} ~~ \vEndStp(a_{t},\Shr_{t}) - \\ & \text{s.t.} \nonumber - \\ 0 \leq & \Shr_{t} \leq 1 - \end{aligned}\end{gathered}\end{equation} -where the tilde over $\Opt{v}(a)$ indicates that this is the $v$ that has been optimized with respect to all of the arguments other than the one still present ($a_{t}$). We solve this problem for the set of gridpoints in \code{aVec} and use the results to construct the interpolating function $\Aprx{\Opt{v}}_{t}^{a}(a_{t})$.\footnote{A faster solution could be obtained by, for each element in \code{aVec}, computing $\vEndStp^{\Shr}(m_{t}-c_{t},\Shr)$ at a grid of values of $\Shr$, and then using an approximating interpolating function (rather than the full expectation) in the \texttt{FindRoot} command. The associated speed improvement is fairly modest, however, so this route was not pursued.} With this function in hand, we can use the first order condition from the single-control problem -\begin{equation*}\begin{gathered}\begin{aligned} - c_{t}^{-\rho} & = \Aprx{\Opt{v}}_{t}^{a}(m_{t}-c_{t}) - \end{aligned}\end{gathered}\end{equation*} -to solve for the optimal level of consumption as a function of $m_{t}$ using the endogenous gridpoints method described above. Thus we have transformed the multidimensional optimization problem into a sequence of two simple optimization problems. - -Note the parallel between this trick and the fundamental insight of dynamic programming: Dynamic programming techniques transform a multi-period (or infinite-period) optimization problem into a sequence of two-period optimization problems which are individually much easier to solve; we have done the same thing here, but with multiple dimensions of controls rather than multiple periods. - -\hypertarget{implementation}{} -\subsection{Implementation} - -Following the discussion from section \ref{subsec:MCTheory}, to provide a numerical solution to the problem -with multiple control variables, we must define expressions that capture the expected marginal value of end-of-period -assets with respect to the level of assets and the share invested in risky assets. This is addressed in ``Multiple Control Variables.'' - - - -% Having the \texttt{GothicMC} subclass available, we can proceed with implementing the steps laid out in section \ref{subsec:MCApplication} to solve the problem at hand. Initially, the two distributions that capture the uncertainty faced by consumers in this scenario are discretized. Subsequently, the \texttt{GothicMC} class is invoked with the requisite arguments to create an instance that includes the necessary functions to depict the first-order conditions of the consumer's problem. Following that, an improved grid of end-of-period assets is established. - -% Here is where we can see how the approach described in section \ref{subsec:MCApplication} is reflected in the code. For the terminal period, the optimal share of risky assets is determined for each point in \texttt{aVec\_eee}, and then the endogenous gridpoints method is employed to compute the optimal consumption level given that the share in the risky asset has been chosen optimally. It's worth noting that this solution takes into account the possibility of a binding artificial borrowing constraint.
Lastly, the interpolation process is executed for both the optimal consumption function and the optimal share of the portfolio in risky assets. These values are stored in their respective dictionaries (\texttt{mGridPort\_life}, \texttt{cGridPort\_life}, and \texttt{ShrGrid\_life}) and utilized to conduct the recursive process outlined in the `Recursion' section, thus yielding the numerical solution for all earlier periods. - -\hypertarget{results-with-multiple-controls}{} -\subsection{Results With Multiple Controls}\label{subsec:results-with-multiple-controls} - -Figure~\ref{fig:PlotctMultContr} plots the $t-1$ consumption function generated by the program; qualitatively it does not look much different from the consumption functions generated by the program without portfolio choice. - -But Figure~\ref{fig:PlotRiskySharetOfat}, which plots the optimal portfolio share as a function of the level of assets, exhibits several interesting features. First, even with a coefficient of relative risk aversion of 6, an equity premium of only 4 percent, and an annual standard deviation in equity returns of 15 percent, the optimal choice for the agent is to invest a proportion 1 (100 percent) of the portfolio in stocks (instead of the safe bank account with riskless return $R$) at values of $a_{t}$ less than about 2. Second, the proportion of the portfolio kept in stocks is \textit{declining} in the level of wealth; i.e., the poor should hold all of their meager assets in stocks, while the rich should be cautious, holding more of their wealth in safe bank deposits and less in stocks. This seemingly bizarre (and highly counterfactual -- see \cite{carroll:richportfolios}) prediction reflects the nature of the risks the consumer faces. Those consumers who are poor in measured financial wealth will likely derive a high proportion of future consumption from their labor income. Since by assumption labor income risk is uncorrelated with rate-of-return risk, the covariance between their future consumption and future stock returns is relatively low. By contrast, persons with relatively large wealth will be paying for a large proportion of future consumption out of that wealth, and hence if they invest too much of it in stocks their consumption will have a high covariance with stock returns. Consequently, they reduce that correlation by holding some of their wealth in the riskless form. - -\hypertarget{PlotctMultContr}{} -\begin{figure} - \includegraphics[width=6in]{./Figures/PlotctMultContr} - \caption{$\cFunc(m_{1})$ With Portfolio Choice} - \label{fig:PlotctMultContr} -\end{figure} - -\hypertarget{PlotRiskySharetOfat}{} -\begin{figure} - \includegraphics[width=6in]{./Figures/PlotRiskySharetOfat} - \caption{Portfolio Share in Risky Assets in First Period $\Shr(a)$} - \label{fig:PlotRiskySharetOfat} -\end{figure} diff --git a/docs/sec_multiple-control-variables-input.tex b/docs/sec_multiple-control-variables-input.tex deleted file mode 100644 index 2c48124f6..000000000 --- a/docs/sec_multiple-control-variables-input.tex +++ /dev/null @@ -1,280 +0,0 @@ -\hypertarget{multiple-control-variables}{} -\section{Multiple Control Variables}\label{sec:multiple-control-variables} -We now consider how to solve problems with multiple control variables.
- -\subsection{Theory}\label{subsec:MCTheory} - -The new portfolio-share control variable is captured by the archaic Greek character \href{https://en.wikipedia.org/wiki/Stigma_(ligature)}{`stigma'}; it represents the share $\Shr$ of disposable assets that the agent invests in the risky asset (conventionally, the stock market). Designating the return factor for the risky asset as $\Risky$ and the share of the portfolio invested in $\Risky$ as $\Shr$, the realized portfolio return factor $\Rport$ as a function of the share $\Shr$ is: -\begin{equation}\begin{gathered}\begin{aligned} - \Rport(\Shr) &= \Rfree+(\Risky-\Rfree)\Shr \label{eq:Shr}. - \end{aligned}\end{gathered}\end{equation} -If we imagine the portfolio share decision as being made simultaneously with the $\cNrm$ decision, the traditional way of writing the problem is (substituting the budget constraint): -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{\prd}(m) & = \max_{\{\cFunc,\Shr\}} ~~ \uFunc(c) + \ExMidPrd[\DiscFac \vFunc_{\prd+1}((m-c)\Rport(\Shr) + {\TranShkEmp}_{\prd+1})] \label{eq:Bellmanundated} - \end{aligned}\end{gathered}\end{equation} -where we have deliberately omitted the {period}-designating subscripts for $\Shr$ and the return factors to highlight the point that, once the consumption and $\Shr$ decisions have been made, it makes no difference to this equation whether the risky return factor $\Risky$ is revealed a nanosecond before the end of the current {period} or a nanosecond after the beginning of the successor {period}. - -%But as a notational choice, there is good reason to designate the realization as happening in $t+1$: A standard way of motivating stochastic returns and wages is to attribute them to ``productivity shocks'' and to assume that the productivity shock associated with a date is the one that affects the production function for that date.
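The Application below will discretize the risky return into $n_{\risky}$ equiprobable draws. One standard scheme for a lognormal $\Risky$ represents each equal-probability interval of the distribution by its conditional mean, for which a closed form exists; the sketch below is a hypothetical illustration of that scheme, not the code's actual discretization routine:
\begin{verbatim}
import numpy as np
from scipy.stats import norm

def discretize_lognormal(mu, sigma, n):
    # Cut the lognormal distribution into n intervals of probability
    # 1/n and represent each by its conditional mean, using
    #   E[X; z0 < Z <= z1] = exp(mu + sigma^2/2)
    #                        * (Phi(z1 - sigma) - Phi(z0 - sigma))
    # for X = exp(mu + sigma*Z) with Z standard normal.
    z = norm.ppf(np.linspace(0.0, 1.0, n + 1))   # quantile cut points
    mass = norm.cdf(z[1:] - sigma) - norm.cdf(z[:-1] - sigma)
    return np.exp(mu + 0.5 * sigma ** 2) * mass * n
\end{verbatim}
With $\mu=\eprem+\rfree-\sigma^{2}_{\risky}/2$, the draws average exactly to $\Ex[\Risky]$, mirroring the specification used in the Application below.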
- -%\renewcommand{\prd}{t} % For the rest of the doc, use generic t vs t+1 - -\begin{comment} - Designating the return factor for the risky asset as $\Risky_{\prd+1}$, and using $\Shr_{\prd}$ to represent the proportion of the portfolio invested in this asset before the return is realized after the beginning of $\prd+1$, corresponding to an assumption that the consumer cannot be `net short' and cannot issue net equity), the overall return on the consumer's portfolio between $t$ and $t+1$ will be: - \begin{equation}\begin{gathered}\begin{aligned} - \Rport_{\prd+1} & = \Rfree(1-\Shr_{\prd}) + \Risky_{\prd+1}\Shr_{\prd} \label{eq:return1} - \\ & = \Rfree + (\Risky_{\prd+1}-\Rfree) \Shr_{\prd} %\label{eq:return2} - \end{aligned}\end{gathered}\end{equation} - and the maximization problem is - \begin{equation*}\begin{gathered}\begin{aligned} - \vFunc_{\prd}(m_{\prd}) & = \max_{\{{c}_{\prd},\Shr_{\prd}\}} ~~ \uFunc(c_{\prd}) + \DiscFac - \ExEndStp[{\vFunc}_{\prd+1}(m_{\prd+1})] - \\ & \text{s.t.} \nonumber - \\ \Rport_{\prd+1} & = \Rfree + (\Risky_{\prd+1}-\Rfree) \Shr_{\prd} - \\ m_{\prd+1} & = (m_{\prd}-c_{\prd})\Rport_{\prd+1} + \TranShkEmp_{\prd+1} - \\ 0 \leq & \Shr_{\prd} \leq 1, \label{eq:noshorts} - \end{aligned}\end{gathered}\end{equation*} - - The first order condition with respect to $c_{\prd}$ is almost identical to that in the single-control problem, equation (\ref{eq:upceqEvtp1}); the only difference is that the nonstochastic interest factor $\Rfree$ is now replaced by the portfolio return ${\Rport}_{\prd+1}$, - \begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(c_{\prd}) & = \DiscFac \ExEndStp [{\Rport}_{\prd+1} \vFunc^{m}_{\prd+1}(m_{\prd+1})] \label{eq:valfuncFOCRtilde}, - \end{aligned}\end{gathered}\end{equation} - and the Envelope theorem derivation remains the same, yielding the Euler equation for consumption - \begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(c_{\prd}) & = \ExEndStp[\DiscFac {\Rport}_{\prd+1} \uFunc^{c}(c_{\prd+1})]. \label{eq:EulercRiskyR} - \end{aligned}\end{gathered}\end{equation} - - The first order condition with respect to the risky portfolio share is - \begin{equation}\begin{gathered}\begin{aligned} - 0 & = \ExEndStp[{\vFunc}_{\MidStpNxt}^{m}(m_{\prd+1})(\Risky_{\prd+1}-\Rfree){a}_{\prd}] \notag - \\ & = \ExEndStp\left[\uFunc^{c}\left(\cFunc_{\prd+1}(m_{\prd+1})\right)(\Risky_{\prd+1}-\Rfree)\right]{a}_{\prd} - \\ & = \ExEndStp\left[\uFunc^{c}\left(\cFunc_{\prd+1}(m_{\prd+1})\right)(\Risky_{\prd+1}-\Rfree)\right], \label{eq:FOCw} - \end{aligned}\end{gathered}\end{equation} - where the last line follows because $0/a_{\prd}=0$. - - As before, we define $\vEnd$ as a function that yields the expected $t+1$ value of ending period $t$ with assets $a_{\prd}$. However, now that there are two control variables, the expectation must be defined as a function of the chosen values of both of those variables, because expected end-of-period value will depend not just on how much the agent saves, but also on how the saved assets are allocated between the risky and riskless assets. 
Thus we define - \begin{equation*}\begin{gathered}\begin{aligned} - \vMidStp(a_{\prd},\Shr_{\prd}) & = \DiscFac \vFunc_{\arvlstepShr}(m_{\prd+1}) - \end{aligned}\end{gathered}\end{equation*} - which has derivatives - \begin{equation}\begin{gathered}\begin{aligned} - \vMidStp^a & = \ExEndStp[\DiscFac {\Rport}_{\prd+1}\vFunc_{\prd+1}^{m}(m_{\prd+1})] = \ExEndStp[\DiscFac {\Rport}_{\prd+1}{\uFunc}_{\prd+1}^{c}(\cFunc_{\prd+1}(m_{\prd+1}))] - \end{aligned}\end{gathered}\end{equation} - \begin{equation}\begin{gathered}\begin{aligned} - \vMidStp^{\Shr} & = \ExEndStp[\DiscFac (\Risky_{\prd+1}-\Rfree){\vFunc}_{\prd+1}^{m}(m_{\prd+1}) ]a_{\prd} = \ExEndStp[\DiscFac (\Risky_{\prd+1}-\Rfree){\uFunc}_{\prd+1}^{c}(\cFunc_{\prd+1}(m_{\prd+1})) ]a_{\prd} \notag - \end{aligned}\end{gathered}\end{equation} - implying that the first order conditions (\ref{eq:EulercRiskyR}) and - (\ref{eq:FOCw}) can be rewritten - \begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(c_{\prd}) & = \vMidStp^{a}(m_{\prd}-c_{\prd},\Shr_{\prd}) \label{eq:FOCc} - \end{aligned}\end{gathered}\end{equation} - and - \begin{equation}\begin{gathered}\begin{aligned} - 0 & = \vFunc^{\Shr}_{\vMidStpStgShr}(a_{\prd},\Shr_{\prd}). \label{eq:FOCShr} - \end{aligned}\end{gathered}\end{equation} -\end{comment} - -\hypertarget{stages-within-a-period}{} -\subsection{{Stage}s Within a {Period}}\label{subsec:stageswithin} - -Solving simultaneously for the two variables $\Shr$ and $c$ can be computationally challenging. Fortunately, there is a simple solution: Break the problem into two `{stage}s'\footnote{cite mnw and ael papers.} -which we will call the `consumption {stage} $\cFunc$' and the `portfolio {stage} $\Shr$.' These could come in either order in the {period}: We designate the `portfolio choice first, then consumption' version by $[\Shr,\cFunc]$ and the `consumption choice first, then portfolio' as $[\cFunc,\Shr]$. - -In a problem with multiple {stages}, if we want to refer to a sub-{step} of a particular {stage} -- say, the {\Arrival} {step} of the portfolio {stage} -- we simply add a {stage}-indicator subscript (in square brackets) to the notation we have been using until now. That is, the {\Arrival} {step} of the portfolio problem would be $\vFunc_{_\arvl[\Shr]}$. - -\hypertarget{revised-consumers-problem}{} -\subsubsection{The (Revised) Consumer's Problem}\label{subsubsec:revised-consumers-problem} - -A slight modification to the consumer's problem specified earlier is necessary to make the {stage}s of the problem completely modular. The difficulty with the earlier formulation is that it assumed that asset returns occurred in the middle {step} of the consumption problem. Our revised version of the consumption problem takes as its input state the amount of bank balances that have resulted from any prior portfolio decision.
The problem is therefore: - \begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{[\cFunc]}(\mNrm) & = \max_{\cNrm} ~~ \uFunc(\cNrm)+ \vFunc_{[\cFunc]_{_\cntn}}(\underbrace{\mNrm-\cNrm}_{\aNrm}) -\\ \vFunc_{_\arvl[\cFunc]}(\bNrm) & = \Ex_{_\arvl[\cFunc]}\left[\vFunc_{[\cFunc]}(\overbrace{\bNrm+\TranShkEmp}^{\mNrm})\right] \label{eq:vBalances} - \end{aligned}\end{gathered}\end{equation} - - -\hypertarget{subsubsec:investors-problem}{} -\subsubsection{The Investor's Problem}\label{subsubsec:investors-problem} - -Consider the standalone problem of an `investor' whose continuation-value function $\vFunc_{[\Shr]_\cntn}$ depends on how much wealth $\wlthAftr$ they end up with after the realization of the stochastic $\Risky$ return. The expected value that the investor will obtain from any initial $\wlthBefr$, given their optimal choice of the portfolio share $\Shr$, is the maximized expectation of the continuation-value function over the wealth that results from the portfolio choice: -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{_\arvl[\Shr]}(\wlthBefr) = & \max_{\Shr}~ \Ex_{\BegStp[\Shr]}\left[\vFunc_{[\Shr]_{_\cntn}}\overbrace{\left(\Rport(\Shr){\wlthBefr}\right)}^{\wlthAftr}\right] \label{eq:vMidStpShr} - \end{aligned}\end{gathered}\end{equation} -where we have omitted any {period} designator like $\prd$ for the {period} in which this problem is solved because, with the continuation-value function defined already as $\vFunc_{[\Shr]_\cntn}(\wlthAftr)$, the problem is self-contained. The solution to this problem will yield an optimal $\Shr$ decision rule $\optml{\Shr}(\wlthBefr).$ Finally, we can specify the value of an investor `arriving' with $\wlthBefr$ as the expected value that will be obtained when the investor invests optimally, generating the \textit{ex ante} optimal stochastic portfolio return factor $\optml{\Rport}(\wlthBefr)=\Rport(\optml{\Shr}(\wlthBefr))$: -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{_\arvl[\Shr]}(\wlthBefr) = & \Ex_{_\arvl}\left[\vFunc_{[\Shr]_\cntn}(\overbrace{\optml{\Rport}(\wlthBefr)\wlthBefr}^{\wlthAftr})\right]. -\end{aligned}\end{gathered}\end{equation} - -The reward for all this notational investment is that it is now clear that \emph{exactly the same code} for solving the portfolio share problem can be used in two distinct problems: a `beginning-of-period-returns' model and an `end-of-period-returns' model. - -\hypertarget{beginning-returns}{} -\subsubsection{The `beginning-of-period returns' Problem}\label{subsubsec:beginning-returns} -The beginning-returns problem effectively just inserts a portfolio choice that happens at a {stage} immediately before the consumption {stage} in the optimal consumption problem described in \eqref{eq:vBalances}, for which we had a beginning-of-{stage} value function $\vFunc_{_\arvl[\cFunc]}(\bNrm)$. The agent makes their portfolio share decision within the {stage} but (obviously) before the risky returns $\Risky$ for the {period} have been realized.
So the problem's portfolio-choice {stage} also takes $\kNrm$ as its initial state and solves the investor's problem outlined in section~\ref{subsubsec:investors-problem} above: -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{[\Shr]_\arvl}(\kNrm) & = \Ex_{[\Shr]_\arvl}[\vFunc_{[\Shr]_{_\cntn}}(\underbrace{\kNrm\optml{\Rport}}_{\bNrm})] -\\\vFunc_{[\Shr]_\cntn}(\bNrm) & = \vFunc_{_\arvl[\cFunc]}(\bNrm) - \end{aligned}\end{gathered}\end{equation} - -Since in this setup bank balances have been determined before the consumption problem starts, we need to rewrite the consumption {stage} as a function of the bank balances $\bNrm$ that will have resulted from the portfolio investment, combined with the income shocks $\TranShkEmp$: -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{_\arvl[\cFunc]}(\bNrm) = & \max_{\cFunc}~ \uFunc(\cNrm) + \Ex_{_\arvl[\cFunc]}[\vFunc_{[\cFunc]_\cntn}(\underbrace{\overbrace{\bNrm+\TranShkEmp}^{\mNrm}-\cNrm}_{\aNrm})] - \end{aligned}\end{gathered}\end{equation} -where, because the consumption {stage} is the last {stage} in the {period}, the continuation-value function for the $\cFunc$ {stage} is just the continuation-value function for the period as a whole: -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{[\cFunc]_\cntn}(\aNrm) = & \vFunc_{\prd_\cntn}(\aNrm) - \end{aligned}\end{gathered}\end{equation} -(and recall that $\vFunc_{\prd_\cntn}(\aNrm)$ is exogenously provided as an input to the {period}'s problem via the transition equation assumed earlier: $\vFunc_{\prd_\cntn}(\aNrm)=\DiscFac \vFunc_{_\arvl(\prd+1)}(\aNrm)$). - -\subsubsection{The `end-of-period-returns' Problem} - -If the portfolio share and risky returns are realized at the end of the {period}, we need to move the portfolio choice {stage} to immediately before the point at which returns are realized (and after the $\cFunc$ choice has been made). The problem is the same as the portfolio problem defined above, except that the input for the investment {stage} is the assets remaining after the consumption choice: $\aNrm$. So, the portfolio {stage} of the problem is -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{_\arvl[\Shr]}(\aNrm) = & \Ex_{_\arvl[\Shr]}[\vFunc_{[\Shr]_{_\cntn}}(\underbrace{\aNrm\optml{\Rport}}_{\kNrm})] %= \Ex_{[\cFunc]_\arvl}[\vFunc_{}(\kNrm)] - \end{aligned}\end{gathered}\end{equation} -where we are designating the post-realization result of the investment as $\kNrm$, and since the $\Shr$-{stage} is the last {stage} of the problem, the end-of-{stage} $\kNrm$ becomes the end-of-{period} $\kNrm_{\prd}.$ - -The `state transition' equation between $\prd$ and $\prd+1$ is simply $\bNrm_{\prd+1} = \kNrm_{\prd}$ and the continuation-value function transition is $\vFunc_{\prd_\cntn}(\kNrm) \mapsto \DiscFac \vFunc_{_\arvl(\prd+1)}(\kNrm)$, which reflects the above-mentioned point that there is no substantive difference between the two problems (their $\vFunc_{[\cFunc]}(\mNrm)$ value functions and $\cFunc(\mNrm)$ functions will be identical). - -(Note that we are assuming that there will be only one consumption function in the period, so no {stage} subscript is necessary to pick out `the consumption function').
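The claim that only the transition wiring differs can be made concrete. In the sketch below, \texttt{solve\_cons\_stage} and \texttt{solve\_share\_stage} are hypothetical stage solvers, each mapping a continuation-value function to the corresponding arrival-value function for its {stage}; swapping the order of composition switches between the $[\Shr,\cFunc]$ and $[\cFunc,\Shr]$ timing conventions without touching the stage solvers themselves:
\begin{verbatim}
def solve_period(v_arvl_next, order, solve_cons_stage, solve_share_stage,
                 beta):
    # Continuation value for the period: v_cntn(x) = beta * v_arvl,t+1(x).
    v_cntn = lambda x: beta * v_arvl_next(x)
    if order == "share_then_cons":   # [Shr, c]: returns realized first
        v_arvl_c = solve_cons_stage(v_cntn)    # backward: last stage first
        return solve_share_stage(v_arvl_c)     # period-arrival value of k
    else:                            # [c, Shr]: returns realized at the end
        v_arvl_s = solve_share_stage(v_cntn)   # backward: last stage first
        return solve_cons_stage(v_arvl_s)      # period-arrival value of b
\end{verbatim}
Because backward induction solves the temporally last {stage} first, the composition order in code is the reverse of the {stage} order within the {period}.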
- -\subsubsection{Numerical Solution} -While the investor's problem cannot be solved using the endogenous gridpoints method,\footnote{Because $\vShrEnd$ is not invertible with respect to $\Shr$, see [references to MNW and AEL's work].} -we can solve it numerically for the optimal $\Shr$ at a vector of $\vctr{a}$ ({\aVecCode} in the code) and then construct an approximated optimal portfolio share function $\Aprx{\optml{\Shr}}(a)$ as the interpolating function among the members of the $\{\vctr{a},\vctr{\Shr}\}$ mapping. Having done this, we can now calculate a vector of values and marginal values that correspond to $\aVec$: -\begin{equation}\begin{gathered}\begin{aligned} - \vctr{v} & = \vFunc_{_\arvl[\Shr]}(\vctr{a}) \label{eq:vShrEnd} -\\ \vctr{v}^\aNrm & = \vFunc^{\aNrm}_{_\arvl[\Shr]}(\vctr{a}). - \end{aligned}\end{gathered}\end{equation} - -With the $\vctr{v}^{\aNrm}$ vector just described in hand, we can construct our approximation to the consumption function using \emph{exactly the same EGM procedure} that we used in solving the problem \emph{without} a portfolio choice (see \eqref{eq:cGoth}): -\begin{equation}\begin{gathered}\begin{aligned} - \vctr{c} & \equiv \left(\vctr{v}^{\aNrm}\right)^{-1/\CRRA} \label{eq:cVecPort}, - \end{aligned}\end{gathered}\end{equation} -which, following a procedure identical to that in the EGM subsection \ref{subsec:egm}, yields an approximated consumption function $\Aprx{\cFunc}_{\prd}(m)$. Thus, again, we can construct the consumption function at nearly zero cost (once we have calculated $\vctr{v}^{\aNrm}$). - -\hypertarget{the-point}{} - -\subsubsection{The Point}\label{subsubsec:the-point} - -The upshot is that all we need to do is change some of the transition equations and we can use the same solution code (both for the $\Shr$-stage and the $\cFunc$-stage) to solve the problem with either assumption (beginning-of-period or end-of-period) about the timing of portfolio choice. There is even an obvious notation for the two problems: $\vFunc_{_\arvl\prd[\Shr{c}]}$ can be the {period}-arrival value function for the version where the portfolio share is chosen at the beginning of the period, and $\vFunc_{_\arvl\prd[{c}\Shr]}$ is the {period}-arrival value for the problem where the share choice is at the end. - -What is the benefit of writing effectively the identical problem in two different ways? There are several: -\begin{itemize} -\item It demonstrates that, if they are carefully constructed, Bellman problems can be ``modular'' - \begin{itemize} - \item In a life cycle model one might want to assume that at some ages agents have a portfolio choice and at other ages they do not. The consumption problem makes no assumption about whether there is a portfolio choice decision (before or after the consumption choice), so there would be zero cost of having an age-varying problem in which you drop in whatever choices are appropriate to the life cycle stage. - \end{itemize} -\item It emphasizes the flexibility a modeler has to date variables arbitrarily. In the specific example examined here, there is a strong case for preferring the beginning-returns specification because we typically think of productivity or other shocks at date $\prd$ affecting the agent's state variables before the agent makes that period's choices.
It would be awkward and confusing to have a productivity shock dated $\prd-1$ effectively applying to the problem being solved at $\prd$ (as in the end-returns specification). -\item It may help to identify more efficient solution methods - \begin{itemize} - \item For example, under the traditional formulation in equation \eqref{eq:Bellmanundated} it might not occur to a modeler that the endogenous gridpoints solution method can be used: when portfolio choice and consumption choice are considered simultaneously, the EGM method breaks down because the portfolio choice part of the problem is not susceptible to EGM solution. But when the problem is broken into two simpler problems, it becomes clear that EGM can still be applied to the consumption problem even though it cannot be applied to the portfolio choice problem. - \end{itemize} -\end{itemize} - -% the problem needs to be altered to bring the {step}s involving the realization of risky returns into {period} $\prd$; the variable with which the agent ends the period is now $\bNrm_{\prd}$ and to avoid confusion with the prior model in which we assumed $k_{\prd+1}={a}_{\prd}$ we will now define $\kappa_{\prd+1}={\bNrm}_{\prd}$. The continuation-value function for the $[\Shr]$ {stage} now becomes -% \begin{equation}\begin{gathered}\begin{aligned} -% \vFunc_{\prd[\Shr]_\cntn}(a_{\prd}) & = \DiscFac \vFunc_{[\cFunc]_\arvl(\prd+1)}({\kappa}_{\prd+1}) -% \end{aligned}\end{gathered}\end{equation} -% while the dynamic budget constraint for $m$ changes to -% \begin{equation}\begin{gathered}\begin{aligned} -% m_{\prd} & = {\kappa}_{\prd}+\TranShkEmp_{\prd} -% \end{aligned}\end{gathered}\end{equation} -% and the problem in the decision step is now -% \begin{equation}\begin{gathered}\begin{aligned} -% \vFunc_{\prd}(m) & = \max_{c}~~\uFunc(c)+\Ex_{\prd}[\vFunc_{\prd_\cntn}(m-c)] -% \end{aligned}\end{gathered}\end{equation} -% while value as a function of $\mNrm$ in the arrival step is now -% \begin{equation}\begin{gathered}\begin{aligned} -% \vFunc_{_{\arvl}\prd}({\kappa}_{\prd}) & = \Ex_{_\arvl\prd}[\vFunc_{\prd}(m)] -% \end{aligned}\end{gathered}\end{equation} -% which, \textit{mutatis mutandis}, is the same as in \eqref{eq:vNormed}. - - - - -% The second stage in the period will be the solution to the problem of a consumer solving an optimal portfolio choice problem before having made their consumption decision. - -% We continue to assume that the consumer enters period $t$ with the single state variable, $k_{\prd}.$ But (as before) the assumption is that this is before the $t$-dated shocks have been realized. It is at this stage that the consumer makes their portfolio choice, knowing the degree of riskiness of the rate of return but not its period-$t$ realization. Designating the `share-choice' stage by the control variable $\Shr$ which is the proportion of the portfolio to invest in the risky asset, %the problem's FOC in the new notation is (compare to \eqref{eq:FOCShr}): - -% It will be convenient to designate a stage within a period by naming a given stage in period $\prd$ after the control variable chosen in the middle step of the stage; in this case $\prd[\Shr]$.
The consumer's problem at the $\Shr$ stage is -% \begin{equation}\begin{gathered}\begin{aligned} -% \vFunc_{\arvlstepShr}(a_{\prd}) & = \max_{\Shr}~\vMidStpStgShr(a_{\prd},\Shr_{\prd}) \label{eq:vMidStpShr} -% \end{aligned}\end{gathered}\end{equation} -% whose FOC in the new notation is (compare to \eqref{eq:FOCShr}): -% \begin{equation}\begin{gathered}\begin{aligned} -% 0 & = \vShrMid(a_{\prd},\Shr_{\prd}). \label{eq:vShrEnd} -% \end{aligned}\end{gathered}\end{equation} - -\subsection{Application}\label{subsec:MCApplication} - - -In specifying the stochastic process for $\Risky_{\prd+1}$, we follow the common practice of assuming that returns are lognormally distributed, $\log \Risky \sim \Nrml(\eprem+\rfree-\sigma^{2}_{\risky}/2,\sigma^{2}_{\risky})$ where $\eprem$ is the equity premium over the return $\rfree$ available on the riskless asset.\footnote{This guarantees that $\Ex[\Risky] = \EPrem$ is invariant to the choice of $\sigma^{2}_{\risky}$; see \handoutM{LogELogNorm}.} - -As with labor income uncertainty, it is necessary to discretize the rate-of-return risk in order to have a problem that is soluble in a reasonable amount of time. We follow the same procedure as for labor income uncertainty, generating a set of $n_{\risky}$ equiprobable shocks to the rate of return; in a slight abuse of notation, we will designate the portfolio-weighted return (contingent on the chosen portfolio share in equity, and potentially contingent on any other aspect of the consumer's problem) simply as $\Rport_{i,j}$ (where dependence on $i$ is allowed to permit the possibility of nonzero correlation between the return on the risky asset and the $\TranShkEmp$ shock to labor income; for example, in recessions the stock market falls and labor income also declines). - - -The direct expressions for the derivatives of $\vEnd$ are -\begin{equation}\begin{gathered}\begin{aligned} - \vEndStp^{a}(a_{\prd},\Shr_{\prd}) & = \DiscFac \left(\frac{1}{n_{\risky} n_{\TranShkEmp}}\right)\sum_{i=1}^{n_{\TranShkEmp}}\sum_{j=1}^{n_{\risky} }\Rport_{i,j} \left(\cFunc_{\prd+1}(\Rport_{i,j}a_{\prd}+\TranShkEmp_{i})\right)^{-\CRRA} - \\ \vEndStp^{\Shr}(a_{\prd},\Shr_{\prd}) & = \DiscFac \left(\frac{1}{n_{\risky} n_{\TranShkEmp}}\right)\sum_{i=1}^{n_{\TranShkEmp}}\sum_{j=1}^{n_{\risky} }(\Risky_{i,j}-\Rfree)\left(\cFunc_{\prd+1}(\Rport_{i,j}a_{\prd}+\TranShkEmp_{i})\right)^{-\CRRA}. - \end{aligned}\end{gathered}\end{equation} - -Writing these equations out explicitly makes a problem very apparent: For every different combination of $\{{a}_{\prd},\Shr_{\prd}\}$ that the routine wishes to consider, it must perform two double-summations of $n_{\risky} \times n_{\TranShkEmp}$ terms. Once again, there is an inefficiency if it must perform these same calculations many times for the same or nearby values of $\{{a}_{\prd},\Shr_{\prd}\}$, and again the solution is to construct an approximation to the (inverses of the) derivatives of the $\vEnd$ function. - -Details of the construction of the interpolating approximations are given below; assume for the moment that we have the approximations $\Aprx{\vFunc}_{\EndStp}^{a}$ and $\Aprx{\vFunc}_{\EndStp}^{\Shr}$ in hand and we want to proceed. As noted above in the discussion of \eqref{eq:Bellmanundated}, nonlinear equation solvers can find the solution to a set of simultaneous equations.
Thus we could ask one to solve -\begin{equation}\begin{gathered}\begin{aligned} - c_{\prd}^{-\CRRA} & = \Aprx{\vFunc}^{a}_{{\prd_\cntn}}(m_{\prd}-c_{\prd},\Shr_{\prd}) %\label{eq:FOCwrtcMultContr} - \\ 0 & = \Aprx{\vFunc}^{\Shr}_{{\prd_\cntn}}(m_{\prd}-c_{\prd},\Shr_{\prd}) \label{eq:FOCwrtw} - \end{aligned}\end{gathered}\end{equation} -simultaneously for $\cNrm$ and $\Shr$ at the set of potential $m_{\prd}$ values defined in {\mVec}. However, as noted above, multidimensional constrained -maximization problems are difficult and sometimes quite slow to -solve. - -There is a better way. Define the problem -%\providecommand{\Opt}{} -%\renewcommand{\Opt}{\tilde} -%\providecommand{\vOpt}{} -%\renewcommand{\vOpt}{\overset{*}{\vFunc}} -\begin{equation}\begin{gathered}\begin{aligned} - \Opt{\vFunc}_{{\prd_\cntn}}(a_{\prd}) & = \max_{\Shr_{\prd}} ~~ \vEndStp(a_{\prd},\Shr_{\prd}) - \\ & \text{s.t.} \nonumber - \\ 0 \leq & \Shr_{\prd} \leq 1 - \end{aligned}\end{gathered}\end{equation} -where the tilde over $\Opt{\vFunc}(a)$ indicates that this is the $\vFunc$ that has been optimized with respect to all of the arguments other than the one still present ($a_{\prd}$). We solve this problem for the set of gridpoints in \code{aVec} and use the results to construct the interpolating function $\Aprx{\Opt{\vFunc}}_{\prd}^{a}(a_{\prd})$.\footnote{A faster solution could be obtained by, for each element in \code{aVec}, computing $\vEndStp^{\Shr}(m_{\prd}-c_{\prd},\Shr)$ at a grid of values of $\Shr$, and then using an approximating interpolating function (rather than the full expectation) in the \texttt{FindRoot} command. The associated speed improvement is fairly modest, however, so this route was not pursued.} With this function in hand, we can use the first order condition from the single-control problem -\begin{equation*}\begin{gathered}\begin{aligned} - c_{\prd}^{-\CRRA} & = \Aprx{\Opt{\vFunc}}_{\prd}^{a}(m_{\prd}-c_{\prd}) - \end{aligned}\end{gathered}\end{equation*} -to solve for the optimal level of consumption as a function of $m_{\prd}$ using the endogenous gridpoints method described above. Thus we have transformed the multidimensional optimization problem into a sequence of two simple optimization problems. - -Note the parallel between this trick and the fundamental insight of dynamic programming: Dynamic programming techniques transform a multi-period (or infinite-period) optimization problem into a sequence of two-period optimization problems which are individually much easier to solve; we have done the same thing here, but with multiple dimensions of controls rather than multiple periods. - -\hypertarget{implementation}{} -\subsection{Implementation} - -Following the discussion from section \ref{subsec:MCTheory}, to provide a numerical solution to the problem -with multiple control variables, we must define expressions that capture the expected marginal value of end-of-period -assets with respect to the level of assets and the share invested in risky assets. This is addressed in ``Multiple Control Variables.'' - - - -% Having the \texttt{GothicMC} subclass available, we can proceed with implementing the steps laid out in section \ref{subsec:MCApplication} to solve the problem at hand. Initially, the two distributions that capture the uncertainty faced by consumers in this scenario are discretized.
Subsequently, the \texttt{GothicMC} class is invoked with the requisite arguments to create an instance that includes the necessary functions to depict the first-order conditions of the consumer's problem. Following that, an improved grid of end-of-period assets is established. - -% Here is where we can see how the approach described in section \ref{subsec:MCApplication} is reflected in the code. For the terminal period, the optimal share of risky assets is determined for each point in \texttt{aVec\_eee}, and then the endogenous gridpoints method is employed to compute the optimal consumption level given that the share in the risky asset has been chosen optimally. It's worth noting that this solution takes into account the possibility of a binding artificial borrowing constraint. Lastly, the interpolation process is executed for both the optimal consumption function and the optimal share of the portfolio in risky assets. These values are stored in their respective dictionaries (\texttt{mGridPort\_life}, \texttt{cGridPort\_life}, and \texttt{ShrGrid\_life}) and utilized to conduct the recursive process outlined in the `Recursion' section, thus yielding the numerical solution for all earlier periods. - -\hypertarget{results-with-multiple-controls}{} -\subsection{Results With Multiple Controls}\label{subsec:results-with-multiple-controls} - -Figure~\ref{fig:PlotctMultContr} plots the $\prd-1$ consumption function generated by the program; qualitatively it does not look much different from the consumption functions generated by the program without portfolio choice. - -But Figure~\ref{fig:PlotRiskySharetOfat}, which plots the optimal portfolio share as a function of the level of assets, exhibits several interesting features. First, even with a coefficient of relative risk aversion of 6, an equity premium of only 4 percent, and an annual standard deviation in equity returns of 15 percent, the optimal choice for the agent is to invest a proportion 1 (100 percent) of the portfolio in stocks (instead of the safe bank account with riskless return $\Rfree$) at values of $a_{\prd}$ less than about 2. Second, the proportion of the portfolio kept in stocks is \textit{declining} in the level of wealth; i.e., the poor should hold all of their meager assets in stocks, while the rich should be cautious, holding more of their wealth in safe bank deposits and less in stocks. This seemingly bizarre (and highly counterfactual -- see \cite{carroll:richportfolios}) prediction reflects the nature of the risks the consumer faces. Those consumers who are poor in measured financial wealth will likely derive a high proportion of future consumption from their labor income. Since by assumption labor income risk is uncorrelated with rate-of-return risk, the covariance between their future consumption and future stock returns is relatively low. By contrast, persons with relatively large wealth will be paying for a large proportion of future consumption out of that wealth, and hence if they invest too much of it in stocks their consumption will have a high covariance with stock returns. Consequently, they reduce that correlation by holding some of their wealth in the riskless form.
- -\hypertarget{PlotctMultContr}{} -\begin{figure} - \includegraphics[width=6in]{./Figures/PlotctMultContr} - \caption{$\cFunc(m_{1})$ With Portfolio Choice} - \label{fig:PlotctMultContr} -\end{figure} - -\hypertarget{PlotRiskySharetOfat}{} -\begin{figure} - \includegraphics[width=6in]{./Figures/PlotRiskySharetOfat} - \caption{Portfolio Share in Risky Assets in First Period $\Shr(a)$} - \label{fig:PlotRiskySharetOfat} -\end{figure} diff --git a/docs/sec_normalization-input-clean.tex b/docs/sec_normalization-input-clean.tex deleted file mode 100644 index 101cb035d..000000000 --- a/docs/sec_normalization-input-clean.tex +++ /dev/null @@ -1,38 +0,0 @@ -\hypertarget{normalization}{} -\section{Normalization}\label{sec:normalization} - -The single most powerful method for speeding the solution of such models is to redefine the problem in a way that reduces the number of state variables (if at all possible). In the consumption context, the obvious idea is to see whether the problem can be rewritten in terms of the ratio of various variables to permanent noncapital (`labor') income $\pLvl_{t}$ (henceforth for brevity, `permanent income.') - -In the last {period} of life $T$, there is no future, $\vLvl_{T+1} = 0$, so the optimal plan is to consume everything: -\begin{equation}\begin{gathered}\begin{aligned} - \vLvl_{T}(\mLvl_{T},\pLvl_{T}) & = \frac{\mLvl_{T}^{1-\rho}}{1-\rho}. \label{eq:levelTm1} - \end{aligned}\end{gathered}\end{equation} -Now define nonbold variables as the bold variable divided by the level of permanent income in the same period, so that, for example, $m_{T}=\mLvl_{T}/\pLvl_{T}$; and define $v_{T}(m_{T}) = \uFunc(m_{T})$.\footnote{Nonbold value is bold value divided by $\pLvl^{1-\rho}$ rather than $\pLvl$.} For our CRRA utility function, $\uFunc(xy)=x^{1-\rho}\uFunc(y)$, so (\ref{eq:levelTm1}) can be rewritten as -\begin{equation}\begin{gathered}\begin{aligned} - \vLvl_{T}(\mLvl_{T},\pLvl_{T}) & = \pLvl_{T}^{1-\rho}\frac{{m}_{T}^{1-\rho}}{1-\rho} \\ - & = (\pLvl_{T-1}G_{T})^{1-\rho}\frac{{m}_{T}^{1-\rho}}{1-\rho} \\ - &= \pLvl_{T-1}^{1-\rho}G_{T}^{1-\rho}v_{T}(m_{T}). \label{eq:vT} - \end{aligned}\end{gathered}\end{equation} - -Now define a new optimization problem: - \begin{equation}\begin{gathered}\begin{aligned} - v_{t}(m_{t}) & = \max_{{c}_{t}} ~~ \uFunc(c_{t})+{\beta}\Ex_{t}[ G_{t+1}^{1-\rho}v_{t+1}(m_{t+1})] \label{eq:vNormed} \\ - & \text{s.t.} \\ - a_{t} & = m_{t}-c_{t} \\ - k_{t+1} & = a_{t} \\ - b_{t+1} & = \underbrace{\left(R/G_{t+1}\right)}_{\equiv \mathcal{R}_{t+1}}k_{t+1} \\ - m_{t+1} & = b_{t+1}+\TranShkEmp_{t+1}, - \end{aligned}\end{gathered}\end{equation} -where division by $G$ in the second-to-last equation yields a normalized return factor $\mathcal{R}$ which is the consequence of the fact that we have divided $t+1$ level variables by $\pLvl_{t+1}=G_{t+1}\pLvl_{t}$. - -Then it is easy to see that for $t=T-1$, -\begin{equation*}\begin{gathered}\begin{aligned} - \vLvl_{t}(\mLvl_{t},\pLvl_{t}) & = \pLvl_{t}^{1-\rho}v_{t}(m_{t}) - \end{aligned}\end{gathered}\end{equation*} -and so on back to all earlier periods.
Hence, if we solve the problem \eqref{eq:vNormed}, which has only a single state variable $m_{t}$, we can obtain the levels of the value function, consumption, and all other variables from the corresponding permanent-income-normalized solution objects by multiplying each by $\pLvl_{t}$, e.g.\ $\cFunc_{t}(\mLvl_{t},\pLvl_{t})=\pLvl_{t}\cFunc_{t}(\mLvl_{t}/\pLvl_{t})$ (or, for the value function, $\vLvl_{t}(\mLvl_{t},\pLvl_{t}) = \pLvl_{t}^{1-\rho}v_{t}(m_{t}))$. We have thus reduced the problem from two continuous state variables to one (and thereby enormously simplified its solution). - -For future reference it will also be useful to write the problem \eqref{eq:vNormed} in the traditional way, by substituting $b_{t+1},k_{t+1},$ and $a_{t}$ into $m_{t+1}$: -\begin{equation}\begin{gathered}\begin{aligned} - v_{t}(m_{t}) & = \max_{c} ~~ \uFunc(c)+ \beta \Ex_{t}[ G_{t+1}^{1-\rho}v_{t+1}(\overbrace{(m_{t}-c)(R/G_{t+1})+\TranShkEmp_{t+1}}^{m_{t+1}})] \label{eq:vusual}. - \end{aligned}\end{gathered}\end{equation} - diff --git a/docs/sec_normalization-input.tex b/docs/sec_normalization-input.tex deleted file mode 100644 index 23ebbdd83..000000000 --- a/docs/sec_normalization-input.tex +++ /dev/null @@ -1,38 +0,0 @@ -\hypertarget{normalization}{} -\section{Normalization}\label{sec:normalization} - -The single most powerful method for speeding the solution of such models is to redefine the problem in a way that reduces the number of state variables (if at all possible). In the consumption context, the obvious idea is to see whether the problem can be rewritten in terms of the ratio of various variables to permanent noncapital (`labor') income $\pLvl_{\prd}$ (henceforth for brevity, `permanent income.') - -In the last {period} of life $\trmT$, there is no future, $\vLvl_{\trmT+1} = 0$, so the optimal plan is to consume everything: -\begin{equation}\begin{gathered}\begin{aligned} - \vLvl_{\prdT}(\mLvl_{\prdT},\pLvl_{\prdT}) & = \frac{\mLvl_{\prdT}^{1-\CRRA}}{1-\CRRA}. \label{eq:levelTm1} - \end{aligned}\end{gathered}\end{equation} -Now define nonbold variables as the bold variable divided by the level of permanent income in the same period, so that, for example, $\mNrm_{\prdT}=\mLvl_{\prdT}/\pLvl_{\prdT}$; and define $\vFunc_{\prdT}(\mNrm_{\prdT}) = \uFunc(\mNrm_{\prdT})$.\footnote{Nonbold value is bold value divided by $\pLvl^{1-\CRRA}$ rather than $\pLvl$.} For our CRRA utility function, $\uFunc(xy)=x^{1-\CRRA}\uFunc(y)$, so (\ref{eq:levelTm1}) can be rewritten as -\begin{equation}\begin{gathered}\begin{aligned} - \vLvl_{\prdT}(\mLvl_{\prdT},\pLvl_{\prdT}) & = \pLvl_{\prdT}^{1-\CRRA}\frac{{\mNrm}_{\prdT}^{1-\CRRA}}{1-\CRRA} \\ - & = (\pLvl_{\prdT-1}\PermGroFac_{\prdT})^{1-\CRRA}\frac{{\mNrm}_{\prdT}^{1-\CRRA}}{1-\CRRA} \\ - &= \pLvl_{\prdT-1}^{1-\CRRA}\PermGroFac_{\prdT}^{1-\CRRA}\vFunc_{\prdT}(\mNrm_{\prdT}).
\label{eq:vT} - \end{aligned}\end{gathered}\end{equation} - -Now define a new optimization problem: - \begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{\prd}(\mNrm_{\prd}) & = \max_{{\cNrm}_{\prd}} ~~ \uFunc(\cNrm_{\prd})+{\DiscFac}\Ex_{\prd}[ \PermGroFac_{\prd+1}^{1-\CRRA}\vFunc_{\prd+1}(\mNrm_{\prd+1})] \label{eq:vNormed} \\ - & \text{s.t.} \\ - \aNrm_{\prd} & = \mNrm_{\prd}-\cNrm_{\prd} \\ - {\kNrm}_{\prd+1} & = \aNrm_{\prd} \\ - \bNrm_{\prd+1} & = \underbrace{\left(\Rfree/\PermGroFac_{\prd+1}\right)}_{\equiv \RNrm_{\prd+1}}{\kNrm}_{\prd+1} \\ - \mNrm_{\prd+1} & = \bNrm_{\prd+1}+\TranShkEmp_{\prd+1}, - \end{aligned}\end{gathered}\end{equation} -where division by $\PermGroFac$ in the second-to-last equation yields a normalized return factor $\RNrm$ which is the consequence of the fact that we have divided $\prd+1$ level variables by $\pLvl_{\prd+1}=\PermGroFac_{\prd+1}\pLvl_{\prd}$. - -Then it is easy to see that for $\prd=\trmT-1$, -\begin{equation*}\begin{gathered}\begin{aligned} - \vLvl_{\prd}(\mLvl_{\prd},\pLvl_{\prd}) & = \pLvl_{\prd}^{1-\CRRA}\vFunc_{\prd}(\mNrm_{\prd}) - \end{aligned}\end{gathered}\end{equation*} -and so on back to all earlier periods. Hence, if we solve the problem \eqref{eq:vNormed}, which has only a single state variable $\mNrm_{\prd}$, we can obtain the levels of the value function, consumption, and all other variables from the corresponding permanent-income-normalized solution objects by multiplying each by $\pLvl_{\prd}$, e.g.\ $\cFunc_{\prd}(\mLvl_{\prd},\pLvl_{\prd})=\pLvl_{\prd}\cFunc_{\prd}(\mLvl_{\prd}/\pLvl_{\prd})$ (or, for the value function, $\vLvl_{\prd}(\mLvl_{\prd},\pLvl_{\prd}) = \pLvl_{\prd}^{1-\CRRA}\vFunc_{\prd}(\mNrm_{\prd}))$. We have thus reduced the problem from two continuous state variables to one (and thereby enormously simplified its solution). - -For future reference it will also be useful to write the problem \eqref{eq:vNormed} in the traditional way, by substituting $\bNrm_{\prd+1},{\kNrm}_{\prd+1},$ and $\aNrm_{\prd}$ into $\mNrm_{\prd+1}$: -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{\prd}(\mNrm_{\prd}) & = \max_{\cNrm} ~~ \uFunc(\cNrm)+ \DiscFac \Ex_{\prd}[ \PermGroFac_{\prd+1}^{1-\CRRA}\vFunc_{\prd+1}(\overbrace{(\mNrm_{\prd}-\cNrm)(\Rfree/\PermGroFac_{\prd+1})+\TranShkEmp_{\prd+1}}^{\mNrm_{\prd+1}})] \label{eq:vusual}. - \end{aligned}\end{gathered}\end{equation} - diff --git a/docs/sec_notation-input-clean.tex b/docs/sec_notation-input-clean.tex deleted file mode 100644 index 34e5386a6..000000000 --- a/docs/sec_notation-input-clean.tex +++ /dev/null @@ -1,85 +0,0 @@ - -\hypertarget{notation}{} -\section{Notation}\label{sec:notation} - -\subsection{Periods, Stages, Steps} - -The problem specified above assumes that the agent has only one decision problem to solve in any {period}. In practice, it is increasingly common to model agents who have multiple choice {stage}s per {period}; an agent's problem might have, say, a consumption decision (call it the $\cFunc$ {stage}), a labor supply {stage} (call it $\labor$), and a choice of what proportion $\Shr$ of their assets to invest in a risky asset (the portfolio-choice {stage}). - -The modeler might well want to explore whether the order in which the {stage}s are solved makes any difference, either to the substantive results or to aspects of the computational solution like speed and accuracy.
- -If, as in section \ref{sec:the-problem}, we hard-wire into the solution code for each {stage} an assumption that its successor {stage} will be something in particular (say, the consumption {stage} assumes that the portfolio choice is next), then if we want to change the order of the {stage}s (say, labor supply after consumption, followed by portfolio choice), we will need to re-hard-wire each of the stages to know particular things about its new successor (for example, the specifics of the distribution of the rate of return on the risky asset would need to be known by whatever {stage} precedes the portfolio choice {stage}). - -But one of the cardinal insights of Bellman's (1957, ``Dynamic Programming'') original work is that \emph{everything that matters} for the solution to the current problem is encoded in a `continuation-value function.' %that incorporates \texttt{everything about the future} that is important to solution of the present stage. %This point is important for a number of reasons, but here we will focus on one problem of ignoring it. Actual solution of the maximization problem as specified in \eqref{eq:vNormed} requires the current agent to have knowledge not only of the successor value function, but also of other aspects of the problem like the distributions of the future period's stochastic shocks. So any solution to the problem that directly uses \eqref{eq:vNormed} will need to hard-wire into itself the specifics of the successor problem. - -Using Bellman's insight, we describe here a framework for isolating the {stage} problems within a {period} from each other, and the {period} from its successors in any future {period}; the advantage of this is that the isolated {stage} and {period} problems will then be `modular': We can solve them in any order \textit{without changing any code}. After considering the {stage}-order $[\ell,\cFunc,\Shr]$, the modeler can costlessly reorder the {stage}s to consider, say, the order $[\ell,\Shr,\cFunc]$.\footnote{As long as the beginning-of-{stage} and end-of-{stage} value functions for the {stage}s all depend on the same state variables; see section \ref{sec:multiple-control-variables} for further discussion.} - -\subsection{Steps} - -The key to the framework is distinguishing, within each {stage}'s Bellman problem, three {steps}: - -\begin{enumerate} -\item \textbf{\Arrival}: Incoming state variables (e.g., $k$) are known, but any shocks associated with the period have not been realized and decision(s) have not yet been made -\item \textbf{\Decision}: All exogenous variables (like income shocks, rate of return shocks, and predictable income growth $G$) have been realized (so that, e.g., $m$'s value is known) and the agent solves the optimization problem -\item \textbf{\Continuation}: After all decisions have been made, their consequences are measured by evaluation of the continuing-value function at the values of the `outgoing' state variables (sometimes called `post-state' variables). -\end{enumerate} - -%In the standard treatment in the literature, the (implicit) default assumption is that the {step} where the agent is solving a decision problem is the unique {step} at which the problem is defined. This is what was done above, when (for example) in \eqref{eq:vNormed} we related the value $\vFunc$ of the current decision to the expectation of the future value $\vFunc_{\prd+1}$.
Here, instead, we want to encapsulate the current {stage}'s problem as a standalone object, which is solved by taking as given an exogenously-provided continuation-value function (in our case, $\vEndStp(a)$). - -When we want to refer to a specific {step} in the {stage} we will do so by using an indicator which identifies that {step}. Here we use the consumption {stage} problem described above to exemplify the usage: -\begin{center} -% \mbox{% - \begin{tabular}{r|c|c|l|l} - {Step} & Indicator & State & Usage & Explanation \\ \hline - {\Arrival} & $ \arvl $ & $k$ & $\vBegStp(k)$ & value at entry to {stage} (before shocks) \\ - {\Decision}(s) & (blank) & $m$ & $\vMidStp(m)$ & value of {stage}-decision (after shocks) \\ - {\Continuation} & $ \cntn $ & $a$ & $\vEndStp(a)$ & value at exit (after decision) \\ \hline - \end{tabular} -% } - \end{center} - - Notice that the value functions at different {step}s of the {stage} have distinct state variables. Only $k$ is known at the beginning of the period, and other variables take on their values with equations like $b = k \mathcal{R}$ and $m = b+\TranShkEmp.$ We will refer to such within-the-{stage} creation of variables as {evolutions}.% Thus, the consumption problem has two {evolutions}: from $\kNrm$ to $\mNrm$ and from $\mNrm$ to $\aNrm$. - -\subsection{Transitions} - In the backward-induction world of Bellman solutions, to solve the problem of a particular {period} we must start with an end-of-{period} value function, which we designate by including the {period} indicator in the subscript: - \begin{equation}\begin{gathered}\begin{aligned} - \vEndPrd(a) & \mapsto \beta \vBegPrdNxt(\overbrace{a}^{=k}), \label{eq:trns-single-prd} - \end{aligned}\end{gathered}\end{equation} -and we are not done solving the problem of the entire {period} until we have constructed a beginning-of-{period} value function $\vBegPrd(k)$. - -Once we are inside a {stage}, we will also need an end-of-{stage} value function. For the last {stage} in a {period} the end-of-{stage} function is taken to be the end-of-{period} value function: - \begin{equation}\begin{gathered}\begin{aligned} - \vEndStg(a) \mapsto \vEndPrd(a). - \end{aligned}\end{gathered}\end{equation} - -One way to describe this is that when we are considering the solution to the current {stage}, we will be working with what, in computer programming, is called a `local function' $\vEndStg(a)$ whose value at the beginning of the {stage}-solution algorithm has been initialized to the value of a previously-computed `global function' $\vEndPrd(a)$ that had already been constructed by mapping itself to $\beta \vBegPrdNxt$ (equation \eqref{eq:trns-single-prd}). -\hypertarget{decision-problem}{} - -\subsection{The Decision Problem in the New Notation}\label{subsec:decision-problem} - -The {\Decision} problem can now be written much more cleanly than in equation \eqref{eq:vNormed}: - \begin{equation}\begin{gathered}\begin{aligned} - v(m) & = \max_{c}~ \uFunc(c) + v_{\cntn}(\overbrace{m-c}^{a}) \label{eq:vMid} - \end{aligned}\end{gathered}\end{equation} -whose first order condition with respect to $c$ is -\begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(c) &= \vEndStp^{a}(m-c) \label{eq:upEqbetaOp} -\end{aligned}\end{gathered}\end{equation} -which is mathematically equivalent to the usual Euler equation for consumption. (We will reuse this formulation when we turn to section~\ref{subsec:egm}.) - -Having defined these notational conventions, we are now ready to move to substance.
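- -Before moving on, it may help to see this modularity in miniature. The following sketch is our own illustration (not code from the notebook): each {stage} is reduced to a single `backstep' that maps an end-of-{stage} (continuation) value function into a beginning-of-{stage} value function, so reordering the {stage}s is literally just reordering a list:
\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
\begin{lstlisting}
from typing import Callable, List

ValueFn = Callable[[float], float]  # value as a function of the stage's state

class Stage:
    """A stage knows only how to turn its continuation-value function into
    its arrival-value function; it knows nothing about its successor."""
    def __init__(self, backstep: Callable[[ValueFn], ValueFn]):
        self.backstep = backstep

def solve_period(stages: List[Stage], v_end_of_period: ValueFn) -> ValueFn:
    """Backward induction within a period: the last stage's continuation is
    the end-of-period value function, and each earlier stage receives the
    beginning-of-stage value function of its successor. Because every stage
    has the same interface, a list like [labor, consume, portfolio] can be
    reordered to [labor, portfolio, consume] without changing any stage."""
    v = v_end_of_period
    for stage in reversed(stages):
        v = stage.backstep(v)
    return v
\end{lstlisting}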
- -\begin{comment} - % - \subsection{Implementation in Python} - - The code implementing the tasks outlined in each of the sections to come is available in the \texttt{\href{https://econ-ark.org/materials/SolvingMicroDSOPs}{SolvingMicroDSOPs}} jupyter notebook, written in \href{https://python.org}{Python}. The notebook imports various modules, including the standard \texttt{numpy} and \texttt{scipy} modules used for numerical methods in Python, as well as some user-defined modules designed to provide numerical solutions to the consumer's problem from the previous section. Before delving into the computational exercise, it is essential to touch on the practicality of these custom modules. - - \subsubsection{Useful auxiliary files} - - In this exercise, two primary user-defined modules are frequently imported and utilized. The first is the \texttt{gothic\_class} module, which contains functions describing the end-of-period value functions found in equations \eqref{eq:vBegStp} - \eqref{eq:vEnd} (and the corresponding first and second derivatives). %The advantage of defining functions in the code which decompose the consumer's optimal behavior in a given period will become evident in section \ref{subsec:transformation} - - The \texttt{resources} module is also used repeatedly throughout the notebook. This file has three primary objectives: (i) providing functions that discretize the continuous distributions from the theoretical model that describe the uncertainty a consumer faces, (ii) defining the utility function over consumption under a number of specifications, and (iii) enhancing the grid of end-of-period assets for which functions (such as those from the \texttt{gothic\_class} module) will be defined. These objectives will be discussed in greater detail and with respect to the numerical methods used to solve the problem in subsequent sections of this document. -\end{comment} - diff --git a/docs/sec_notation-input.tex b/docs/sec_notation-input.tex deleted file mode 100644 index baf23a3ea..000000000 --- a/docs/sec_notation-input.tex +++ /dev/null @@ -1,85 +0,0 @@ - -\hypertarget{notation}{} -\section{Notation}\label{sec:notation} - -\subsection{Periods, Stages, Steps} - -The problem specified above assumes that the agent has only one decision problem to solve in any {period}. In practice, it is increasingly common to model agents who have multiple choice {stage}s per {period}; an agent's problem might have, say, a consumption decision (call it the $\cFunc$ {stage}), a labor supply {stage} (call it $\labor$), and a choice of what proportion $\Shr$ of their assets to invest in a risky asset (the portfolio-choice {stage}). - -The modeler might well want to explore whether the order in which the {stage}s are solved makes any difference, either to the substantive results or to aspects of the computational solution like speed and accuracy. - -If, as in section \ref{sec:the-problem}, we hard-wire into the solution code for each {stage} an assumption that its successor {stage} will be something in particular (say, the consumption {stage} assumes that the portfolio choice is next), then if we want to change the order of the {stage}s (say, labor supply after consumption, followed by portfolio choice), we will need to re-hard-wire each of the stages to know particular things about its new successor (for example, the specifics of the distribution of the rate of return on the risky asset would need to be known by whatever {stage} precedes the portfolio choice {stage}).
- -But one of the cardinal insights of Bellman's (1957, ``Dynamic Programming'') original work is that \emph{everything that matters} for the solution to the current problem is encoded in a `continuation-value function.' %that incorporates \texttt{everything about the future} that is important to solution of the present stage. %This point is important for a number of reasons, but here we will focus on one problem of ignoring it. Actual solution of the maximization problem as specified in \eqref{eq:vNormed} requires the current agent to have knowledge not only of the successor value function, but also of other aspects of the problem like the distributions of the future period's stochastic shocks. So any solution to the problem that directly uses \eqref{eq:vNormed} will need to hard-wire into itself the specifics of the successor problem. - -Using Bellman's insight, we describe here a framework for isolating the {stage} problems within a {period} from each other, and the {period} from its successors in any future {period}; the advantage of this is that the isolated {stage} and {period} problems will then be `modular': We can solve them in any order \textit{without changing any code}. After considering the {stage}-order $[\ell,\cFunc,\Shr]$, the modeler can costlessly reorder the {stage}s to consider, say, the order $[\ell,\Shr,\cFunc]$.\footnote{As long as the beginning-of-{stage} and end-of-{stage} value functions for the {stage}s all depend on the same state variables; see section \ref{sec:multiple-control-variables} for further discussion.} - -\subsection{Steps} - -The key to the framework is distinguishing, within each {stage}'s Bellman problem, three {steps}: - -\begin{enumerate} -\item \textbf{\Arrival}: Incoming state variables (e.g., $\kNrm$) are known, but any shocks associated with the period have not been realized and decision(s) have not yet been made -\item \textbf{\Decision}: All exogenous variables (like income shocks, rate of return shocks, and predictable income growth $\PermGroFac$) have been realized (so that, e.g., $\mNrm$'s value is known) and the agent solves the optimization problem -\item \textbf{\Continuation}: After all decisions have been made, their consequences are measured by evaluation of the continuing-value function at the values of the `outgoing' state variables (sometimes called `post-state' variables). -\end{enumerate} - -%In the standard treatment in the literature, the (implicit) default assumption is that the {step} where the agent is solving a decision problem is the unique {step} at which the problem is defined. This is what was done above, when (for example) in \eqref{eq:vNormed} we related the value $\vFunc$ of the current decision to the expectation of the future value $\vFunc_{\prd+1}$. Here, instead, we want to encapsulate the current {stage}'s problem as a standalone object, which is solved by taking as given an exogenously-provided continuation-value function (in our case, $\vEndStp(\aNrm)$). - -When we want to refer to a specific {step} in the {stage} we will do so by using an indicator which identifies that {step}.
Here we use the consumption {stage} problem described above to exemplify the usage: -\begin{center} -% \mbox{% - \begin{tabular}{r|c|c|l|l} - {Step} & Indicator & State & Usage & Explanation \\ \hline - {\Arrival} & $ \arvl $ & $\kNrm$ & $\vBegStp({\kNrm})$ & value at entry to {stage} (before shocks) \\ - {\Decision}(s) & (blank) & $\mNrm$ & $\vMidStp(\mNrm)$ & value of {stage}-decision (after shocks) \\ - {\Continuation} & $ \cntn $ & $\aNrm$ & $\vEndStp(\aNrm)$ & value at exit (after decision) \\ \hline - \end{tabular} -% } - \end{center} - - Notice that the value functions at different {step}s of the {stage} have distinct state variables. Only $\kNrm$ is known at the beginning of the period, and other variables take on their values with equations like $\bNrm = \kNrm \RNrm$ and $\mNrm = \bNrm+\TranShkEmp.$ We will refer to such within-the-{stage} creation of variables as {evolutions}.% Thus, the consumption problem has two {evolutions}: from $\kNrm$ to $\mNrm$ and from $\mNrm$ to $\aNrm$. - -\subsection{Transitions} - In the backward-induction world of Bellman solutions, to solve the problem of a particular {period} we must start with an end-of-{period} value function, which we designate by including the {period} indicator in the subscript: - \begin{equation}\begin{gathered}\begin{aligned} - \vEndPrd(\aNrm) & \mapsto \DiscFac \vBegPrdNxt(\overbrace{\aNrm}^{=\kNrm}), \label{eq:trns-single-prd} - \end{aligned}\end{gathered}\end{equation} -and we are not done solving the problem of the entire {period} until we have constructed a beginning-of-{period} value function $\vBegPrd(\kNrm)$. - -Once we are inside a {stage}, we will also need an end-of-{stage} value function. For the last {stage} in a {period} the end-of-{stage} function is taken to be the end-of-{period} value function: - \begin{equation}\begin{gathered}\begin{aligned} - \vEndStg(\aNrm) \mapsto \vEndPrd(\aNrm). - \end{aligned}\end{gathered}\end{equation} - -One way to describe this is that when we are considering the solution to the current {stage}, we will be working with what, in computer programming, is called a `local function' $\vEndStg(\aNrm)$ whose value at the beginning of the {stage}-solution algorithm has been initialized to the value of a previously-computed `global function' $\vEndPrd(\aNrm)$ that had already been constructed by mapping itself to $\DiscFac \vBegPrdNxt$ (equation \eqref{eq:trns-single-prd}). -\hypertarget{decision-problem}{} - -\subsection{The Decision Problem in the New Notation}\label{subsec:decision-problem} - -The {\Decision} problem can now be written much more cleanly than in equation \eqref{eq:vNormed}: - \begin{equation}\begin{gathered}\begin{aligned} - \vFunc(\mNrm) & = \max_{\cNrm}~ \uFunc(\cNrm) + \vFunc_{\cntn}(\overbrace{\mNrm-\cNrm}^{\aNrm}) \label{eq:vMid} - \end{aligned}\end{gathered}\end{equation} -whose first order condition with respect to $\cNrm$ is -\begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{\cNrm}(\cNrm) &= \vEndStp^{\aNrm}(\mNrm-\cNrm) \label{eq:upEqbetaOp} -\end{aligned}\end{gathered}\end{equation} -which is mathematically equivalent to the usual Euler equation for consumption. (We will reuse this formulation when we turn to section~\ref{subsec:egm}.) - -Having defined these notational conventions, we are now ready to move to substance.
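- -For concreteness, the {\Decision} {step} by itself can be written so that it takes the continuation function as data. Below is a minimal sketch with CRRA utility (the parameter value and the example continuation are our own illustrative assumptions, not the notebook's):
\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
\begin{lstlisting}
from scipy.optimize import brentq

rho = 2.0  # illustrative CRRA coefficient

def u_prime(c):
    return c ** (-rho)

def decide(m, vP_cntn, eps=1e-10):
    """Decision step: find the c that satisfies the FOC
    u'(c) = vP_cntn(m - c), for ANY continuation marginal-value
    function vP_cntn handed in from outside the stage."""
    return brentq(lambda c: u_prime(c) - vP_cntn(m - c), eps, m - eps)

# Example: a terminal-style continuation with vP(a) = (a + 1)**(-rho)
c_star = decide(3.0, lambda a: (a + 1.0) ** (-rho))  # optimal c at m = 3
\end{lstlisting}
Nothing in \texttt{decide} depends on what generated \texttt{vP\_cntn}, which is exactly the encapsulation that makes the {stage}s reorderable.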
- -\begin{comment} - % - \subsection{Implementation in Python} - - The code implementing the tasks outlined in each of the sections to come is available in the \texttt{\href{https://econ-ark.org/materials/SolvingMicroDSOPs}{SolvingMicroDSOPs}} jupyter notebook, written in \href{https://python.org}{Python}. The notebook imports various modules, including the standard \texttt{numpy} and \texttt{scipy} modules used for numerical methods in Python, as well as some user-defined modules designed to provide numerical solutions to the consumer's problem from the previous section. Before delving into the computational exercise, it is essential to touch on the practicality of these custom modules. - - \subsubsection{Useful auxiliary files} - - In this exercise, two primary user-defined modules are frequently imported and utilized. The first is the \texttt{gothic\_class} module, which contains functions describing the end-of-period value functions found in equations \eqref{eq:vBegStp} - \eqref{eq:vEnd} (and the corresponding first and second derivatives). %The advantage of defining functions in the code which decompose the consumer's optimal behavior in a given period will become evident in section \ref{subsec:transformation} - - The \texttt{resources} module is also used repeatedly throughout the notebook. This file has three primary objectives: (i) providing functions that discretize the continuous distributions from the theoretical model that describe the uncertainty a consumer faces, (ii) defining the utility function over consumption under a number of specifications, and (iii) enhancing the grid of end-of-period assets for which functions (such as those from the \texttt{gothic\_class} module) will be defined. These objectives will be discussed in greater detail and with respect to the numerical methods used to solve the problem in subsequent sections of this document. -\end{comment} - diff --git a/docs/sec_solving-the-next-input-clean.tex b/docs/sec_solving-the-next-input-clean.tex deleted file mode 100644 index 66a74e754..000000000 --- a/docs/sec_solving-the-next-input-clean.tex +++ /dev/null @@ -1,521 +0,0 @@ - -\hypertarget{solving-the-next-to-last-period}{} -\hypertarget{solving-the-next}{} -\section{Solving the Next-to-Last Period}\label{sec:solving-the-next} - -To reduce clutter, we now temporarily assume that $G_{t}=1$ for all $t$, so that the $G$ terms from the earlier derivations disappear; setting $t=T$, the problem in the second-to-last period of life can now be expressed as -\begin{equation}\begin{gathered}\begin{aligned} - v_{\MidPrdLsT}(\mStte) & = \max_{\cCtrl} ~~ \uFunc(\cCtrl) + - \vEndPrdLsT(\overbrace{\mStte-\cCtrl}^{\aStte}) - \label{eq:vEndTm1} -\end{aligned}\end{gathered}\end{equation} -where -\begin{equation*}\begin{gathered}\begin{aligned} - v_{\EndPrdLsT}(\aStte) & = \beta v_{\BegPrd}(\aStte) -\\ & = \beta \Ex_{\BegPrd} \left[\PermGroFacAdjV v_{\MidPrd}(\underbrace{\aStte \mathcal{R}_{t} + \TranShkEmp_{t}}_{{m}_{t}})\right] - \end{aligned}\end{gathered}\end{equation*} - -% \begin{equation*}\begin{gathered}\begin{aligned} -% \vFunc_{\prdLsT}(\mStte) & = \max_{\cCtrl} ~~ \uFunc(\cCtrl) -% + \DiscFac \Ex_{\EndPrdLsT} \left[\PermGroFacAdjV \vFunc_{\MidPrd}(\underbrace{(\mStte-\cCtrl)\RNrm_{\prdT} + \TranShkEmp_{\prdT}}_{{m}_{\prdT}})\right].
-% \end{aligned}\end{gathered}\end{equation*} - - -Using (0) $t=T$; (1) $v_{t}(m)=\uFunc(m)$; (2) the definition of $\uFunc(m)$; and (3) the definition of the expectations operator, %\newcommand{\TranShkEmpDummy}{\vartheta} -\begin{equation}\begin{gathered}\begin{aligned} - v_{\BegPrd}(\aStte) & = \PermGroFacAdjV\int_{0}^{\infty} \frac{\left(\aStte \mathcal{R}_{t}+ \TranShkEmpDummy\right)^{1-\rho}}{1-\rho} d\FDist(\TranShkEmpDummy) \label{eq:NumDefInt} - \end{aligned}\end{gathered}\end{equation} -where $\FDist(\TranShkEmp)$ is the cumulative distribution function for ${\TranShkEmp}$. - -\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single} -\lstinputlisting{./Code/Python/snippets/rawsolution.py} - -This maximization problem implicitly defines a `local function' $\cFunc_{\prd-1}(m)$ that yields optimal consumption in period $t-1$ for any specific numerical level of resources like $m=1.7$.% (When we need to use this function from some context outside of the local context in which it was solved, we can reference by its absolute index, $\cFunc_{\prdT-1}$). - -But because there is no general analytical solution to this problem, for any given $m$ we must use numerical computational tools to find the $\cCtrl$ that maximizes the expression. This is excruciatingly slow because for every potential $c$ to be considered, a definite integral over the interval $(0,\infty)$ must be calculated numerically, and numerical integration is \textit{very} slow (especially over an unbounded domain!). - -\hypertarget{discretizing-the-distribution}{} -\subsection{Discretizing the Distribution} -Our first speedup trick is therefore to construct a discrete approximation to the lognormal distribution that can be used in place of numerical integration. That is, we want to approximate the expectation over $\TranShkEmp$ of a function $g(\TranShkEmp)$ by calculating its value at a set of $n_{\TranShkEmp}$ points $\TranShkEmp_{i}$, each of which has an associated probability weight $w_{i}$: -\begin{equation*}\begin{gathered}\begin{aligned} - \Ex[g(\TranShkEmp)] & = \int_{\TranShkEmpMin}^{\TranShkEmpMax}g(\TranShkEmpDummy)\,d\FDist(\TranShkEmpDummy) \\ - & \approx \sum_{i = 1}^{n_{\TranShkEmp}}w_{i}g(\TranShkEmp_{i}) - \end{aligned}\end{gathered}\end{equation*} -(because adding $n$ weighted values to each other is enormously faster than general-purpose numerical integration). - -Such a procedure is called a `quadrature' method of integration; \cite{Tanaka2013-bc} survey a number of options, but for our purposes we choose the one which is easiest to understand: An `equiprobable' approximation (that is, one where each of the values of $\TranShkEmp_{i}$ has an equal probability, equal to $1/n_{\TranShkEmp}$). - -We calculate such an $n$-point approximation as follows. - -Define a set of points from $\sharp_{0}$ to $\sharp_{n_{\TranShkEmp}}$ on the $[0,1]$ interval -as the elements of the set $\sharp = \{0,1/n,2/n, \ldots,1\}$.\footnote{These points define intervals that constitute a partition of the domain of $\FDist$.} Call the inverse of the $\TranShkEmp$ distribution $\FDist^{-1}_{\phantom{\TranShkEmp}}$, and define the -points $\sharp^{-1}_{i} = \FDist^{-1}_{\phantom{\TranShkEmp}}(\sharp_{i})$.
Then -the conditional mean of $\TranShkEmp$ in each of the intervals numbered 1 to $n$ is: -\begin{equation}\begin{gathered}\begin{aligned} - \TranShkEmp_{i} \equiv \Ex[\TranShkEmp | \sharp_{i-1}^{-1} \leq \TranShkEmp < \sharp_{i}^{-1}] & = n_{\TranShkEmp}\int_{\sharp^{-1}_{i-1}}^{\sharp^{-1}_{i}} \vartheta ~ d\FDist_{\phantom{\TranShkEmp}}(\vartheta) , - \end{aligned}\end{gathered}\end{equation} -where the leading $n_{\TranShkEmp}$ undoes the division by each interval's probability $1/n_{\TranShkEmp}$; when the integral is evaluated numerically for each $i$ the result is a set of values of $\TranShkEmp$ that correspond to the mean value in each of the $n$ intervals. - -The method is illustrated in Figure~\ref{fig:discreteapprox}. The solid continuous curve represents -the ``true'' CDF $\FDist(\TranShkEmp)$ for a lognormal distribution such that $\Ex[\TranShkEmp] = 1$, $\sigma_{\TranShkEmp} = 0.1$. The short vertical line segments represent the $n_{\TranShkEmp}$ -equiprobable values of $\TranShkEmp_{i}$ which are used to approximate this -distribution.\footnote{More sophisticated approximation methods exist - (e.g.\ Gauss-Hermite quadrature; see \cite{kopecky2010finite} for a discussion of other alternatives), but the method described here is easy to understand, quick to calculate, and has additional advantages briefly described in the discussion of simulation below.} - \hypertarget{discreteApprox}{} - \begin{figure} - \includegraphics[width=0.8\textwidth]{\econtexRoot/Figures/discreteApprox} - \caption{Equiprobable Discrete Approximation to Lognormal Distribution $\FDist$} - \label{fig:discreteapprox} - \end{figure} - - -Because one of the purposes of these notes is to connect the math to the code that solves the math, we display here a brief snippet from the notebook that constructs these points. - - -\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single} -\lstinputlisting{./Code/Python/snippets/equiprobable-make.py}\nopagebreak - -Substituting the equiprobable approximation for the integral, the end-of-period value function becomes a simple sum: - \begin{equation}\begin{gathered}\begin{aligned} - v_{{\prdm}_\cntn}(\aStte) & = \beta \PermGroFacAdjV\left(\frac{1}{n_{\TranShkEmp}}\right)\sum_{i=1}^{n_{\TranShkEmp}} \frac{\left(\mathcal{R}_{t} \aStte + \TranShkEmp_{i}\right)^{1-\rho}}{1-\rho} \label{eq:vDiscrete} - \end{aligned}\end{gathered}\end{equation} - -We now substitute our approximation \eqref{eq:vDiscrete} for $\vEndPrdLsT(a)$ in \eqref{eq:vEndTm1}; because \eqref{eq:vDiscrete} is simply the sum of $n_{\TranShkEmp}$ numbers, it is easy to calculate (compared to the full-fledged numerical integration \eqref{eq:NumDefInt} that it replaces). - -% so we can rewrite the maximization problem that defines the middle step of period {$\prdLst$} as -% \begin{equation}\begin{gathered}\begin{aligned} -% \vFunc_{\MidPrdLsT}(\mStte) & = \max_{\cCtrl} -% \left\{ -% \frac{\cCtrl^{1-\CRRA}}{1-\CRRA} + -% \vFunc_{\MidPrd}(\mStte-\cCtrl) -% \right\}. -% \label{eq:vEndTm1} -% \end{aligned}\end{gathered}\end{equation} - -\lstinputlisting{./Code/Python/snippets/equiprobable-max-using.py} - -\begin{comment} - In the {\SMDSOPntbk} notebook, the section ``Discretization of the Income Shock Distribution'' provides code that instantiates the \texttt{DiscreteApproximation} class defined in the \texttt{resources} module. This class creates a seven-point discretization of the continuous lognormal distribution of transitory shocks to income, where the mean of $\log \TranShkEmp$ is $-.5 \sigma^2$ and the standard deviation is $\sigma = .5$.
- - A close look at the \texttt{DiscreteApproximation} class and its subclasses should convince you that the code is simply a computational implementation of the mathematical description of equiprobable discrete approximation in this section. Moreover, the Python code generates a graph of the discretized distribution depicted in \ref{fig:discreteapprox}. -\end{comment} - -\hypertarget{the-approximate-consumption-and-value-functions}{} -\subsection{The Approximate Consumption and Value Functions} - -Given any particular value of $\mStte$, a numerical maximization tool can now find the $\cCtrl$ that solves \eqref{eq:vEndTm1} in a reasonable amount of time. - -\begin{comment} - % The {\SMDSOPntbk} notebook follows a series of steps to achieve this. Initially, parameter values for the coefficient of relative risk aversion (CRRA, $\rho$), the discount factor ($\beta$), the permanent income growth factor ($\PermGroFac$), and the risk-free interest rate ($R$) are specified in ``Define Parameters, Grids, and the Utility Function.'' - - % After defining the utility function, the `natural borrowing constraint' is defined as $\underline{a}_{\prdT-1}=-\underline{\TranShkEmp}\RNrm_{\prdT}^{-1}$, which will be discussed in greater depth in section \ref{subsec:LiqConstrSelfImposed}. %Following the reformulation of the maximization problem, an instance of the \texttt{gothic\_class} is created using the specifications and the discretized distribution described in the prior lines of code; this is required to provide the numerical solution. -\end{comment} - -The notebook code responsible for computing an estimated consumption function begins in ``Solving the Model by Value Function Maximization,'' where a vector containing a set of possible values of market resources $m$ is created. (In the code, various $m$ vectors have names beginning {\mVec}; in these notes we will use boldface italics to represent vectors, so we can refer to our collection of $m$ points as $\vctr{m}$ with values indexed by brackets: $\vctr{m}[1]$ is the first entry in the vector, up to a last entry $\vctr{m}[-1]$.) We arbitrarily (and suboptimally) pick the five integers $0,\ldots,4$ as our {\mVec} gridpoints (in the code, \code{mVec\_int}= $\{0.,1.,2.,3.,4.\}$). - -% Finally, the previously computed values of optimal $c$ and the grid of market resources are combined to generate a graph of the approximated consumption function for this specific instance of the problem. To reduce the computational challenge of solving the problem, the process is evaluated only at a small number of gridpoints. - - -\hypertarget{an-interpolated-consumption-function}{} -\subsection{An Interpolated Consumption Function} \label{subsec:LinInterp} - -We can now apply our solution to \eqref{eq:vEndTm1} to each of the values in $\vctr{m}$, generating a corresponding optimal $\vctr{c}$. This is called `sampling' the consumption function.
Using the ordered pairs $\{\vctr{m},\vctr{c}\}$ we can create a piecewise linear `interpolating function' (a `spline') which, when applied to any input $\vctr{m}[1] \leq m\leq \vctr{m}[-1]$, will yield the value of $c$ that corresponds to a linear `connect-the-dots' interpolation of the value of $c$ from the values of the two nearest computed $\{m,c\}$ points.\footnote{For a useful treatment of various kinds of interpolation appropriate for different questions, see } % AL: Please provide href for your interpolation package - -This is accomplished in ``An Interpolated Consumption Function,'' which generates an interpolating function that we designate $\Aprx{\cFunc}_{\MidStpLsT}(\mStte)$. %When called with an $\mStte$ that is equal to one of the points in $\code{{{\mVec}\_int}}$, $\Aprx{\cFunc}_{\prdT-1}$ returns the associated value of $\vctr{c}_{\code{\prdT-1}}$, and when called with a value of $\mStte$ that is not exactly equal to one of the \texttt{mVec\_int}, returns the value of $c$ that reflects a linear interpolation between the $\vctr{c}_{\code{\prdT-1}}$ points associated with the two \texttt{mVec\_int} points immediately above and below $\mStte$. - -Figures \ref{fig:PlotcTm1Simple} and~\ref{fig:PlotVTm1Simple} show -plots of the constructed $\Aprx{\cFunc}_{\prd-1}$ and $\Aprx{v}_{\prd-1}$. While the $\Aprx{\cFunc}_{\prd-1}$ function looks very smooth, the fact that the $\Aprx{v}_{\prd-1}$ function is a set of line segments is very evident. The latter figure provides the beginning of the intuition for why trying to approximate the value function directly is a bad idea (in this context).\footnote{For some problems, especially ones with discrete choices, value function approximation is unavoidable; nevertheless, even in such problems, the techniques sketched below can be very useful across much of the range over which the problem is defined.} - -\hypertarget{PlotcTm1Simple}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/PlotcTm1Simple}} - \caption{$\cFunc_{T-1}(\mStte)$ (solid) versus $\Aprx{\cFunc}_{T-1}(\mStte)$ (dashed)} - \label{fig:PlotcTm1Simple} -\end{figure} - -\hypertarget{PlotvTm1Simple}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/PlotVTm1Simple}} - \caption{$v_{T-1}$ (solid) versus $\Aprx{v}_{T-1}(\mStte)$ (dashed)} - \label{fig:PlotVTm1Simple} -\end{figure} - - -\hypertarget{interpolating-expectations}{} -\subsection{Interpolating Expectations} - -\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Good approximation in the sense that increasing the number of points makes no discernable difference.}}{} - -Piecewise linear `spline' interpolation as described above works well for generating a good approximation to the true optimal consumption function. However, there is a clear inefficiency in the program: Since it uses equation \eqref{eq:vEndTm1}, for every value of $\mStte$ the program must calculate the utility consequences of various possible choices of $\cCtrl$ (and therefore $a_{\prd-1}$) as it searches for the best choice. - -For any given index $j$, as it searches for the optimal $a$ corresponding to $\vctr{m}[j]$, the algorithm will end up calculating $v_{\EndPrdLsT}(\tilde{a})$ for many $\tilde{a}$ values close to the optimal $a_{\prd-1}$. Indeed, even when searching for the optimal $a$ for a \emph{different} $m$ (say $\vctr{m}[k]$ for $k \neq j$) the search process might compute $v_{\EndPrdLsT}(a)$ for an $a$ close to the correct optimal $a$ for $\vctr{m}[j]$.
But if that difficult computation does not correspond to the exact solution to the $\vctr{m}[k]$ problem, it is discarded. - -To avoid solving the problem independently over and over again for multitudes of values of $a$ that are close to each other, we can employ the same interpolation technique used above to construct a direct numerical approximation to the value function: Define a vector of possible values for end-of-period assets at time $t-1$, $\vctr{a}$ (\code{aVec} in the code). Next, construct $\vctr{v} = v_{({\prd-1})_\cntn}(\vctr{a})$ using equation (\ref{eq:vDiscrete}); then construct an approximation $\Aprx{v}_{({\prd-1})_\cntn}(a)$ by passing the vectors $\vctr{a}$ and $\vctr{v}$ as arguments to a piecewise-linear interpolator (e.g., the one in \texttt{scipy.interpolate}).% -% (These lists contain the points of the $\vctr{a}_{{\prdT-1}}$ and $\vctr{v}_{{\prdT-1}}$ vectors, respectively.) - -The notebook section ``Interpolating Expectations'' now interpolates the expected value of \textit{ending} the period with a given amount of assets.\footnote{What we are doing here is closely related to `the method of parameterized expectations' of \cite{denHaanMarcet:parameterized}; the only difference is that our method is essentially a nonparametric version.} %The problem is solved in the same block with the remaining lines of code. - -Figure~\ref{fig:PlotOTm1RawVSInt} compares the true value function to the approximation produced by following the interpolation procedure; the approximated and exact functions are of course identical at the gridpoints of $\vctr{a}$ and they appear reasonably close except in the region below $\mStte=1$. - -\hypertarget{PlotOTm1RawVSInt}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/PlotOTm1RawVSInt}} - \caption{End-Of-Period Value $v_{({\prd-1})_\cntn}(a_{\prd-1})$ (solid) versus $\Aprx{v}_{({T-1})_\cntn}(a_{T-1})$ (dashed)} - \label{fig:PlotOTm1RawVSInt} -\end{figure} - -\hypertarget{PlotComparecTm1AB}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/PlotComparecTm1AB}} - \caption{$\cFunc_{T-1}(\mStte)$ (solid) versus $\Aprx{\cFunc}_{T-1}(\mStte)$ (dashed)} - \label{fig:PlotComparecTm1AB} -\end{figure} - -\Fix{\marginpar{\tiny In all figs, replace gothic h with notation corresponding to the lecture notes.}} - -% \ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Don't skip the 2-3-3-4 example in the text - it will be used again in a moment.}}{} -Nevertheless, the consumption rule obtained when the approximating $\Aprx{v}_{({\prd-1})_\cntn}(a_{\prd-1})$ is used instead of $v_{({\prd-1})_\cntn}(a_{\prd-1})$ is surprisingly bad, as shown in figure \ref{fig:PlotComparecTm1AB}. For example, when $\mStte$ goes from 2 to 3, $\Aprx{\cFunc}_{\prd-1}$ goes from about 1 to about 2, yet when $\mStte$ goes from 3 to 4, $\Aprx{\cFunc}_{\prd-1}$ goes from about 2 to about 2.05. The function fails even to be concave, which is distressing because Carroll and Kimball~\citeyearpar{ckConcavity} prove that the correct consumption function is strictly concave in a wide class of problems that includes this one. - -\hypertarget{value-function-versus-first-order-condition}{} -\subsection{Value Function versus First Order Condition}\label{subsec:vVsuP} - -Loosely speaking, our difficulty reflects the fact that the -consumption choice is governed by the \textit{marginal} value function, -not by the \textit{level} of the value function (which is the object that -we approximated).
To understand this point, recall that a quadratic -utility function -\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Intuitively speaking, if one's goal is to accurately capture behavior that is governed by marginal utility or the marginal value function, numerical techniques that approximate the \textit{marginal} value function are likely to work better.}} exhibits -risk aversion because with a stochastic $c$, -\begin{equation} - \Ex[-(c - \cancel{c})^{2}] < - (\Ex[c] - \cancel{c})^{2} -\end{equation} -(where $\cancel{c}$ is the `bliss point' which is assumed always to exceed feasible $c$). However, unlike the CRRA utility function, -with quadratic utility the consumption/saving \textit{behavior} of consumers -is unaffected by risk since behavior is determined by the first order condition, which -depends on \textit{marginal} utility, and when utility is quadratic, marginal utility is unaffected -by risk: -\begin{equation} - \Ex[-2(c - \cancel{c})] = - 2(\Ex[c] - \cancel{c}). -\end{equation} - -Intuitively, if one's goal is to accurately capture choices -that are governed by marginal value, -numerical techniques that approximate the \textit{marginal} value -function will yield a more accurate approximation to -optimal behavior than techniques that approximate the \textit{level} -of the value function. - -The first order condition of the maximization problem in period $T-1$ is: - \begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(\cCtrl) & = \beta \Ex_{\cntn(T-1)} [\PermGroFacAdjMu R \uFunc^{c}(c_{t})] %\label{eq:focraw} - \\ \cCtrl^{-\rho} & = R \beta \left(\frac{1}{n_{\TranShkEmp}}\right) \sum_{i=1}^{n_{\TranShkEmp}} \PermGroFacAdjMu\left(R (\mStte-\cCtrl) + \TranShkEmp_{i}\right)^{-\rho} \label{eq:FOCTm1}. - \end{aligned}\end{gathered}\end{equation} -\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Go from the first to the last equation in \eqref{eq:FOCTm1} by substituting $\uFunc^{c}(c)=c^{-\rho}$ and use the approximation to the integral.}}{} -\hypertarget{PlotuPrimeVSOPrime}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/PlotuPrimeVSOPrime}} - \caption{$\uFunc^{c}(c)$ versus $v_{({T-1})_\cntn}^{a}(3-c), v_{({T-1})_\cntn}^{a}(4-c), \Aprx{v}_{({T-1})_\cntn}^{a}(3-c), \Aprx{v}_{({T-1})_\cntn}^{a}(4-c)$} - \label{fig:PlotuPrimeVSOPrime} -\end{figure} - -In the notebook, the ``Value Function versus the First Order Condition'' section completes the task of finding the values of consumption which satisfy the first order condition in \eqref{eq:FOCTm1} using the \href{https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brentq.html}{\texttt{brentq}} function from the \texttt{scipy} package. %Notice that the use of \texttt{u.prime} and \texttt{gothic.VP\_Tminus1} is possible since they are already defined in the \texttt{resources} and \texttt{gothic\_class} modules. - -The downward-sloping curve in Figure \ref{fig:PlotuPrimeVSOPrime} -shows the value of $\cCtrl^{-\rho}$ for our baseline parameter values -for $0 \leq \cCtrl \leq 4$ (the horizontal axis). The solid -upward-sloping curve shows the value of the RHS of (\ref{eq:FOCTm1}) -as a function of $\cCtrl$ under the assumption that $\mStte=3$. -Constructing this figure is time-consuming, because for every -value of $\cCtrl$ plotted we must calculate the RHS of -(\ref{eq:FOCTm1}).
The value of $\cCtrl$ for which the RHS and LHS -of (\ref{eq:FOCTm1}) are equal is the optimal level of consumption -given that $\mStte=3$, so the intersection of the downward-sloping -and the upward-sloping curves gives the (approximated) optimal value of $\cCtrl$. -As we can see, the two curves intersect just below $\cCtrl=2$. -Similarly, the upward-sloping dashed curve shows the expected value -of the RHS of (\ref{eq:FOCTm1}) under the assumption that $\mStte=4$, -and the intersection of this curve with $\uFunc^{c}(\cCtrl)$ yields the -optimal level of consumption if $\mStte=4$. These two curves -intersect slightly below $\cCtrl=2.5$. Thus, increasing $\mStte$ -from 3 to 4 increases optimal consumption by about 0.5. - -\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Flip back to Figure - 4 to make the point that $\Aprx{\vEnd}^{a}$ is a step - function.}}{} Now consider the derivative of our function -$\Aprx{v}_{({\prd-1})_\cntn}(a_{\prd-1})$. Because we have constructed -$\Aprx{v}_{({\prd-1})_\cntn}$ as a linear interpolation, the slope of -$\Aprx{v}_{({\prd-1})_\cntn}(a_{\prd-1})$ between any two adjacent -points $\{\vctr{a}[i],\vctr{a}[{i+1}]\}$ is constant. The -level of the slope immediately below any particular gridpoint is -different, of course, from the slope above that gridpoint, a fact -which implies that the derivative of -$\Aprx{v}_{({\prd-1})_\cntn}(a_{\prd-1})$ follows a step function. - -The solid-line step function in Figure \ref{fig:PlotuPrimeVSOPrime} -depicts the actual value of -$\Aprx{v}_{({\prd-1})_\cntn}^{a}(3-\cCtrl)$. When we attempt to find -optimal values of $\cCtrl$ given $\mStte$ using -$\Aprx{v}_{({\prd-1})_\cntn}(a_{\prd-1})$, the numerical optimization -routine will return the $\cCtrl$ for which $\uFunc^{c}(\cCtrl) = -\Aprx{v}^{a}_{({\prd-1})_\cntn}(\mStte-\cCtrl)$. Thus, for -$\mStte=3$ the program will return the value of $\cCtrl$ for -which the downward-sloping $\uFunc^{c}(\cCtrl)$ curve intersects with the -$\Aprx{v}_{({\prd-1})_\cntn}^{a}(3-\cCtrl)$; as the diagram shows, -this value is exactly equal to 2. Similarly, if we ask the routine -to find the optimal $\cCtrl$ for $\mStte=4$, it finds the point -of intersection of $\uFunc^{c}(\cCtrl)$ with -$\Aprx{v}_{({\prd-1})_\cntn}^{a}(4-\cCtrl)$; and as the diagram shows, -this intersection is only slightly above 2. Hence, this figure -illustrates why the numerical consumption function plotted earlier -returned values very close to $\cCtrl=2$ for both $\mStte=3$ and -$\mStte=4$. - -We would obviously obtain much better estimates of the point of intersection between $\uFunc^{c}(\cCtrl)$ and $v_{({\prd-1})_\cntn}^{a}(\mStte-\cCtrl)$ if our estimate of $\Aprx{v}^{a}_{({\prd-1})_\cntn}$ were not a step function.
In fact, we already know how to construct linear interpolations to functions, so the obvious next step is to construct a linear interpolating approximation to the \textit{expected marginal value of end-of-period assets function} at the points in $\vctr{a}$: -\begin{equation}\begin{gathered}\begin{aligned} - v_{({\prd-1})_\cntn}^{a}(\vctr{a}) & = \beta R \PermGroFacAdjMu \left(\frac{1}{n_{\TranShkEmp}}\right) \sum_{i=1}^{n_{\TranShkEmp}} \left(\mathcal{R}_{t} \vctr{a} + \TranShkEmp_{i}\right)^{-\rho} \label{eq:vEndPrimeTm1} - \end{aligned}\end{gathered}\end{equation} -yielding $\vctr{v}^{a}_{({\prd-1})_\cntn}$ (the vector of expected end-of-period-$(T-1)$ marginal values of assets corresponding to \code{aVec}), %$\{\{\vctr{a}}\code{_{\prdT-1}},\vFunc_{({\prdT-1})_\cntn}^{a}(\vctr{{a}[1]}_{\prdT-1}\},\{\vctr{a}_{(T-1)},\vFunc_{({\prdT-1})_\cntn}^{a}\}\ldots\}$ -and construct -$\Aprx{v}_{({\prd-1})_\cntn}^{a}(a_{\prd-1})$ as the linear -interpolating function that fits this set of points. - -\hypertarget{PlotOPRawVSFOC}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/PlotOPRawVSFOC}} - \caption{$v_{({\prd-1})_\cntn}^{a}(a_{\prd-1})$ versus $\Aprx{v}_{({\prd-1})_\cntn}^{a}(a_{\prd-1})$} - \label{fig:PlotOPRawVSFOC} -\end{figure} - - -% This is done by making a call to the \texttt{InterpolatedUnivariateSpline} function, passing it \code{aVec} and \texttt{vpVec} as arguments. Note that in defining the list of values \texttt{vpVec}, we again make use of the predefined \texttt{gothic.VP\_Tminus1} function. These steps are the embodiment of equation~(\ref{eq:vEndPrimeTm1}), and construct the interpolation of the expected marginal value of end-of-period assets as described above. - -The results are shown in Figure \ref{fig:PlotOPRawVSFOC}. The linear interpolating approximation looks roughly as good (or bad) for the \textit{marginal} value function as it was for the level of the value function. However, Figure \ref{fig:PlotcTm1ABC} shows that the new consumption function (long dashes) is a considerably better approximation of the true consumption function (solid) than was the consumption function obtained by approximating the level of the value function (short dashes). - -\hypertarget{PlotcTm1ABC}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/PlotcTm1ABC}} - \caption{$\cFunc_{\prd-1}(\mStte)$ (solid) Versus Two Methods for Constructing $\Aprx{\cFunc}_{\prd-1}(\mStte)$} - \label{fig:PlotcTm1ABC} -\end{figure} - -\hypertarget{transformation}{} -\subsection{Transformation}\label{subsec:transformation} - -Even the new-and-improved consumption function diverges notably from the true -solution, especially at lower values of $m$. That is because the -linear interpolation does an increasingly poor job of capturing the -nonlinearity of $v_{({\prd-1})_\cntn}^{a}(a_{\prd-1})$ at -lower and lower levels of $a$. - -This is where we unveil our next trick. To understand the logic, -start by considering the case where $\mathcal{R}_{t} = \beta = -G_{t} = 1$ and there is no uncertainty -\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Go over this - carefully.}}{} (that is, we know for sure that income next period -will be $\TranShkEmp_{t} = 1$). The final Euler equation (recall that we are still assuming that $t=T$) is then: -\begin{equation}\begin{gathered}\begin{aligned} - c_{\prd-1}^{-\rho} & = c_{t}^{-\rho}.
- \end{aligned}\end{gathered}\end{equation} - -In the case we are now considering with no uncertainty and no liquidity constraints, the optimizing consumer does not care whether a unit of income is scheduled to be received in the future period $t$ or the current period $t-1$; there is perfect certainty that the income will be received, so the consumer treats its PDV as equivalent to a unit of current wealth. Total resources available at the point when the consumption decision is made therefore comprise two types: current market resources $\mStte$ and `human wealth' (the PDV of future income) of $h_{\prd-1}=1$ (because human wealth is measured as of the end of the period, only one more period's income of 1 remains). - -The well-known optimal solution is to spend half of total lifetime resources in period $t-1$ and the remainder in period $t (=T)$. Since total resources are known with certainty to be $\mStte+h_{\prd-1}= \mStte+1$, and since $v_{\MidStp}^{m}(\mStte) = \uFunc^{c}(\cCtrl)$, this implies that\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Crucial point: this is \textit{marginal} value function in period $t-1$, which we were trying to approximate with a linear interpolating function earlier.}}{} -\begin{equation} - v^{m}_{\MidStpLsT}(\mStte) = \left(\frac{\mStte+1}{2}\right)^{-\rho} \label{eq:vPLin}. -\end{equation} -Of course, this is a highly nonlinear function. However, if we raise both sides of \eqref{eq:vPLin} to the power $(-1/\rho)$ the result is a linear function: -\begin{equation}\begin{gathered}\begin{aligned} - % \vInv^{m}_{\prdT-1}(\mStte) \equiv - \left[v^{m}_{\MidStpLsT}(\mStte)\right]^{-1/\rho} & = \frac{\mStte+1}{2} . - \end{aligned}\end{gathered}\end{equation} -This is a specific example of a general phenomenon: A theoretical literature discussed in~\cite{ckConcavity} establishes that under perfect certainty, if the period-by-period marginal utility function is of the form $c_{t}^{-\rho}$, the marginal value function will be of the form $(\gamma m_{t}+\zeta)^{-\rho}$ for some constants $\{\gamma,\zeta\}$. This means that if we were solving the perfect foresight problem numerically, we could always calculate a numerically exact (because linear) interpolation. - -To put the key insight in intuitive terms: the marginal value function is highly nonlinear largely because it involves raising something to the power $-\rho$. But that suggests a compelling solution, because we can, in effect, `unwind' all of the nonlinearity owing to that operation, and the remaining nonlinearity will not be nearly so great.
Specifically, applying the foregoing insights to the end-of-period marginal value function $v^{a}_{t_\cntn}(a)$, we can define an `inverse marginal value' function
-\begin{equation}\begin{gathered}\begin{aligned}
-  \vInv_{t_\cntn}^{a}(a) & \equiv \left(v^{a}_{t_\cntn}(a)\right)^{-1/\rho} \label{eq:cGoth}
-  \end{aligned}\end{gathered}\end{equation}
-which would be linear in the perfect foresight case.\footnote{There is a corresponding inverse for the value function: $\vInv_{t_\cntn}(a_{t})=((1-\rho)v_{t_\cntn})^{1/(1-\rho)}$, and for the marginal marginal value function, etc.} We then construct a piecewise-linear interpolating approximation to the $\vInv_{t_\cntn}^{a}$ function, $\Aprx{\vInv}_{t_\cntn}^{a}(a_{t})$, and for any $a$ that falls in the range $\{\vctr{a}[1],\vctr{a}[-1]\}$ we obtain our approximation of marginal value from:
-\begin{equation}\begin{gathered}\begin{aligned}
-  \Aprx{v}_{t_\cntn}^{a}(a) & =
-  [\Aprx{\vInv}_{t_\cntn}^{a}(a)]^{-\rho}.
-  \end{aligned}\end{gathered}\end{equation}
-
-The most interesting thing about all of this, though, is that the $\vInv^{a}_{t_\cntn}$ function has another interpretation. Recall our point in \eqref{eq:upEqbetaOp} that $\uFunc^{c}(c_{t}) = \vEndStp^{a}(m_{t}-c_{t})$. Since with CRRA utility $\uFunc^{c}(c)=c^{-\rho}$, this can be rewritten
-and inverted:
-\begin{equation}\begin{gathered}\begin{aligned}
-  (c_{t})^{-\rho} & = \vEndStp^{a}(a_{t})
-  \\ c_{t} & = \left(\vEndStp^{a}(a_{t})\right)^{-1/\rho}.
-  \end{aligned}\end{gathered}\end{equation}
-
-What this means is that for any given $a$, if we can calculate the marginal value associated with ending the period with that $a$, then we can learn the level of $c$ that the consumer must have chosen if they ended up with that $a$ as the result of an optimal unconstrained choice. This leads us to an alternative interpretation of $\vInv^{a}$. It is the function that reveals, for any ending $a$, how much the agent must have consumed to (optimally) get to that $a$. We will therefore henceforth refer to it as the `consumed function:'
-\begin{equation}\begin{gathered}\begin{aligned}
-  \Aprx{\cFunc}_{t_\cntn}(a_{t}) & \equiv \Aprx{\vInv}^{a}_{t_\cntn}(a_{t}) \label{eq:consumedfn}.
-  \end{aligned}\end{gathered}\end{equation}
-
-%\renewcommand{\prd}{T}
-Thus, for example, for period $\prdLsT$ our procedure is to calculate the vector of $\vctr{c}$ points on the consumed function:
-\begin{equation}\begin{gathered}\begin{aligned}
-  \vctr{c} & = \cFunc_{(\prdLsT)_\cntn}(\vctr{a}) \label{eq:consumedfnvecs}
-  \end{aligned}\end{gathered}\end{equation}
-with the idea that we will construct an approximation of the consumed function $\Aprx{\cFunc}_{(\prdLsT)_\cntn}$ as the interpolating function connecting these $\{\vctr{a},\vctr{c}\}$ points.
-
-\hypertarget{the-natural-borrowing-constraint-and-the-a-lower-bound}{}
-\subsection{The Natural Borrowing Constraint and the $a_{\prdLsT}$ Lower Bound} \label{subsec:LiqConstrSelfImposed}
-
-%\renewcommand{\prd}{T}
-This is the appropriate moment to ask an awkward question: How should an interpolated, approximated `consumed' function like $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a_{\prdLsT})$ be extrapolated to return an estimated `consumed' amount when evaluated at an $a_{\prdLsT}$ outside the range spanned by $\{\vctr{a}[1],...,\vctr{a}[n]\}$?
-
-
-For most canned piecewise-linear interpolation tools like \href{https://docs.scipy.org/doc/scipy/tutorial/interpolate.html}{scipy.interpolate}, when the `interpolating' function is evaluated at a point outside the provided range, the algorithm extrapolates under the assumption that the slope of the function remains constant beyond its measured boundaries (that is, the slope is assumed to be equal to the slope of the nearest piecewise segment \emph{within} the interpolated range). For example, if the bottommost gridpoint is $\aVecMin = \vctratm[1]$ and the corresponding consumed level is $\cMin = \cFunc_{(\prdLsT)_\cntn}(a_1)$, we could calculate the `marginal propensity to have consumed' $\varkappa_{1}=
-\Aprx{\cFunc}_{(\prdLsT)_\cntn}^{a}(\aVecMin)$ and construct the approximation as the linear extrapolation below $\vctratm[1]$ from:
-\begin{equation}\begin{gathered}\begin{aligned}
-  \Aprx{\cFunc}_{(\prdLsT)_\cntn}(a) & \equiv \cMin + (a-\aVecMin)\varkappa_{1} \label{eq:ExtrapLin}.
-  \end{aligned}\end{gathered}\end{equation}
-
-To see that this will lead us into difficulties, consider what happens to the true (not approximated) $v^{a}_{(\prdLsT)_\cntn}(a_{\prdLsT})$ as $a_{\prdLsT}$ approaches a quantity we will call the `natural borrowing constraint': $\NatBoroCnstra_{\prdLsT}=-\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$. From
-\eqref{eq:vEndPrimeTm1} we have
-\begin{equation}\begin{gathered}\begin{aligned}
-  \lim_{a \downarrow \NatBoroCnstra_{\prdLsT}} v^{a}_{(\prdLsT)_\cntn}(a)
-  & = \lim_{a \downarrow \NatBoroCnstra_{\prdLsT}} \beta R \PermGroFacAdjMu \left(\frac{1}{n_{\TranShkEmp}}\right) \sum_{i=1}^{n_{\TranShkEmp}} \left( a \mathcal{R}_{t}+ \TranShkEmp_{i}\right)^{-\rho}.
-  \end{aligned}\end{gathered}\end{equation}
-
-But since $\TranShkEmpMin=\TranShkEmp_{1}$, exactly at $a=\NatBoroCnstra_{\prdLsT}$ the first term in the summation would be $(-\TranShkEmpMin+\TranShkEmp_{1})^{-\rho}=1/0^{\rho}$, which is infinite. The reason is simple: $-\NatBoroCnstra_{\prdLsT}$ is the PDV, as of $\prdLsT$, of the \emph{minimum possible realization of income} in $t$ ($\mathcal{R}_{t}\NatBoroCnstra_{\prdLsT} = -\TranShkEmp_{1}$). Thus, if the consumer borrows an amount greater than or equal to $\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$ (that is, if the consumer ends $\prdLsT$ with $a_{\prdLsT} \leq -\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$) and then draws the worst possible income shock in period $t$, they will have to consume zero in period $t$, which yields $-\infty$ utility and $+\infty$ marginal utility.
-
-As \cite{zeldesStochastic} first noticed, this means that the consumer faces a `self-imposed' (or, as above, `natural') borrowing constraint (which springs from the precautionary motive): They will never borrow an amount greater than or equal to $\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$ (that is, assets will never reach the lower bound of $\NatBoroCnstra_{\prdLsT}$).
The constraint is `self-imposed' in the precise sense that if the utility function were different (say, Constant Absolute Risk Aversion), the consumer might be willing to borrow more than $\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$ because a choice of zero or negative consumption in period $t$ would yield some finite amount of utility.\footnote{Though it is very unclear what a proper economic interpretation of negative consumption might be -- this is an important reason why CARA utility, like quadratic utility, is increasingly not used for serious quantitative work, though it is still useful for teaching purposes.}
-
-%\providecommand{\aMin}{\underline{\aNrm}}
-This self-imposed constraint cannot be captured well when the $v^{a}_{(\prdLsT)_\cntn}$ function is approximated by a piecewise linear function like $\Aprx{v}^{a}_{(\prdLsT)_\cntn}$, because it is impossible for the linear extrapolation below $\aMin$ to correctly predict $v^{a}_{(\prdLsT)_\cntn}(\NatBoroCnstra_{\prdLsT})=\infty.$ %To see what will happen instead, note first that if we are approximating $\vFunc^{a}_{(\prdLsT)_\cntn}$ the smallest value in \code{aVec} must be greater than $\NatBoroCnstra_{\prdLsT}$ (because the expectation for any $a_{\prdLsT} \leq \NatBoroCnstra_{\prdLsT}$ is undefined).
-
-% When the approximating $\vFunc^{a}_{(\prdLsT)_\cntn}$ function is evaluated at some value less than the first element in \code{aVec}, a piecewise linear approximating function will linearly extrapolate the slope that characterized the lowest segment of the piecewise linear approximation (between \texttt{aVec[1]} and \texttt{aVec[2]}), a procedure that will return a positive finite number, even if the requested $a_{\prdLsT}$ point is below $\NatBoroCnstra_{\prdLsT}$. This means that the precautionary saving motive is understated, and by an arbitrarily large amount as the level of assets approaches its true theoretical minimum $\NatBoroCnstra_{\prdLsT}$.
-
-%\renewcommand{\prd}{T}
-So, the marginal value of saving approaches infinity as $a \downarrow \NatBoroCnstra_{\prdLsT}=-\underline{\TranShkEmp}\mathcal{R}_{t}^{-1}$. But this implies that $\lim_{a \downarrow \NatBoroCnstra_{\prdLsT}} \cFunc_{(\prdLsT)_\cntn}(a) = (v^{a}_{(\prdLsT)_\cntn}(a))^{-1/\rho} = 0$; that is, as $a$ approaches its `natural borrowing constraint' minimum possible value, the corresponding amount of worst-case $c$ must approach \textit{its} lower bound: zero.
-
-The upshot is that all we need to do to address these problems is to prepend each of the $\vctr{a}_{\code{\prdLsT}}$ and $\vctr{c}_{\code{\prdLsT}}$ from \eqref{eq:consumedfnvecs} with an extra point so that the first element in the mapping that produces our interpolation function is $\{\NatBoroCnstra_{\prdLsT},0.\}$. This is done in section ``The Self-Imposed `Natural' Borrowing Constraint and the $a_{\prdLsT}$ Lower Bound'' of the notebook.%which can be seen in the defined lists \texttt{aVecBot} and \texttt{cVec3Bot}.
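-
-To make the prepending step concrete, a minimal self-contained sketch is below. The parameter values, the three-point shock vector, and the names (\texttt{vP\_end}, \texttt{cFunc\_end}) are placeholders invented for this illustration, not the notebook's calibration or API; the notebook's own implementation lives in the section just cited.
-\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
-\begin{lstlisting}
-import numpy as np
-from scipy.interpolate import InterpolatedUnivariateSpline
-
-# Illustrative calibration (placeholders, not the notebook's values)
-rho, beta, R = 2.0, 0.96, 1.02
-theta = np.array([0.7, 1.0, 1.3])     # equiprobable transitory shocks
-natural = -theta.min() / R            # natural borrowing constraint
-
-def vP_end(a):
-    # expected end-of-period marginal value of assets (growth = 1)
-    return beta * R * np.mean((R * a + theta) ** (-rho))
-
-aVec = natural + np.array([0.1, 0.5, 1.0, 2.0, 4.0])
-cVec = np.array([vP_end(a) ** (-1.0 / rho) for a in aVec])
-
-# Prepend the exact limiting point {natural, 0.} so the interpolation
-# (and any extrapolation below it) is anchored at the constraint
-aBot = np.insert(aVec, 0, natural)
-cBot = np.insert(cVec, 0, 0.0)
-cFunc_end = InterpolatedUnivariateSpline(aBot, cBot, k=1)
-\end{lstlisting}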
-
-\Fix{\marginpar{\tiny The vertical axis should be relabeled - not gothic c anymore, instead $\vInv^{a}$}}{}
-
-\hypertarget{GothVInvVSGothC}{}
-\begin{figure}
-  \centerline{\includegraphics[width=6in]{\FigDir/GothVInvVSGothC}}
-  \caption{True $\vInv^{a}_{(\prdLsT)_\cntn}(a)$ vs its approximation $\Aprx{\vInv}^{a}_{(\prdLsT)_\cntn}(a)$}
-  \label{fig:GothVInvVSGothC}
-\end{figure}
-% \caption{True $\cFunc_{(\prdLsT)_\cntn}(\aNrm)$ vs its approximation $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(\aNrm)$}
-
-Figure\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny True $\cEndFunc$ is solid, linear approx is dashed.}}{} \ref{fig:GothVInvVSGothC} shows the result. The solid line calculates the exact numerical value of the consumed function $\cFunc_{(\prdLsT)_\cntn}(a)$ while the dashed line is the linear interpolating approximation $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a).$ This figure illustrates the value of the transformation: The true function is close to linear, and so the linear approximation is almost indistinguishable from the true function except at the very lowest values of $a$.
-
-Figure~\ref{fig:GothVVSGothCInv} similarly shows that when we generate $\Aprx{\Aprx{v}}_{(\prdLsT)_\cntn}^{a}(a)$ using our augmented $[\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a)]^{-\rho}$ (dashed line) we obtain a \textit{much} closer approximation to the true marginal value function $v^{a}_{(\prdLsT)_\cntn}(a)$ (solid line) than we obtained in the previous exercise, which did not use the transformation (Figure~\ref{fig:PlotOPRawVSFOC}).\footnote{The vertical axis label uses $\mathfrak{v}^{\prime}$ as an alternative notation for what in these notes we designate as $v^{a}_{\EndStpLsT}$. This will be fixed.}
-\Fix{\marginpar{\tiny fix the problem articulated in the footnote}}{}
-
-\hypertarget{GothVVSGothCInv}{}
-\begin{figure}
-  \centerline{\includegraphics[width=6in]{\FigDir/GothVVSGothCInv}}
-  \caption{True $v^{a}_{(\prdLsT)_\cntn}(a)$ vs. $\Aprx{\Aprx{v}}_{(\prdLsT)_\cntn}^{a}(a)$ Constructed Using $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a)$}
-  \label{fig:GothVVSGothCInv}
-\end{figure}
-
-\hypertarget{the-method-of-endogenous-gridpoints}{}
-\subsection{The Method of Endogenous Gridpoints (`EGM')}\label{subsec:egm}
-
-The solution procedure we articulated above for finding $\cFunc_{\prdLsT}(m)$ still requires us, for each point in $\vctr{m}\code{_{\prdLsT}}$, to use a numerical rootfinding algorithm to search for the value of $\cCtrl$ that solves $\uFunc^{c}(\cCtrl) = v^{a}_{(\prdLsT)_\cntn}(m-\cCtrl)$. Though sections \ref{subsec:transformation} and \ref{subsec:LiqConstrSelfImposed} developed a highly efficient and accurate procedure to calculate $\Aprx{v}^{a}_{(\prdLsT)_\cntn}$, those approximations do nothing to eliminate the need for using a rootfinding operation for calculating, for an arbitrary $m$, the optimal $c$. And rootfinding is a notoriously computation-intensive (that is, slow!) operation.
-
-Fortunately, it turns out that there is a way to completely skip this slow rootfinding step. The method can be understood by noting that we have already calculated, for a set of arbitrary values of $\vctr{a}=\vctr{a}\code{_{\prdLsT}}$, the corresponding $\vctr{c}$ values for which this $\vctr{a}$ is optimal.
-
-% (greater than its lower bound value $\aVecMin$) will be associated with \textit{some} marginal valuation as of the continuation ($\cntn$) step of $\prdLsT$ (that is, at the end of the period), and the further observation that it is trivial to find the value of $c$ that yields the same marginal valuation, using the first order condition,
-% \begin{equation}\begin{gathered}\begin{aligned}
-% \uFunc^{c}({\vctr{\cNrm}\code{_{\prdLsT}}}) & =
-% \vFunc^{a}_{(\prdLsT)_\cntn}(\vctr{a}_{\prdLsT}) \label{eq:eulerTm1}
-% \end{aligned}\end{gathered}\end{equation}
-% by using the inverse of the marginal utility function,
-% \begin{equation}\begin{gathered}\begin{aligned}
-% c^{-\CRRA} & = \mu
-% \\ c & = \mu^{-1/\CRRA}
-% \end{aligned}\end{gathered}\end{equation}
-% which yields the level of consumption that corresponds to marginal utility of $\mu.$
-% Using this to invert both sides of \eqref{eq:eulerTm1}, we get
-% \begin{equation}\begin{gathered}\begin{aligned}
-% {\vctr{\cNrm}\code{_{\prdLsT}}} & = \left(\vFunc^{a}_{(\prdLsT)_\cntn}(\vctr{a}_{\prdLsT})\right)^{-1/\CRRA}
-% % \\ & = (\vFunc^{a}_{(\prdLsT)_\cntn}(a_{T-1,i}))^{-1/\CRRA}
-% % \\ & \equiv \cFunc_{(\prdLsT)_\cntn}(a_{T-1,i})
-% % \\ & \equiv \cFunc_{(\prdLsT)_\cntn,i}
-% \end{aligned}\end{gathered}\end{equation}
-% where the $\cntn$ emphasizes that these are points on the `consumed' function (that is, the function that reveals how much an optimizing consumer must have consumed in order to have ended the period with $a_{T-1}$).
-
-But with mutually consistent values of $\vctr{c}\code{_{\prdLsT}}$ and $\vctr{a}\code{_{\prdLsT}}$ (consistent, in the sense that they are the unique optimal values that correspond to the solution to the problem), we can obtain the $\vctr{m}\code{_{\prdLsT}}$ vector that corresponds to both of them from
-\begin{equation}\begin{gathered}\begin{aligned}
-  \vctr{m}\code{_{\prdLsT}} & = {\vctr{c}\code{_{\prdLsT}}+\vctr{a}\code{_{\prdLsT}}}.
-  \end{aligned}\end{gathered}\end{equation}
-
-\ifthenelse{\boolean{ToFix}}{\marginpar{\tiny Rename gothic class, maybe to: EndPrd. Also, harmonize the notation in the notebook and in the paper - for example, everywhere in the text we use cNrm for normalized consumption, but for some reason it is capital C in the gothic function.}}{}
-
-These $m$ gridpoints are ``endogenous'' in contrast to the usual solution method of specifying some \textit{ex-ante} (exogenous) grid of values of $\vctr{m}$ and then using a rootfinding routine to locate the corresponding optimal consumption vector $\vctr{c}$.
-
-
-This routine is performed in the ``Endogenous Gridpoints'' section of the notebook. First, the \texttt{gothic.C\_Tminus1} function is called for each of the pre-specified values of end-of-period assets stored in \code{aVec}. These values of consumption and assets are used to produce the list of endogenous gridpoints, stored in the object \texttt{mVec\_egm}. With the $\vctr{c}$ values in hand, the notebook can generate a set of $\vctr{m}\code{_{\prdLsT}}$ and ${\vctr{c}\code{_{\prdLsT}}}$ pairs that can be interpolated between in order to yield $\Aprx{\cFunc}_{\MidStpLsT}(m)$ at virtually zero computational cost!\footnote{This is the essential point of \cite{carrollEGM}.} %This is done in the final line of code in this block, and the following code block produces the graph of the interpolated consumption function using this procedure.
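-
-A compact sketch of the whole EGM step, under the same style of placeholder calibration as the earlier sketch (the names here are invented for illustration and are not the \texttt{gothic} class API):
-\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
-\begin{lstlisting}
-import numpy as np
-from scipy.interpolate import InterpolatedUnivariateSpline
-
-rho, beta, R = 2.0, 0.96, 1.02              # placeholder calibration
-theta = np.array([0.7, 1.0, 1.3])           # equiprobable shocks
-aVec = np.array([0.1, 0.5, 1.0, 2.0, 4.0])  # exogenous a gridpoints
-
-# expected end-of-period marginal value at each a gridpoint
-vP = beta * R * np.mean((R * aVec + theta[:, None]) ** (-rho), axis=0)
-
-# invert the first-order condition u'(c) = vP analytically:
-# no rootfinding is required for CRRA utility
-cVec = vP ** (-1.0 / rho)
-
-# endogenous market-resources gridpoints, m = c + a
-mVec_egm = cVec + aVec
-cFunc_egm = InterpolatedUnivariateSpline(mVec_egm, cVec, k=1)
-\end{lstlisting}
-Because the inversion of CRRA marginal utility is available in closed form, the entire approximated consumption function is produced by a handful of vectorized array operations.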
-
-\hypertarget{PlotComparecTm1AD}{}
-One might worry about whether the $\{m,c\}$ points obtained in this way will provide a good representation of the consumption function as a whole, but in practice there are good reasons why they work well (basically, this procedure generates a set of gridpoints that is naturally dense right around the parts of the function with the greatest nonlinearity).
-\begin{figure}
-  \centerline{\includegraphics[width=6in]{\FigDir/PlotComparecTm1AD}}
-  \caption{$\cFunc_{\prdLsT}(m)$ (solid) versus $\Aprx{\cFunc}_{\prdLsT}(m)$ (dashed)}
-  \label{fig:ComparecTm1AD}
-\end{figure}
-Figure~\ref{fig:ComparecTm1AD} plots the actual consumption function $\cFunc_{\prdLsT}$ and the approximated consumption function $\Aprx{\cFunc}_{\prdLsT}$ derived by the method of endogenous grid points. Compared to the approximate consumption functions illustrated in Figure~\ref{fig:PlotcTm1ABC}, $\Aprx{\cFunc}_{\prdLsT}$ is quite close to the actual consumption function.
-
-
-\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Different transformation for $v$ than for $v^{a}$.}}{}
-
-\hypertarget{improving-the-a-grid}{}
-\subsection{Improving the $a$ Grid}\label{subsec:improving-the-a-grid}
-
-Thus far, we have arbitrarily used $a$ gridpoints of $\{0.,1.,2.,3.,4.\}$ (augmented in the last subsection by $\NatBoroCnstra_{\prdLsT}$). But it has been obvious from the figures that the approximated $\Aprx{\cFunc}_{(\prdLsT)_\cntn}$ function tends to be farthest from its true value at low values of $a$. Combining this with our insight that $\NatBoroCnstra_{\prdLsT}$ is a lower bound, we are now in a position to define a more deliberate method for constructing gridpoints for $a$ -- a method that yields values that are more densely spaced at low values of $a$ where the function is more nonlinear.
-
-A pragmatic choice that works well is to find the values such that (1) the last value \textit{exceeds the lower bound} by the same amount $\bar a$ as our original maximum gridpoint (in our case, 4.); (2) we have the same number of gridpoints as before; and (3) the \textit{multi-exponential growth rate} (that is, $e^{e^{e^{...}}}$ for some number of exponentiations $n$ -- our default is 3) from each point to the next point is constant (instead of, as previously, imposing constancy of the absolute gap between points).
-
-\hypertarget{GothVInvVSGothCEEE}{}
-\begin{figure}
-  \centerline{\includegraphics[width=6in]{\FigDir/GothVInvVSGothCEEE}}
-  \caption{$\cFunc_{(\prdLsT)_\cntn}(a)$ versus
-    $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a)$, Multi-Exponential \code{aVec}}
-  \label{fig:GothVInvVSGothCEE}
-\end{figure}
-
-
-\hypertarget{GothVVSGothCInvEEE}{}
-\begin{figure}
-  \includegraphics[width=6in]{\FigDir/GothVVSGothCInvEEE}
-  \caption{$v^{a}_{(\prdLsT)_\cntn}(a)$ vs.
-    $\Aprx{\Aprx{v}}_{(\prdLsT)_\cntn}^{a}(a)$, Multi-Exponential \code{aVec}}
-  \label{fig:GothVVSGothCInvEE}
-\end{figure}
-
-Section ``Improve the $\mathbb{A}_{grid}$'' begins by defining a function which takes as arguments the specifications of an initial grid of assets and returns the new grid incorporating the multi-exponential approach outlined above; a sketch of such a function appears below.
-
-
-Notice that the graphs depicted in Figures~\ref{fig:GothVInvVSGothCEE} and \ref{fig:GothVVSGothCInvEE} are notably closer to their respective truths than the corresponding figures that used the original grid.
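-
-One way to implement such a grid is sketched below; the function name and the nesting depth are placeholders for illustration, not the notebook's own definition.
-\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
-\begin{lstlisting}
-import numpy as np
-
-def multi_exp_grid(bot, top, n, nestings=3):
-    # points uniformly spaced after applying log(1+x) 'nestings'
-    # times, so gaps between points grow multi-exponentially
-    lo, hi = bot, top
-    for _ in range(nestings):
-        lo, hi = np.log(1.0 + lo), np.log(1.0 + hi)
-    pts = np.linspace(lo, hi, n)
-    for _ in range(nestings):
-        pts = np.exp(pts) - 1.0
-    return pts
-
-# five points spanning the same width (4.0) above an illustrative
-# natural borrowing constraint; the bottom point sits at the
-# constraint itself, where the consumed value is known to be zero
-natural = -0.686
-aVec_eee = natural + multi_exp_grid(0.0, 4.0, 5)
-# approximately [-0.69, -0.46, -0.05, 0.84, 3.31]
-\end{lstlisting}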
- -\subsection{Program Structure} - -In section ``Solve for $c_t(m)$ in Multiple Periods,'' the natural and artificial borrowing constraints are combined with the endogenous gridpoints method to approximate the optimal consumption function for a specific period. Then, this function is used to compute the approximated consumption in the previous period, and this process is repeated for some specified number of periods. - -The essential structure of the program is a loop that iteratively solves for consumption functions by working backward from an assumed final period, using the dictionary \texttt{cFunc\_life} to store the interpolated consumption functions up to the beginning period. Consumption in a given period is utilized to determine the endogenous gridpoints for the preceding period. This is the sense in which the computation of optimal consumption is done recursively. - -For a realistic life cycle problem, it would also be necessary at a -minimum to calibrate a nonconstant path of expected income growth over the -lifetime that matches the empirical profile; allowing for such -a calibration is the reason we have included the $\{G\}_{t}^{T}$ -vector in our computational specification of the problem. - -\hypertarget{results}{} -\subsection{Results} - -The code creates the relevant $\Aprx{\cFunc}_{t}(m)$ functions for any period in the horizon, at the given values of $m$. Figure \ref{fig:PlotCFuncsConverge} shows $\Aprx{\cFunc}_{T-n}(m)$ for $n=\{20,15,10,5,1\}$. At least one feature of this figure is encouraging: the consumption functions converge as the horizon extends, something that \cite{BufferStockTheory} shows must be true under certain parametric conditions that are satisfied by the baseline parameter values being used here. - -\hypertarget{PlotCFuncsConverge}{} -\begin{figure} - \includegraphics[width=6in]{\FigDir/PlotCFuncsConverge} - \caption{Converging $\Aprx{\cFunc}_{T-n}(m)$ Functions as $n$ Increases} - \label{fig:PlotCFuncsConverge} -\end{figure} - - diff --git a/docs/sec_solving-the-next-input.tex b/docs/sec_solving-the-next-input.tex deleted file mode 100644 index b98e0f607..000000000 --- a/docs/sec_solving-the-next-input.tex +++ /dev/null @@ -1,521 +0,0 @@ - -\hypertarget{solving-the-next-to-last-period}{} -\hypertarget{solving-the-next}{} -\section{Solving the Next-to-Last Period}\label{sec:solving-the-next} - -To reduce clutter, we now temporarily assume that $\PermGroFac_{\prd}=1$ for all $\prd$, so that the $\PermGroFac$ terms from the earlier derivations disappear, and setting $t=T$ the problem in the second-to-last period of life can now be expressed as -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{\MidPrdLsT}(\mStte) & = \max_{\cCtrl} ~~ \uFunc(\cCtrl) + - \vEndPrdLsT(\overbrace{\mStte-\cCtrl}^{\aStte}) - \label{eq:vEndTm1} -\end{aligned}\end{gathered}\end{equation} -where -\begin{equation*}\begin{gathered}\begin{aligned} - \vFunc_{\EndPrdLsT}(\aStte) & = \DiscFac \vFunc_{\BegPrd}(\aStte) -\\ & = \DiscFac \Ex_{\BegPrd} \left[\PermGroFacAdjV \vFunc_{\MidPrd}(\underbrace{\aStte \RNrm_{\prdT} + \TranShkEmp_{\prdT}}_{{m}_{\prdT}})\right] - \end{aligned}\end{gathered}\end{equation*} - -% \begin{equation*}\begin{gathered}\begin{aligned} -% \vFunc_{\prdLsT}(\mStte) & = \max_{\cCtrl} ~~ \uFunc(\cCtrl) -% + \DiscFac \Ex_{\EndPrdLsT} \left[\PermGroFacAdjV \vFunc_{\MidPrd}(\underbrace{(\mStte-\cCtrl)\RNrm_{\prdT} + \TranShkEmp_{\prdT}}_{{m}_{\prdT}})\right]. 
-% \end{aligned}\end{gathered}\end{equation*}
-
-
-Using (0) $\prd=\trmT$; (1) $\vFunc_{\prdT}(m)=\uFunc(m)$; (2) the definition of $\uFunc(m)$; and (3) the definition of the expectations operator, %\newcommand{\TranShkEmpDummy}{\vartheta}
-\begin{equation}\begin{gathered}\begin{aligned}
-  \vFunc_{\BegPrd}(\aStte) & = \PermGroFacAdjV\int_{0}^{\infty} \frac{\left(\aStte \RNrm_{\prd}+ \TranShkEmpDummy\right)^{1-\CRRA}}{1-\CRRA} d\FDist(\TranShkEmpDummy) \label{eq:NumDefInt}
-  \end{aligned}\end{gathered}\end{equation}
-where $\FDist(\TranShkEmp)$ is the cumulative distribution function for ${\TranShkEmp}$.
-
-\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
-\lstinputlisting{./Code/Python/snippets/rawsolution.py}
-
-This maximization problem implicitly defines a `local function' $\cFunc_{\prdT-1}(\mNrm)$ that yields optimal consumption in period $\prdT-1$ for any specific numerical level of resources like $m=1.7$.% (When we need to use this function from some context outside of the local context in which it was solved, we can reference by its absolute index, $\cFunc_{\prdT-1}$).
-
-But because there is no general analytical solution to this problem, for any given $m$ we must use numerical computational tools to find the $\cCtrl$ that maximizes the expression. This is excruciatingly slow because for every potential $c$ to be considered, a definite integral over the interval $(0,\infty)$ must be calculated numerically, and numerical integration is \textit{very} slow (especially over an unbounded domain!).
-
-\hypertarget{discretizing-the-distribution}{}
-\subsection{Discretizing the Distribution}
-Our first speedup trick is therefore to construct a discrete approximation to the lognormal distribution that can be used in place of numerical integration. That is, we want to approximate the expectation over $\TranShkEmp$ of a function $g(\TranShkEmp)$ by calculating its value at a set of $n_{\TranShkEmp}$ points $\TranShkEmp_{i}$, each of which has an associated probability weight $w_{i}$:
-\begin{equation*}\begin{gathered}\begin{aligned}
-  \Ex[g(\TranShkEmp)] & = \int_{\TranShkEmpMin}^{\TranShkEmpMax}g(\TranShkEmpDummy)d\FDist(\TranShkEmpDummy) \\
-  & \approx \sum_{i = 1}^{n_{\TranShkEmp}}w_{i}g(\TranShkEmp_{i})
-  \end{aligned}\end{gathered}\end{equation*}
-(because adding $n$ weighted values to each other is enormously faster than general-purpose numerical integration).
-
-Such a procedure is called a `quadrature' method of integration; \cite{Tanaka2013-bc} survey a number of options, but for our purposes we choose the one which is easiest to understand: An `equiprobable' approximation (that is, one where each of the values of $\TranShkEmp_{i}$ has an equal probability, equal to $1/n_{\TranShkEmp}$).
-
-We calculate such an $n$-point approximation as follows.
-
-Define a set of points from $\sharp_{0}$ to $\sharp_{n_{\TranShkEmp}}$ on the $[0,1]$ interval
-as the elements of the set $\sharp = \{0,1/n,2/n, \ldots,1\}$.\footnote{These points define intervals that constitute a partition of the domain of $\FDist$.} Call the inverse of the $\TranShkEmp$ distribution $\FDist^{-1}_{\phantom{\TranShkEmp}}$, and define the
-points $\sharp^{-1}_{i} = \FDist^{-1}_{\phantom{\TranShkEmp}}(\sharp_{i})$.
Then
-the conditional mean of $\TranShkEmp$ in each of the intervals numbered 1 to $n$ is:
-\begin{equation}\begin{gathered}\begin{aligned}
-  \TranShkEmp_{i} \equiv \Ex[\TranShkEmp | \sharp_{i-1}^{-1} \leq \TranShkEmp < \sharp_{i}^{-1}] & = n_{\TranShkEmp}\int_{\sharp^{-1}_{i-1}}^{\sharp^{-1}_{i}} \vartheta ~ d\FDist_{\phantom{\TranShkEmp}}(\vartheta) ,
-  \end{aligned}\end{gathered}\end{equation}
-where the leading factor $n_{\TranShkEmp}$ divides by the probability $1/n_{\TranShkEmp}$ that $\TranShkEmp$ falls in interval $i$; when the integral is evaluated numerically for each $i$ the result is a set of values of $\TranShkEmp$ that correspond to the mean value in each of the $n$ intervals.
-
-The method is illustrated in Figure~\ref{fig:discreteapprox}. The solid continuous curve represents
-the ``true'' CDF $\FDist(\TranShkEmp)$ for a lognormal distribution such that $\Ex[\TranShkEmp] = 1$, $\sigma_{\TranShkEmp} = 0.1$. The short vertical line segments represent the $n_{\TranShkEmp}$
-equiprobable values of $\TranShkEmp_{i}$ which are used to approximate this
-distribution.\footnote{More sophisticated approximation methods exist
-  (e.g.\ Gauss-Hermite quadrature; see \cite{kopecky2010finite} for a discussion of other alternatives), but the method described here is easy to understand, quick to calculate, and has additional advantages briefly described in the discussion of simulation below.}
- \hypertarget{discreteApprox}{}
- \begin{figure}
-   \includegraphics[width=0.8\textwidth]{\econtexRoot/Figures/discreteApprox}
-   \caption{Equiprobable Discrete Approximation to Lognormal Distribution $\FDist$}
-   \label{fig:discreteapprox}
- \end{figure}
-
-
-Because one of the purposes of these notes is to connect the math to the code that solves the math, we display here a brief snippet from the notebook that constructs these points.
-
-
-\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
-\lstinputlisting{./Code/Python/snippets/equiprobable-make.py}\nopagebreak
-
-With the discretized points in hand, the end-of-period value function \eqref{eq:NumDefInt} can be approximated by the simple average
- \begin{equation}\begin{gathered}\begin{aligned}
-  \vFunc_{\EndPrdLsT}(\aStte) & = \DiscFac \PermGroFacAdjV\left(\frac{1}{n_{\TranShkEmp}}\right)\sum_{i=1}^{n_{\TranShkEmp}} \frac{\left(\RNrm_{\prd} \aStte + \TranShkEmp_{i}\right)^{1-\CRRA}}{1-\CRRA} \label{eq:vDiscrete}
-  \end{aligned}\end{gathered}\end{equation}
-
-We now substitute our approximation \eqref{eq:vDiscrete}, which is simply a sum of $n_{\TranShkEmp}$ numbers and is therefore easy to calculate (compared to the full-fledged numerical integration \eqref{eq:NumDefInt} that it replaces), for $\vEndPrdLsT(a)$ in \eqref{eq:vEndTm1}.
-
-% so we can rewrite the maximization problem that defines the middle step of period {$\prdLst$} as
-% \begin{equation}\begin{gathered}\begin{aligned}
-% \vFunc_{\MidPrdLsT}(\mStte) & = \max_{\cCtrl}
-% \left\{
-% \frac{\cCtrl^{1-\CRRA}}{1-\CRRA} +
-% \vFunc_{\MidPrd}(\mStte-\cCtrl)
-% \right\}.
-% \label{eq:vEndTm1}
-% \end{aligned}\end{gathered}\end{equation}
-
-\lstinputlisting{./Code/Python/snippets/equiprobable-max-using.py}
-
-\begin{comment}
-  In the {\SMDSOPntbk} notebook, the section ``Discretization of the Income Shock Distribution'' provides code that instantiates the \texttt{DiscreteApproximation} class defined in the \texttt{resources} module. This class creates a 7-point discretization of the continuous log-normal distribution of transitory shocks to income by utilizing seven points, where the mean value is $-.5 \sigma^2$, and the standard deviation is $\sigma = .5$.
- - A close look at the \texttt{DiscreteApproximation} class and its subclasses should convince you that the code is simply a computational implementation of the mathematical description of equiprobable discrete approximation in this section. Moreover, the Python code generates a graph of the discretized distribution depicted in \ref{fig:discreteapprox}. -\end{comment} - -\hypertarget{the-approximate-consumption-and-value-functions}{} -\subsection{The Approximate Consumption and Value Functions} - -Given any particular value of $\mStte$, a numerical maximization tool can now find the $\cCtrl$ that solves \eqref{eq:vEndTm1} in a reasonable amount of time. - -\begin{comment} - % The {\SMDSOPntbk} notebook follows a series of steps to achieve this. Initially, parameter values for the coefficient of relative risk aversion (CRRA, $\rho$), the discount factor ($\beta$), the permanent income growth factor ($\PermGroFac$), and the risk-free interest rate ($R$ are specified in ``Define Parameters, Grids, and the Utility Function.'') - - % After defining the utility function, the `natural borrowing constraint' is defined as $\underline{a}_{\prdT-1}=-\underline{\TranShkEmp}\RNrm_{\prdT}^{-1}$, which will be discussed in greater depth in section \ref{subsec:LiqConstrSelfImposed}. %Following the reformulation of the maximization problem, an instance of the \texttt{gothic\_class} is created using the specifications and the discretized distribution described in the prior lines of code; this is required to provide the numerical solution. -\end{comment} - -The notebook code responsible for computing an estimated consumption function begins in ``Solving the Model by Value Function Maximization,'' where a vector containing a set of possible values of market resources $m$ is created (in the code, various $m$ vectors have names beginning {\mVec}; in these notes we will use boldface italics to represent vectors, so we can refer to our collection of $m$ points as $\vctr{m}$ with values indexed by brackets: $\vctr{m}[1]$ is the first entry in the vector, up to a last entry $\vctr{m}[-1]$; we arbitrarily (and suboptimally) pick the first five integers as our five {\mVec} gridpoints (in the code, \code{mVec\_int}= $\{0.,1.,2.,3.,4.\}$)). - -% Finally, the previously computed values of optimal $c$ and the grid of market resources are combined to generate a graph of the approximated consumption function for this specific instance of the problem. To reduce the computational challenge of solving the problem, the process is evaluated only at a small number of gridpoints. - - -\hypertarget{an-interpolated-consumption-function}{} -\subsection{An Interpolated Consumption Function} \label{subsec:LinInterp} - -We can now apply our solution to \eqref{eq:vEndTm1} to each of the values in $\vctr{m}$, generating a corresponding optimal $\vctr{c}$. This is called `sampling' the consumption function. 
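-
-For concreteness, a self-contained sketch of this sampling step is below. The calibration, the three-point shock vector, and the function names are placeholders for illustration (the notebook instead uses its predefined utility and \texttt{gothic} machinery):
-\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
-\begin{lstlisting}
-import numpy as np
-from scipy.optimize import minimize_scalar
-
-rho, beta, R = 2.0, 0.96, 1.02       # placeholder calibration
-theta = np.array([0.7, 1.0, 1.3])    # equiprobable transitory shocks
-
-def u(c):
-    return c ** (1.0 - rho) / (1.0 - rho)
-
-def v_end(a):
-    # discretized expected value of ending the period with assets a
-    return beta * np.mean(u(R * a + theta))
-
-mVec = np.array([0.0, 1.0, 2.0, 3.0, 4.0])   # mVec_int in the text
-cVec = np.empty_like(mVec)
-for j, m in enumerate(mVec):
-    # maximize u(c) + v_end(m - c); c is kept away from 0 and from
-    # the level that would push a below its feasible minimum
-    res = minimize_scalar(lambda c: -(u(c) + v_end(m - c)),
-                          bounds=(1e-6, m + theta.min() / R - 1e-6),
-                          method="bounded")
-    cVec[j] = res.x
-\end{lstlisting}
-The resulting $\{\vctr{m},\vctr{c}\}$ pairs are exactly the raw material for the interpolation step described next.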
Using the ordered pairs $\{\vctr{m},\vctr{c}\}$ we can create a piecewise linear `interpolating function' (a `spline') which, when applied to any input $\vctr{m}[1] \leq m \leq \vctr{m}[-1]$, will yield the value of $\cNrm$ implied by a linear `connect-the-dots' interpolation between the two nearest computed $\{\mNrm,\cNrm\}$ points.\footnote{For a useful treatment of various kinds of interpolation appropriate for different questions, see } % AL: Please provide href for your interpolation package
-
-This is accomplished in ``An Interpolated Consumption Function,'' which generates an interpolating function that we designate $\Aprx{\cFunc}_{\MidStpLsT}(\mStte)$. %When called with an $\mStte$ that is equal to one of the points in $\code{{{\mVec}\_int}}$, $\Aprx{\cFunc}_{\prdT-1}$ returns the associated value of $\vctr{c}_{\code{\prdT-1}}$, and when called with a value of $\mStte$ that is not exactly equal to one of the \texttt{mVec\_int}, returns the value of $c$ that reflects a linear interpolation between the $\vctr{c}_{\code{\prdT-1}}$ points associated with the two \texttt{mVec\_int} points immediately above and below $\mStte$.
-
-Figures \ref{fig:PlotcTm1Simple} and~\ref{fig:PlotVTm1Simple} show
-plots of the constructed $\Aprx{\cFunc}_{\prdT-1}$ and $\Aprx{\vFunc}_{\prdT-1}$. While the $\Aprx{\cFunc}_{\prdT-1}$ function looks very smooth, the fact that the $\Aprx{\vFunc}_{\prdT-1}$ function is a set of line segments is very evident. This figure provides the beginning of the intuition for why trying to approximate the value function directly is a bad idea (in this context).\footnote{For some problems, especially ones with discrete choices, value function approximation is unavoidable; nevertheless, even in such problems, the techniques sketched below can be very useful across much of the range over which the problem is defined.}
-
-\hypertarget{PlotcTm1Simple}{}
-\begin{figure}
-  \centerline{\includegraphics[width=6in]{\FigDir/PlotcTm1Simple}}
-  \caption{$\cFunc_{\trmT-1}(\mStte)$ (solid) versus $\Aprx{\cFunc}_{\trmT-1}(\mStte)$ (dashed)}
-  \label{fig:PlotcTm1Simple}
-\end{figure}
-
-\hypertarget{PlotvTm1Simple}{}
-\begin{figure}
-  \centerline{\includegraphics[width=6in]{\FigDir/PlotVTm1Simple}}
-  \caption{$\vFunc_{\trmT-1}$ (solid) versus $\Aprx{\vFunc}_{\trmT-1}(\mStte)$ (dashed)}
-  \label{fig:PlotVTm1Simple}
-\end{figure}
-
-
-\hypertarget{interpolating-expectations}{}
-\subsection{Interpolating Expectations}
-
-\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Good approximation in the sense that increasing the number of points makes no discernible difference.}}{}
-
-Piecewise linear `spline' interpolation as described above works well for generating a good approximation to the true optimal consumption function. However, there is a clear inefficiency in the program: Since it uses equation \eqref{eq:vEndTm1}, for every value of $\mStte$ the program must calculate the utility consequences of various possible choices of $\cCtrl$ (and therefore $\aNrm_{\prdT-1}$) as it searches for the best choice.
-
-For any given index $j$, as the algorithm searches for the optimal $a$ corresponding to $\vctr{m}[j]$, it will end up calculating $\vFunc_{\EndPrdLsT}(\tilde{a})$ for many $\tilde{a}$ values close to the optimal $a_{\prdT-1}$.
Indeed, even when searching for the optimal $a$ for a \emph{different} $m$ (say $\vctr{m}[k]$ for $k \neq j$) the search process might compute $\vFunc_{\EndPrdLsT}(a)$ for an $a$ close to the correct optimal $a$ for $\vctr{m}[j]$. But if that difficult computation does not correspond to the exact solution to the $\vctr{m}[k]$ problem, it is discarded.
-
-To avoid solving the problem independently over and over again for multitudes of values of $a$ that are close to each other, we can employ the same interpolation technique used above to construct a direct numerical approximation to the value function: Define a vector of possible values for end-of-period assets at time $\prdT-1$, $\vctr{a}$ (\code{aVec} in the code). Next, construct $\vctr{v} = \vFunc_{({\prdT-1})_\cntn}(\vctr{a})$ using equation (\ref{eq:vDiscrete}); then construct an approximation $\Aprx{\vFunc}_{({\prdT-1})_\cntn}(a)$ by passing the vectors $\vctr{a}$ and $\vctr{v}$ as arguments to a piecewise-linear interpolator (e.g., the one in \texttt{scipy.interpolate}).%
-% (These lists contain the points of the $\vctr{a}_{{\prdT-1}}$ and $\vctr{v}_{{\prdT-1}}$ vectors, respectively.)
-
-The notebook section ``Interpolating Expectations'' now interpolates the expected value of \textit{ending} the period with a given amount of assets.\footnote{What we are doing here is closely related to `the method of parameterized expectations' of \cite{denHaanMarcet:parameterized}; the only difference is that our method is essentially a nonparametric version.} %The problem is solved in the same block with the remaining lines of code.
-
-Figure~\ref{fig:PlotOTm1RawVSInt} compares the true value function to the approximation produced by following the interpolation procedure; the approximated and exact functions are of course identical at the gridpoints of $\vctr{a}$ and they appear reasonably close except in the region below $\mStte=1$.
-
-\hypertarget{PlotOTm1RawVSInt}{}
-\begin{figure}
-  \centerline{\includegraphics[width=6in]{\FigDir/PlotOTm1RawVSInt}}
-  \caption{End-Of-Period Value $\vFunc_{({\prdT-1})_\cntn}(a_{\prdT-1})$ (solid) versus $\Aprx{\vFunc}_{({\trmT-1})_\cntn}(a_{\trmT-1})$ (dashed)}
-  \label{fig:PlotOTm1RawVSInt}
-\end{figure}
-
-\hypertarget{PlotComparecTm1AB}{}
-\begin{figure}
-  \centerline{\includegraphics[width=6in]{\FigDir/PlotComparecTm1AB}}
-  \caption{$\cFunc_{\trmT-1}(\mStte)$ (solid) versus $\Aprx{\cFunc}_{\trmT-1}(\mStte)$ (dashed)}
-  \label{fig:PlotComparecTm1AB}
-\end{figure}
-
-\Fix{\marginpar{\tiny In all figs, replace gothic h with notation corresponding to the lecture notes.}}{}
-
-% \ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Don't skip the 2-3-3-4 example in the text - it will be used again in a moment.}}{}
-Nevertheless, the consumption rule obtained when the approximating $\Aprx{\vFunc}_{({\prdT-1})_\cntn}(a_{\prdT-1})$ is used instead of $\vFunc_{({\prdT-1})_\cntn}(a_{\prdT-1})$ is surprisingly bad, as shown in Figure \ref{fig:PlotComparecTm1AB}. For example, when $\mStte$ goes from 2 to 3, $\Aprx{\cFunc}_{\prdT-1}$ goes from about 1 to about 2, yet when $\mStte$ goes from 3 to 4, $\Aprx{\cFunc}_{\prdT-1}$ goes from about 2 to about 2.05. The function fails even to be concave, which is distressing because Carroll and Kimball~\citeyearpar{ckConcavity} prove that the correct consumption function is strictly concave in a wide class of problems that includes this one.
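-
-For reference, the interpolation of expectations described at the start of this subsection takes only a few lines in scipy; a minimal sketch under a placeholder calibration (again, the names and parameter values are invented for this illustration):
-\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
-\begin{lstlisting}
-import numpy as np
-from scipy.interpolate import InterpolatedUnivariateSpline
-
-rho, beta, R = 2.0, 0.96, 1.02       # placeholder calibration
-theta = np.array([0.7, 1.0, 1.3])    # equiprobable shocks
-
-def v_end(a):
-    # discretized expected end-of-period value, as in the text
-    return beta * np.mean((R * a + theta) ** (1.0 - rho) / (1.0 - rho))
-
-aVec = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
-vVec = np.array([v_end(a) for a in aVec])
-
-# piecewise-linear (k=1) approximation to end-of-period value; in
-# the maximization loop, v_end(m - c) can now be replaced by the
-# much cheaper v_end_hat(m - c)
-v_end_hat = InterpolatedUnivariateSpline(aVec, vVec, k=1)
-\end{lstlisting}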
-
-\hypertarget{value-function-versus-first-order-condition}{}
-\subsection{Value Function versus First Order Condition}\label{subsec:vVsuP}
-
-Loosely speaking, our difficulty reflects the fact that the
-consumption choice is governed by the \textit{marginal} value function,
-not by the \textit{level} of the value function (which is the object that
-we approximated). To understand this point, recall that a quadratic
-utility function
-\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Intuitively speaking, if one's goal is to accurately capture behavior that is governed by marginal utility or the marginal value function, numerical techniques that approximate the \textit{marginal} value function are likely to work better.}}{} exhibits
-risk aversion because with a stochastic $c$,
-\begin{equation}
-  \Ex[-(c - \cancel{c})^{2}] < - (\Ex[c] - \cancel{c})^{2}
-\end{equation}
-(where $\cancel{c}$ is the `bliss point' which is assumed always to exceed feasible $c$). However, unlike the CRRA utility function,
-with quadratic utility the consumption/saving \textit{behavior} of consumers
-is unaffected by risk since behavior is determined by the first order condition, which
-depends on \textit{marginal} utility, and when utility is quadratic, marginal utility is unaffected
-by risk:
-\begin{equation}
-  \Ex[-2(c - \cancel{c})] = - 2(\Ex[c] - \cancel{c}).
-\end{equation}
-
-Intuitively, if one's goal is to accurately capture choices
-that are governed by marginal value,
-numerical techniques that approximate the \textit{marginal} value
-function will yield a more accurate approximation to
-optimal behavior than techniques that approximate the \textit{level}
-of the value function.
-
-The first order condition of the maximization problem in period $\trmT-1$ is:
- \begin{equation}\begin{gathered}\begin{aligned}
-  \uFunc^{c}(\cCtrl) & = \DiscFac \Ex_{\cntn(T-1)} [\PermGroFacAdjMu\Rfree \uFunc^{c}(c_{\prdT})] %\label{eq:focraw}
-  \\ \cCtrl^{-\CRRA} & = \Rfree \DiscFac \left(\frac{1}{n_{\TranShkEmp}}\right) \sum_{i=1}^{n_{\TranShkEmp}} \PermGroFacAdjMu\left(\Rfree (\mStte-\cCtrl) + \TranShkEmp_{i}\right)^{-\CRRA} \label{eq:FOCTm1}.
-  \end{aligned}\end{gathered}\end{equation}
-\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Go from the first to the last equation in \eqref{eq:FOCTm1} by substituting $\uFunc^{c}(c)=c^{-\CRRA}$ and using the approximation to the integral.}}{}
-\hypertarget{PlotuPrimeVSOPrime}{}
-\begin{figure}
-  \centerline{\includegraphics[width=6in]{\FigDir/PlotuPrimeVSOPrime}}
-  \caption{$\uFunc^{c}(c)$ versus $\vFunc_{({\trmT-1})_\cntn}^{a}(3-c), \vFunc_{({\trmT-1})_\cntn}^{a}(4-c), \Aprx{\vFunc}_{({\trmT-1})_\cntn}^{a}(3-c), \Aprx{\vFunc}_{({\trmT-1})_\cntn}^{a}(4-c)$}
-  \label{fig:PlotuPrimeVSOPrime}
-\end{figure}
-
-In the notebook, the ``Value Function versus the First Order Condition'' section completes the task of finding the values of consumption which satisfy the first order condition in \eqref{eq:FOCTm1} using the \href{https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brentq.html}{\texttt{brentq}} function from the \texttt{scipy} package (a minimal sketch of this step appears below). %Notice that the use of \texttt{u.prime} and \texttt{gothic.VP\_Tminus1} is possible since they are already defined in the \texttt{resources} and \texttt{gothic\_class} modules.
-
-The downward-sloping curve in Figure \ref{fig:PlotuPrimeVSOPrime}
-shows the value of $\cCtrl^{-\CRRA}$ for our baseline parameter values
-for $0 \leq \cCtrl \leq 4$ (the horizontal axis).
The solid
-upward-sloping curve shows the value of the RHS of (\ref{eq:FOCTm1})
-as a function of $\cCtrl$ under the assumption that $\mStte=3$.
-Constructing this figure is time-consuming, because for every
-value of $\cCtrl$ plotted we must calculate the RHS of
-(\ref{eq:FOCTm1}). The value of $\cCtrl$ for which the RHS and LHS
-of (\ref{eq:FOCTm1}) are equal is the optimal level of consumption
-given that $\mStte=3$, so the intersection of the downward-sloping
-and the upward-sloping curves gives the (approximated) optimal value of $\cCtrl$.
-As we can see, the two curves intersect just below $\cCtrl=2$.
-Similarly, the upward-sloping dashed curve shows the value
-of the RHS of (\ref{eq:FOCTm1}) under the assumption that $\mStte=4$,
-and the intersection of this curve with $\uFunc^{c}(\cCtrl)$ yields the
-optimal level of consumption if $\mStte=4$. These two curves
-intersect slightly below $\cCtrl=2.5$. Thus, increasing $\mStte$
-from 3 to 4 increases optimal consumption by about 0.5.
-
-\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Flip back to Figure
-    4 to make the point that $\Aprx{\vEnd}^{a}$ is a step
-    function.}}{} Now consider the derivative of our function
-$\Aprx{\vFunc}_{({\prdT-1})_\cntn}(a_{\prdT-1})$. Because we have constructed
-$\Aprx{\vFunc}_{({\prdT-1})_\cntn}$ as a linear interpolation, the slope of
-$\Aprx{\vFunc}_{({\prdT-1})_\cntn}(a_{\prdT-1})$ between any two adjacent
-points $\{\vctr{a}[i],\vctr{a}[i+1]\}$ is constant. The
-level of the slope immediately below any particular gridpoint is
-different, of course, from the slope above that gridpoint, a fact
-which implies that the derivative of
-$\Aprx{\vFunc}_{({\prdT-1})_\cntn}(a_{\prdT-1})$ follows a step function.
-
-The solid-line step function in Figure \ref{fig:PlotuPrimeVSOPrime}
-depicts the actual value of
-$\Aprx{\vFunc}_{({\prdT-1})_\cntn}^{a}(3-\cCtrl)$. When we attempt to find
-optimal values of $\cCtrl$ given $\mStte$ using
-$\Aprx{\vFunc}_{({\prdT-1})_\cntn}(a_{\prdT-1})$, the numerical optimization
-routine will return the $\cCtrl$ for which $\uFunc^{c}(\cCtrl) =
-\Aprx{\vFunc}^{a}_{({\prdT-1})_\cntn}(\mStte-\cCtrl)$. Thus, for
-$\mStte=3$ the program will return the value of $\cCtrl$ for
-which the downward-sloping $\uFunc^{c}(\cCtrl)$ curve intersects the
-$\Aprx{\vFunc}_{({\prdT-1})_\cntn}^{a}(3-\cCtrl)$ step function; as the diagram shows,
-this value is exactly equal to 2. Similarly, if we ask the routine
-to find the optimal $\cCtrl$ for $\mStte=4$, it finds the point
-of intersection of $\uFunc^{c}(\cCtrl)$ with
-$\Aprx{\vFunc}_{({\prdT-1})_\cntn}^{a}(4-\cCtrl)$; and as the diagram shows,
-this intersection is only slightly above 2. Hence, this figure
-illustrates why the numerical consumption function plotted earlier
-returned values very close to $\cCtrl=2$ for both $\mStte=3$ and
-$\mStte=4$.
-
-We would obviously obtain much better estimates of the point of intersection between $\uFunc^{c}(\cCtrl)$ and $\vFunc_{({\prdT-1})_\cntn}^{a}(\mStte-\cCtrl)$ if our estimate of $\Aprx{\vFunc}^{a}_{({\prdT-1})_\cntn}$ were not a step function.
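-
-Anticipating the construction described next, a minimal sketch of the rootfinding step is below: if the \textit{marginal} value of assets is interpolated directly, \texttt{brentq} recovers well-behaved optima. All names and parameter values here are placeholders for illustration, not the notebook's code.
-\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
-\begin{lstlisting}
-import numpy as np
-from scipy.interpolate import InterpolatedUnivariateSpline
-from scipy.optimize import brentq
-
-rho, beta, R = 2.0, 0.96, 1.02       # placeholder calibration
-theta = np.array([0.7, 1.0, 1.3])    # equiprobable shocks
-
-def vP_end(a):
-    # expected end-of-period marginal value of assets
-    return beta * R * np.mean((R * a + theta) ** (-rho))
-
-aVec = np.array([0.1, 1.0, 2.0, 3.0, 4.0])
-vP_hat = InterpolatedUnivariateSpline(
-    aVec, [vP_end(a) for a in aVec], k=1)
-
-def c_opt(m):
-    # solve u'(c) = vP_hat(m - c); the bracket endpoints yield
-    # opposite signs, so brentq is guaranteed to find the root
-    foc = lambda c: c ** (-rho) - vP_hat(m - c)
-    return brentq(foc, 1e-6, m - 1e-6)
-
-print(c_opt(3.0), c_opt(4.0))  # roughly 2.0 and 2.5, as in the figure
-\end{lstlisting}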
In fact, we already know how to construct linear interpolations to functions, so the obvious next step is to construct a linear interpolating approximation to the \textit{expected marginal value of end-of-period assets function} at the points in $\vctr{a}$: -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{({\prdT-1})_\cntn}^{a}(\vctr{a}) & = \DiscFac \Rfree \PermGroFacAdjMu \left(\frac{1}{n_{\TranShkEmp}}\right) \sum_{i=1}^{n_{\TranShkEmp}} \left(\RNrm_{\prdT} \vctr{a} + \TranShkEmp_{i}\right)^{-\CRRA} \label{eq:vEndPrimeTm1} - \end{aligned}\end{gathered}\end{equation} -yielding $\vctr{v}{^{a}_{({\prdT-1})_\cntn}}$ (the vector of expected end-of-period-$(T-1)$ marginal values of assets corresponding to \code{aVec}), %$\{\{\vctr{a}}\code{_{\prdT-1}},\vFunc_{({\prdT-1})_\cntn}^{a}(\vctr{{a}[1]}_{\prdT-1}\},\{\vctr{a}_{(T-1)},\vFunc_{({\prdT-1})_\cntn}^{a}\}\ldots\}$ -and construct -$\Aprx{\vFunc}_{({\prdT-1})_\cntn}^{a}(a_{\prdT-1})$ as the linear -interpolating function that fits this set of points. - -\hypertarget{PlotOPRawVSFOC}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/PlotOPRawVSFOC}} - \caption{$\vFunc_{({\prdT-1})_\cntn}^{a}(a_{\prdT-1})$ versus $\Aprx{\vFunc}_{({\prdT-1})_\cntn}^{a}(a_{\prdT-1})$} - \label{fig:PlotOPRawVSFOC} -\end{figure} - - -% This is done by making a call to the \texttt{InterpolatedUnivariateSpline} function, passing it \code{aVec} and \texttt{vpVec} as arguments. Note that in defining the list of values \texttt{vpVec}, we again make use of the predefined \texttt{gothic.VP\_Tminus1} function. These steps are the embodiment of equation~(\ref{eq:vEndPrimeTm1}), and construct the interpolation of the expected marginal value of end-of-period assets as described above. - -The results are shown in Figure \ref{fig:PlotOPRawVSFOC}. The linear interpolating approximation looks roughly as good (or bad) for the \textit{marginal} value function as it was for the level of the value function. However, Figure \ref{fig:PlotcTm1ABC} shows that the new consumption function (long dashes) is a considerably better approximation of the true consumption function (solid) than was the consumption function obtained by approximating the level of the value function (short dashes). - -\hypertarget{PlotcTm1ABC}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/PlotcTm1ABC}} - \caption{$\cFunc_{\prdT-1}(\mStte)$ (solid) Versus Two Methods for Constructing $\Aprx{\cFunc}_{\prdT-1}(\mStte)$} - \label{fig:PlotcTm1ABC} -\end{figure} - -\hypertarget{transformation}{} -\subsection{Transformation}\label{subsec:transformation} - -Even the new-and-improved consumption function diverges notably from the true -solution, especially at lower values of $m$. That is because the -linear interpolation does an increasingly poor job of capturing the -nonlinearity of $\vFunc_{({\prdT-1})_\cntn}^{a}(a_{\prdT-1})$ at -lower and lower levels of $a$. - -This is where we unveil our next trick. To understand the logic, -start by considering the case where $\RNrm_{\prdT} = \DiscFac = -\PermGroFac_{\prdT} = 1$ and there is no uncertainty -\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Go over this - carefully.}}{} (that is, we know for sure that income next period -will be $\TranShkEmp_{\prdT} = 1$). The final Euler equation (recall that we are still assuming that $\prd=\trmT$) is then: -\begin{equation}\begin{gathered}\begin{aligned} - \cNrm_{\prdT-1}^{-\CRRA} & = c_{\prdT}^{-\CRRA}. 
- \end{aligned}\end{gathered}\end{equation}
-
-In the case we are now considering with no uncertainty and no liquidity constraints, the optimizing consumer does not care whether a unit of income is scheduled to be received in the future period $\prdT$ or the current period $\prdT-1$; there is perfect certainty that the income will be received, so the consumer treats its PDV as equivalent to a unit of current wealth. Total resources available at the point when the consumption decision is made therefore comprise two types: current market resources $\mStte$ and `human wealth' (the PDV of future income) of $\hNrm_{\prdT-1}=1$ (because it is the value of human wealth as of the end of the period, there is only one more period of income of 1 left).
-
-The well-known optimal solution is to spend half of total lifetime resources in period $\prdT-1$ and the remainder in period $\prdT (=\trmT)$. Since total resources are known with certainty to be $\mStte+\hNrm_{\prdT-1}= \mStte+1$, and since $\vFunc_{\MidStp}^{m}(\mStte) = \uFunc^{c}(\cCtrl)$, this implies that\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Crucial point: this is the \textit{marginal} value function in period $\prdT-1$, which we were trying to approximate with a linear interpolating function earlier.}}{}
-\begin{equation}
-  \vFunc^{m}_{\MidStpLsT}(\mStte) = \left(\frac{\mStte+1}{2}\right)^{-\CRRA} \label{eq:vPLin}.
-\end{equation}
-Of course, this is a highly nonlinear function. However, if we raise both sides of \eqref{eq:vPLin} to the power $(-1/\CRRA)$ the result is a linear function:
-\begin{equation}\begin{gathered}\begin{aligned}
-  % \vInv^{m}_{\prdT-1}(\mStte) \equiv
-  \left[\vFunc^{m}_{\MidStpLsT}(\mStte)\right]^{-1/\CRRA} & = \frac{\mStte+1}{2} .
-  \end{aligned}\end{gathered}\end{equation}
-This is a specific example of a general phenomenon: A theoretical literature discussed in~\cite{ckConcavity} establishes that under perfect certainty, if the period-by-period marginal utility function is of the form $\cNrm_{\prd}^{-\CRRA}$, the marginal value function will be of the form $(\gamma m_{\prd}+\zeta)^{-\CRRA}$ for some constants $\{\gamma,\zeta\}$. This means that if we were solving the perfect foresight problem numerically, we could always calculate a numerically exact (because linear) interpolation.
-
-To put the key insight in intuitive terms, the nonlinearity we are facing springs in large part from the fact that the marginal value function is highly nonlinear. But we have a compelling solution to that problem, because the nonlinearity springs largely from the fact that we are raising something to the power $-\CRRA$. In effect, we can `unwind' all of the nonlinearity owing to that operation and the remaining nonlinearity will not be nearly so great.
Specifically, applying the foregoing insights to the end-of-period marginal value function $\vFunc^{a}_{\prd_\cntn}(\aNrm)$, we can define an `inverse marginal value' function
-\begin{equation}\begin{gathered}\begin{aligned}
-  \vInv_{\prd_\cntn}^{a}(a) & \equiv \left(\vFunc^{a}_{\prd_\cntn}(a)\right)^{-1/\CRRA} \label{eq:cGoth}
-  \end{aligned}\end{gathered}\end{equation}
-which would be linear in the perfect foresight case.\footnote{There is a corresponding inverse for the value function: $\vInv_{\prd_\cntn}(a_{\prd})=((1-\CRRA)\vFunc_{\prd_\cntn})^{1/(1-\CRRA)}$, and for the marginal marginal value function, etc.} We then construct a piecewise-linear interpolating approximation to the $\vInv_{\prd_\cntn}^{a}$ function, $\Aprx{\vInv}_{\prd_\cntn}^{a}(a_{\prd})$, and for any $a$ that falls in the range $\{\vctr{a}[1],\vctr{a}[-1]\}$ we obtain our approximation of marginal value from:
-\begin{equation}\begin{gathered}\begin{aligned}
-  \Aprx{\vFunc}_{\prd_\cntn}^{a}(a) & =
-  [\Aprx{\vInv}_{\prd_\cntn}^{a}(a)]^{-\CRRA}.
-  \end{aligned}\end{gathered}\end{equation}
-
-The most interesting thing about all of this, though, is that the $\vInv^{a}_{\prd_\cntn}$ function has another interpretation. Recall our point in \eqref{eq:upEqbetaOp} that $\uFunc^{c}(c_{\prd}) = \vEndStp^{a}(m_{\prd}-c_{\prd})$. Since with CRRA utility $\uFunc^{c}(c)=c^{-\CRRA}$, this can be rewritten
-and inverted:
-\begin{equation}\begin{gathered}\begin{aligned}
-  (c_{\prd})^{-\CRRA} & = \vEndStp^{a}(a_{\prd})
-  \\ c_{\prd} & = \left(\vEndStp^{a}(a_{\prd})\right)^{-1/\CRRA}.
-  \end{aligned}\end{gathered}\end{equation}
-
-What this means is that for any given $a$, if we can calculate the marginal value associated with ending the period with that $a$, then we can learn the level of $c$ that the consumer must have chosen if they ended up with that $a$ as the result of an optimal unconstrained choice. This leads us to an alternative interpretation of $\vInv^{a}$. It is the function that reveals, for any ending $a$, how much the agent must have consumed to (optimally) get to that $a$. We will therefore henceforth refer to it as the `consumed function:'
-\begin{equation}\begin{gathered}\begin{aligned}
-  \Aprx{\cFunc}_{\prd_\cntn}(a_{\prd}) & \equiv \Aprx{\vInv}^{a}_{\prd_\cntn}(a_{\prd}) \label{eq:consumedfn}.
-  \end{aligned}\end{gathered}\end{equation}
-
-%\renewcommand{\prd}{T}
-Thus, for example, for period $\prdLsT$ our procedure is to calculate the vector of $\vctr{c}$ points on the consumed function:
-\begin{equation}\begin{gathered}\begin{aligned}
-  \vctr{c} & = \cFunc_{(\prdLsT)_\cntn}(\vctr{a}) \label{eq:consumedfnvecs}
-  \end{aligned}\end{gathered}\end{equation}
-with the idea that we will construct an approximation of the consumed function $\Aprx{\cFunc}_{(\prdLsT)_\cntn}$ as the interpolating function connecting these $\{\vctr{a},\vctr{c}\}$ points.
-
-\hypertarget{the-natural-borrowing-constraint-and-the-a-lower-bound}{}
-\subsection{The Natural Borrowing Constraint and the $a_{\prdLsT}$ Lower Bound} \label{subsec:LiqConstrSelfImposed}
-
-%\renewcommand{\prd}{T}
-This is the appropriate moment to ask an awkward question: How should an interpolated, approximated `consumed' function like $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a_{\prdLsT})$ be extrapolated to return an estimated `consumed' amount when evaluated at an $a_{\prdLsT}$ outside the range spanned by $\{\vctr{a}[1],...,\vctr{a}[n]\}$?
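-
-Before answering, it is worth checking what a canned interpolator actually does outside its grid; a minimal demonstration with arbitrary sample points (nothing here is specific to the consumption problem):
-\lstset{basicstyle=\ttfamily\footnotesize,breaklines=true,language=Python,frame=single}
-\begin{lstlisting}
-import numpy as np
-from scipy.interpolate import InterpolatedUnivariateSpline
-
-x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
-y = np.sqrt(x)                               # arbitrary concave data
-f = InterpolatedUnivariateSpline(x, y, k=1)  # piecewise linear
-
-# below x[0] and above x[-1] the spline simply continues the slope
-# of the nearest interior segment:
-print(f(-1.0))  # -1.0   (first-segment slope is 1.0)
-print(f(5.0))   # about 2.27 (last-segment slope is about 0.27)
-\end{lstlisting}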
-
-
-For most canned piecewise-linear interpolation tools like \href{https://docs.scipy.org/doc/scipy/tutorial/interpolate.html}{scipy.interpolate}, when the `interpolating' function is evaluated at a point outside the provided range, the algorithm extrapolates under the assumption that the slope of the function remains constant beyond its measured boundaries (that is, the slope is assumed to be equal to the slope of the nearest piecewise segment \emph{within} the interpolated range). For example, if the bottommost gridpoint is $\aVecMin = \vctratm[1]$ and the corresponding consumed level is $\cMin = \cFunc_{(\prdLsT)_\cntn}(a_1)$, we could calculate the `marginal propensity to have consumed' $\varkappa_{1}=
-\Aprx{\cFunc}_{(\prdLsT)_\cntn}^{a}(\aVecMin)$ and construct the approximation as the linear extrapolation below $\vctratm[1]$ from:
-\begin{equation}\begin{gathered}\begin{aligned}
-  \Aprx{\cFunc}_{(\prdLsT)_\cntn}(a) & \equiv \cMin + (a-\aVecMin)\varkappa_{1} \label{eq:ExtrapLin}.
-  \end{aligned}\end{gathered}\end{equation}
-
-To see that this will lead us into difficulties, consider what happens to the true (not approximated) $\vFunc^{a}_{(\prdLsT)_\cntn}(a_{\prdLsT})$ as $a_{\prdLsT}$ approaches a quantity we will call the `natural borrowing constraint': $\NatBoroCnstra_{\prdLsT}=-\underline{\TranShkEmp}\RNrm_{\prdT}^{-1}$. From
-\eqref{eq:vEndPrimeTm1} we have
-\begin{equation}\begin{gathered}\begin{aligned}
-  \lim_{\aNrm \downarrow \NatBoroCnstra_{\prdLsT}} \vFunc^{a}_{(\prdLsT)_\cntn}(\aNrm)
-  & = \lim_{\aNrm \downarrow \NatBoroCnstra_{\prdLsT}} \DiscFac \Rfree \PermGroFacAdjMu \left(\frac{1}{n_{\TranShkEmp}}\right) \sum_{i=1}^{n_{\TranShkEmp}} \left( \aNrm \RNrm_{\prdT}+ \TranShkEmp_{i}\right)^{-\CRRA}.
-  \end{aligned}\end{gathered}\end{equation}
-
-But since $\TranShkEmpMin=\TranShkEmp_{1}$, exactly at $\aNrm=\NatBoroCnstra_{\prdLsT}$ the first term in the summation would be $(-\TranShkEmpMin+\TranShkEmp_{1})^{-\CRRA}=1/0^{\CRRA}$, which is infinite. The reason is simple: $-\NatBoroCnstra_{\prdLsT}$ is the PDV, as of $\prdLsT$, of the \emph{minimum possible realization of income} in $\prdT$ ($\RNrm_{\prdT}\NatBoroCnstra_{\prdLsT} = -\TranShkEmp_{1}$). Thus, if the consumer borrows an amount greater than or equal to $\underline{\TranShkEmp}\RNrm_{\prdT}^{-1}$ (that is, if the consumer ends $\prdLsT$ with $a_{\prdLsT} \leq -\underline{\TranShkEmp}\RNrm_{\prdT}^{-1}$) and then draws the worst possible income shock in period $\prdT$, they will have to consume zero in period $\prdT$, which yields $-\infty$ utility and $+\infty$ marginal utility.
-
-As \cite{zeldesStochastic} first noticed, this means that the consumer faces a `self-imposed' (or, as above, `natural') borrowing constraint (which springs from the precautionary motive): They will never borrow an amount greater than or equal to $\underline{\TranShkEmp}\RNrm_{\prdT}^{-1}$ (that is, assets will never reach the lower bound of $\NatBoroCnstra_{\prdLsT}$).
The constraint is `self-imposed' in the precise sense that if the utility function were different (say, Constant Absolute Risk Aversion), the consumer might be willing to borrow more than $\underline{\TranShkEmp}\RNrm_{\prdT}^{-1}$ because a choice of zero or negative consumption in period $\prdT$ would yield some finite amount of utility.\footnote{Though it is very unclear what a proper economic interpretation of negative consumption might be -- this is an important reason why CARA utility, like quadratic utility, is increasingly not used for serious quantitative work, though it is still useful for teaching purposes.} - -%\providecommand{\aMin}{\underline{\aNrm}} -This self-imposed constraint cannot be captured well when the $\vFunc^{a}_{(\prdLsT)_\cntn}$ function is approximated by a piecewise linear function like $\Aprx{\vFunc}^{a}_{(\prdLsT)_\cntn}$, because it is impossible for the linear extrapolation below $\aMin$ to correctly predict $\vFunc^{a}_{(\prdLsT)_\cntn}(\NatBoroCnstra_{\prdLsT})=\infty.$ %To see what will happen instead, note first that if we are approximating $\vFunc^{a}_{(\prdLsT)_\cntn}$ the smallest value in \code{aVec} must be greater than $\NatBoroCnstra_{\prdLsT}$ (because the expectation for any $a_{\prdLsT} \leq \NatBoroCnstra_{\prdLsT}$ is undefined). - -% When the approximating $\vFunc^{a}_{(\prdLsT)_\cntn}$ function is evaluated at some value less than the first element in \code{aVec}, a piecewise linear approximating function will linearly extrapolate the slope that characterized the lowest segment of the piecewise linear approximation (between \texttt{aVec[1]} and \texttt{aVec[2]}), a procedure that will return a positive finite number, even if the requested $a_{\prdLsT}$ point is below $\NatBoroCnstra_{\prdLsT}$. This means that the precautionary saving motive is understated, and by an arbitrarily large amount as the level of assets approaches its true theoretical minimum $\NatBoroCnstra_{\prdLsT}$. - -%\renewcommand{\prd}{T} -So, the marginal value of saving approaches infinity as $\aNrm \downarrow \NatBoroCnstra_{\prdLsT}=-\underline{\TranShkEmp}\RNrm_{\prdT}^{-1}$. But this implies that $\lim_{\aNrm \downarrow \NatBoroCnstra_{\prdLsT}} \cFunc_{(\prdLsT)_\cntn}(\aNrm) = (\vFunc^{a}_{(\prdLsT)_\cntn}(\aNrm))^{-1/\CRRA} = 0$; that is, as $a$ approaches its `natural borrowing constraint' minimum possible value, the corresponding consumed amount $c$ must approach \textit{its} lower bound: zero. - -The upshot is the realization that all we need to do to address these problems is to prepend each of the $\vctr{a}_{\code{\prdLsT}}$ and $\vctr{c}_{\code{\prdLsT}}$ from \eqref{eq:consumedfnvecs} with an extra point so that the first element in the mapping that produces our interpolation function is $\{\NatBoroCnstra_{\prdLsT},0.\}$. This is done in section ``The Self-Imposed `Natural' Borrowing Constraint and the $a_{\prdLsT}$ Lower Bound'' of the notebook.%which can be seen in the defined lists \texttt{aVecBot} and \texttt{cVec3Bot}.
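-For concreteness, here is a minimal sketch of this construction in Python (the parameter values, shock vector, and names below are illustrative stand-ins, not the notebook's actual code):
-\begin{verbatim}
-import numpy as np
-from scipy.interpolate import interp1d
-
-# Illustrative (not calibrated) parameter values
-rho, R, Gamma, beta = 2.0, 1.04, 1.03, 0.96
-theta = np.array([0.7, 1.0, 1.3])      # equiprobable transitory shocks
-RNrm = R / Gamma                       # growth-normalized return factor
-natural = -theta.min() / RNrm          # natural borrowing constraint
-
-def vP_end(a):
-    # end-of-period marginal value: beta R Gamma^(-rho) E[(a RNrm + theta)^(-rho)]
-    return beta * R * Gamma**(-rho) * np.mean((a * RNrm + theta) ** (-rho))
-
-aVec = natural + np.array([0.1, 0.5, 1.0, 2.0, 3.0, 4.0])
-cVec = np.array([vP_end(a) ** (-1.0 / rho) for a in aVec])  # 'consumed' points
-
-# Prepend the limiting point {natural, 0.} so the interpolation pins
-# down c -> 0 as a approaches the natural borrowing constraint
-aVec = np.insert(aVec, 0, natural)
-cVec = np.insert(cVec, 0, 0.0)
-cFunc_end = interp1d(aVec, cVec, fill_value="extrapolate")
-\end{verbatim}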
- -\Fix{\marginpar{\tiny The vertical axis should be relabeled - not gothic c anymore, instead $\vInv^{a}$}}{} - -\hypertarget{GothVInvVSGothC}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/GothVInvVSGothC}} - \caption{True $\vInv^{a}_{(\prdLsT)_\cntn}(\aNrm)$ vs its approximation $\Aprx{\vInv}^{a}_{(\prdLsT)_\cntn}(\aNrm)$} - \label{fig:GothVInvVSGothC} -\end{figure} -% \caption{True $\cFunc_{(\prdLsT)_\cntn}(\aNrm)$ vs its approximation $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(\aNrm)$} - -Figure\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny True $\cEndFunc$ is solid, linear approx is dashed.}}{} \ref{fig:GothVInvVSGothC} shows the result. The solid line plots the exact numerical value of the consumed function $\cFunc_{(\prdLsT)_\cntn}(\aNrm)$ while the dashed line is the linear interpolating approximation $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(\aNrm).$ This figure illustrates the value of the transformation: The true function is close to linear, and so the linear approximation is almost indistinguishable from the true function except at the very lowest values of $\aNrm$. - -Figure~\ref{fig:GothVVSGothCInv} similarly shows that when we generate $\Aprx{\Aprx{\vFunc}}_{(\prdLsT)_\cntn}^{a}(a)$ using our augmented $[\Aprx{\cFunc}_{(\prdLsT)_\cntn}(a)]^{-\CRRA}$ (dashed line) we obtain a \textit{much} closer approximation to the true marginal value function $\vFunc^{a}_{(\prdLsT)_\cntn}(a)$ (solid line) than we obtained in the previous exercise which did not do the transformation (Figure~\ref{fig:PlotOPRawVSFOC}).\footnote{The vertical axis label uses $\mathfrak{v}^{\prime}$ as an alternative notation for what in these notes we designate as $\vFunc^{a}_{\EndStpLsT}$. This will be fixed.} -\Fix{\marginpar{\tiny fix the problem articulated in the footnote}}{} - -\hypertarget{GothVVSGothCInv}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/GothVVSGothCInv}} - \caption{True $\vFunc^{a}_{(\prdLsT)_\cntn}(\aNrm)$ vs. $\Aprx{\Aprx{\vFunc}}_{(\prdLsT)_\cntn}^{a}(\aNrm)$ Constructed Using $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(\aNrm)$} - \label{fig:GothVVSGothCInv} -\end{figure} - -\hypertarget{the-method-of-endogenous-gridpoints}{} -\subsection{The Method of Endogenous Gridpoints (`EGM')}\label{subsec:egm} - -The solution procedure we articulated above for finding $\cFunc_{\prdLsT}(m)$ still requires us, for each point in $\vctr{m}\code{_{\prdLsT}}$, to use a numerical rootfinding algorithm to search for the value of $\cCtrl$ that solves $\uFunc^{c}(\cCtrl) = \vFunc^{a}_{(\prdLsT)_\cntn}(m-\cCtrl)$. Though sections \ref{subsec:transformation} and \ref{subsec:LiqConstrSelfImposed} developed a highly efficient and accurate procedure to calculate $\Aprx{\vFunc}^{a}_{(\prdLsT)_\cntn}$, those approximations do nothing to eliminate the need for using a rootfinding operation for calculating, for an arbitrary $m$, the optimal $c$. And rootfinding is a notoriously computation-intensive (that is, slow!) operation. - -Fortunately, it turns out that there is a way to completely skip this slow rootfinding step. The method can be understood by noting that we have already calculated, for a set of arbitrary values of $\vctr{a}=\vctr{a}\code{_{\prdLsT}}$, the corresponding $\vctr{c}$ values for which this $\vctr{a}$ is optimal.
- -% (greater than its lower bound value $\aVecMin$) will be associated with \textit{some} marginal valuation as of the continuation ($\cntn$) step of $\prdLsT$ (that is, at the end of the period), and the further observation that it is trivial to find the value of $c$ that yields the same marginal valuation, using the first order condition, -% \begin{equation}\begin{gathered}\begin{aligned} -% \uFunc^{c}({\vctr{\cNrm}\code{_{\prdLsT}}}) & = -% \vFunc^{a}_{(\prdLsT)_\cntn}(\vctr{a}_{\prdLsT}) \label{eq:eulerTm1} -% \end{aligned}\end{gathered}\end{equation} -% by using the inverse of the marginal utility function, -% \begin{equation}\begin{gathered}\begin{aligned} -% c^{-\CRRA} & = \mu -% \\ c & = \mu^{-1/\CRRA} -% \end{aligned}\end{gathered}\end{equation} -% which yields the level of consumption that corresponds to marginal utility of $\mu.$ -% Using this to invert both sides of \eqref{eq:eulerTm1}, we get -% \begin{equation}\begin{gathered}\begin{aligned} -% {\vctr{\cNrm}\code{_{\prdLsT}}} & = \left(\vFunc^{a}_{(\prdLsT)_\cntn}(\vctr{a}_{\prdLsT})\right)^{-1/\CRRA} -% % \\ & = (\vFunc^{a}_{(\prdLsT)_\cntn}(a_{T-1,i}))^{-1/\CRRA} -% % \\ & \equiv \cFunc_{(\prdLsT)_\cntn}(a_{T-1,i}) -% % \\ & \equiv \cFunc_{(\prdLsT)_\cntn,i} -% \end{aligned}\end{gathered}\end{equation} -% where the $\cntn$ emphasizes that these are points on the `consumed' function (that is, the function that reveals how much an optimizing consumer must have consumed in order to have ended the period with $a_{T-1}$). - -But with mutually consistent values of $\vctr{c}\code{_{\prdLsT}}$ and $\vctr{a}\code{_{\prdLsT}}$ (consistent, in the sense that they are the unique optimal values that correspond to the solution to the problem), we can obtain the $\vctr{m}\code{_{\prdLsT}}$ vector that corresponds to both of them from -\begin{equation}\begin{gathered}\begin{aligned} - \vctr{m}\code{_{\prdLsT}} & = {\vctr{\cNrm}\code{_{\prdLsT}}+\vctr{a}\code{_{\prdLsT}}}. - \end{aligned}\end{gathered}\end{equation} - -\ifthenelse{\boolean{ToFix}}{\marginpar{\tiny Rename gothic class, maybe to: EndPrd. Also, harmonize the notation in the notebook and in the paper - for example, everywhere in the text we use cNrm for normalized consumption, but for some reason it is capital C in the gothic function.}}{} - -These $m$ gridpoints are ``endogenous'' in contrast to the usual solution method of specifying some \textit{ex-ante} (exogenous) grid of values of $\vctr{m}$ and then using a rootfinding routine to locate the corresponding optimal consumption vector $\vctr{c}$. - - -This routine is performed in the ``Endogenous Gridpoints'' section of the notebook. First, the \texttt{gothic.C\_Tminus1} function is called for each of the pre-specified values of end-of-period assets stored in \code{aVec}. These values of consumption and assets are used to produce the list of endogenous gridpoints, stored in the object \texttt{mVec\_egm}. With the $\vctr{\cNrm}$ values in hand, the notebook can generate a set of $\vctr{m}\code{_{\prdLsT}}$ and ${\vctr{\cNrm}\code{_{\prdLsT}}}$ pairs that can be interpolated between in order to yield $\Aprx{\cFunc}_{\MidStpLsT}(\mNrm)$ at virtually zero computational cost!\footnote{This is the essential point of \cite{carrollEGM}.} %This is done in the final line of code in this block, and the following code block produces the graph of the interpolated consumption function using this procedure.
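-Under the same illustrative assumptions as the sketch above (again, a sketch of the idea rather than the notebook's code), the entire EGM step amounts to a few lines with no rootfinding at all:
-\begin{verbatim}
-# Endogenous gridpoints: invert the FOC u'(c) = v'_a(a) on the asset grid
-aGrid = natural + np.array([0.1, 0.5, 1.0, 2.0, 3.0, 4.0])
-cGrid = np.array([vP_end(a) ** (-1.0 / rho) for a in aGrid])
-mGrid = cGrid + aGrid                  # endogenous cash-on-hand gridpoints
-
-# The constrained limit {natural, 0.} lies on the consumption function
-# in (m, c) space too, since m = c + a -> natural as c -> 0
-mGrid = np.insert(mGrid, 0, natural)
-cGrid = np.insert(cGrid, 0, 0.0)
-cFunc_egm = interp1d(mGrid, cGrid, fill_value="extrapolate")
-\end{verbatim}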
- -\hypertarget{PlotComparecTm1AD}{} -One might worry about whether the $\{{m},c\}$ points obtained in this way will provide a good representation of the consumption function as a whole, but in practice there are good reasons why they work well (basically, this procedure generates a set of gridpoints that is naturally dense right around the parts of the function with the greatest nonlinearity). -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/PlotComparecTm1AD}} - \caption{$\cFunc_{\prdLsT}(m)$ (solid) versus $\Aprx{\cFunc}_{\prdLsT}(m)$ (dashed)} - \label{fig:ComparecTm1AD} -\end{figure} -Figure~\ref{fig:ComparecTm1AD} plots the actual consumption function $\cFunc_{\prdLsT}$ and the approximated consumption function $\Aprx{\cFunc}_{\prdLsT}$ derived by the method of endogenous grid points. Compared to the approximate consumption functions illustrated in Figure~\ref{fig:PlotcTm1ABC}, $\Aprx{\cFunc}_{\prdLsT}$ is quite close to the actual consumption function. - - -\ifthenelse{\boolean{MyNotes}}{\marginpar{\tiny Different transformation for $\vFunc$ than for $\vFunc^{a}$.}}{} - -\hypertarget{improving-the-a-grid}{} -\subsection{Improving the $\aNrm$ Grid}\label{subsec:improving-the-a-grid} - -Thus far, we have arbitrarily used $\aNrm$ gridpoints of $\{0.,1.,2.,3.,4.\}$ (augmented in the last subsection by $\NatBoroCnstra_{\prdLsT}$). But it has been obvious from the figures that the approximated $\Aprx{\cFunc}_{(\prdLsT)_\cntn}$ function tends to be farthest from its true value at low values of $a$. Combining this with our insight that $\NatBoroCnstra_{\prdLsT}$ is a lower bound, we are now in position to define a more deliberate method for constructing gridpoints for $\aNrm$ -- a method that yields values that are more densely spaced at low values of $a$ where the function is more nonlinear. - -A pragmatic choice that works well is to find the values such that (1) the last value \textit{exceeds the lower bound} by the same amount $\bar\aNrm$ as our original maximum gridpoint (in our case, 4.); (2) we have the same number of gridpoints as before; and (3) the \textit{multi-exponential growth rate} (that is, $e^{e^{e^{...}}}$ for some number of exponentiations $n$ -- our default is 3) from each point to the next point is constant (instead of, as previously, imposing constancy of the absolute gap between points). - -\hypertarget{GothVInvVSGothCEEE}{} -\begin{figure} - \centerline{\includegraphics[width=6in]{\FigDir/GothVInvVSGothCEEE}} - \caption{$\cFunc_{(\prdLsT)_\cntn}(\aNrm)$ versus - $\Aprx{\cFunc}_{(\prdLsT)_\cntn}(\aNrm)$, Multi-Exponential \code{aVec}} - \label{fig:GothVInvVSGothCEE} -\end{figure} - - -\hypertarget{GothVVSGothCInvEEE}{} -\begin{figure} - \includegraphics[width=6in]{\FigDir/GothVVSGothCInvEEE} - \caption{$\vFunc^{a}_{(\prdLsT)_\cntn}(\aNrm)$ vs. - $\Aprx{\Aprx{\vFunc}}_{(\prdLsT)_\cntn}^{a}(\aNrm)$, Multi-Exponential \code{aVec}} - \label{fig:GothVVSGothCInvEE} -\end{figure} - -Section ``Improve the $\mathbb{A}_{grid}$'' begins by defining a function which takes as arguments the specifications of an initial grid of assets and returns the new grid incorporating the multi-exponential approach outlined above. - - -Notice that the graphs depicted in Figures~\ref{fig:GothVInvVSGothCEE} and \ref{fig:GothVVSGothCInvEE} are notably closer to their respective truths than the corresponding figures that used the original grid. 
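-Concretely, a grid construction in this spirit can be sketched as follows (the function name and its nested $\log(1+x)$ transformation are our choices; the notebook's routine may differ in details):
-\begin{verbatim}
-import numpy as np
-
-def multi_exp_grid(bottom, top, n, timestonest=3):
-    # points equally spaced after applying log(1+x) `timestonest` times,
-    # hence denser near `bottom`, where the function is most nonlinear
-    lo, hi = float(bottom), float(top)
-    for _ in range(timestonest):
-        lo, hi = np.log(1 + lo), np.log(1 + hi)
-    grid = np.linspace(lo, hi, n)
-    for _ in range(timestonest):
-        grid = np.exp(grid) - 1
-    return grid
-
-# e.g., six points from the natural borrowing constraint up to natural + 4
-aVecEEE = natural + multi_exp_grid(0.0, 4.0, 6)
-\end{verbatim}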
- -\subsection{Program Structure} - -In section ``Solve for $c_t(\mNrm)$ in Multiple Periods,'' the natural and artificial borrowing constraints are combined with the endogenous gridpoints method to approximate the optimal consumption function for a specific period. Then, this function is used to compute the approximated consumption in the previous period, and this process is repeated for some specified number of periods. - -The essential structure of the program is a loop that iteratively solves for consumption functions by working backward from an assumed final period, using the dictionary \texttt{cFunc\_life} to store the interpolated consumption functions back to the beginning period. The consumption function for a given period is used to determine the endogenous gridpoints for the preceding period. This is the sense in which the computation of optimal consumption is done recursively. - -For a realistic life cycle problem, it would also be necessary at a -minimum to calibrate a nonconstant path of expected income growth over the -lifetime that matches the empirical profile; allowing for such -a calibration is the reason we have included the $\{\PermGroFac\}_{\prd}^{T}$ -vector in our computational specification of the problem. - -\hypertarget{results}{} -\subsection{Results} - -The code creates the relevant $\Aprx{\cFunc}_{\prd}(\mNrm)$ functions for any period in the horizon, at the given values of $\mNrm$. Figure \ref{fig:PlotCFuncsConverge} shows $\Aprx{\cFunc}_{T-n}(m)$ for $n=\{20,15,10,5,1\}$. At least one feature of this figure is encouraging: the consumption functions converge as the horizon extends, something that \cite{BufferStockTheory} shows must be true under certain parametric conditions that are satisfied by the baseline parameter values being used here. - -\hypertarget{PlotCFuncsConverge}{} -\begin{figure} - \includegraphics[width=6in]{\FigDir/PlotCFuncsConverge} - \caption{Converging $\Aprx{\cFunc}_{T-n}(\mNrm)$ Functions as $n$ Increases} - \label{fig:PlotCFuncsConverge} -\end{figure} - - diff --git a/docs/sec_structural-estimation-input-clean.tex b/docs/sec_structural-estimation-input-clean.tex deleted file mode 100644 index c63514792..000000000 --- a/docs/sec_structural-estimation-input-clean.tex +++ /dev/null @@ -1,297 +0,0 @@ -\hypertarget{structural-estimation}{} -\section{Structural Estimation}\label{sec:structural-estimation} - -This section describes how to use the methods developed above to -structurally estimate a life-cycle consumption model, following -closely the work of -\cite{cagettiWprofiles}.\footnote{Similar structural - estimation exercises have also been performed by - \cite{palumbo:medical} and \cite{gpLifecycle}.} The key idea of -structural estimation is to look for the parameter values (for the -time preference rate, relative risk aversion, or other parameters) -which lead to the best possible match between simulated and empirical -moments. %(The code for the structural estimation is in the self-contained subfolder \texttt{StructuralEstimation} in the Matlab and {\Mma} directories.) - -\hypertarget{life-cycle-model}{} -\subsection{Life Cycle Model}\label{subsec:life-cycle-model} -\newcommand{\byage}{\hat} - -Realistic calibration of a life cycle model needs to take into account a few things that we omitted from the bare-bones model described above.
For example, the whole point of the life cycle model is that life is finite, so we need to include a realistic treatment of life expectancy; this is done easily enough, by assuming that utility accrues only if you live, so effectively the rising mortality rate with age is treated as an extra reason for discounting the future. Similarly, we may want to capture the demographic evolution of the household (e.g., arrival and departure of kids). A common way to handle that, too, is by modifying the discount factor (arrival of a kid might increase the total utility of the household by, say, 0.2, so if the `pure' time preference factor were $1.0$ the `household-size-adjusted' discount factor might be 1.2). We therefore modify the model presented above to allow age-varying discount factors that capture both mortality and family-size changes (we just adopt the factors used by \cite{cagettiWprofiles} directly), with the probability of remaining alive between $t$ and $t+n$ captured by $\Alive$ and with $\hat{\beta}$ now reflecting all the age-varying discount factor adjustments (mortality, family-size, etc). Using $\beth$ (the Hebrew cognate of $\beta$) for the `pure' time preference factor, the value function for the revised problem is - \begin{equation}\begin{gathered}\begin{aligned} - v_{t}(\pLvl_{t},\mLvl_{t}) & = \max_{\{\cFunc\}_{t}^{T}}~~ \uFunc(\cLvl_{t})+\ExEndPrd\left[\sum_{n=1}^{T-t}\beth^{n} \Alive_{t}^{t+n}\hat{\beta}_{t}^{t+n} \uFunc(\cLvl_{t+n}) \right] \label{eq:lifecyclemax} - \end{aligned}\end{gathered} \end{equation} -subject to the constraints - \begin{equation*}\begin{gathered}\begin{aligned} - \aLvl_{t} & = \mLvl_{t}-\cLvl_{t} - \\ \pLvl_{t+1} & = G_{t+1}\pLvl_{t}\permShk_{t+1} - \\ \yLvl_{t+1} & = \pLvl_{t+1}\TranShkEmp _{t+1} - \\ \mLvl_{t+1} & = R \aLvl_{t}+\yLvl_{t+1} - \end{aligned}\end{gathered}\end{equation*} -where - \begin{equation*}\begin{gathered}\begin{aligned} - \Alive _{t}^{t+n} &:\text{probability to }\Alive\text{ive until age $t+n$ given alive at age $t$} - \\ \hat{\beta}_{t}^{t+n} &:\text{age-varying discount factor between ages $t$ and $t+n$} - \\ \permShk_{t} &:\text{mean-one shock to permanent income} - \\ \beth &:\text{time-invariant `pure' discount factor} - \end{aligned}\end{gathered}\end{equation*} -and all the other variables are defined as in section \ref{sec:the-problem}. - -Households start life at age $s=25$ and live with probability 1 until retirement -($s=65$). Thereafter the survival probability shrinks every year and -agents are dead by $s=91$ as assumed by Cagetti. % Note that in addition to a typical time-invariant discount factor $\beth$, there is a time-varying discount factor $\hat{\DiscFac}_{s}$ in (\ref{eq:lifecyclemax}) which can be used to capture the effect of age-varying demographic variables (e.g.\ changes in family size). - - Transitory and permanent shocks are distributed as follows: - \begin{equation}\begin{gathered}\begin{aligned} - \Xi_{s} & = - \begin{cases} - 0\phantom{/(1-\pZero)} & \text{with probability $\pZero>0$} \\ - \TranShkEmp_{s}/(1-\pZero) & \text{with probability $(1-\pZero)$, where $\log \TranShkEmp_{s}\thicksim \Nrml(-\sigma_{\TranShkEmp}^{2}/2,\sigma_{\TranShkEmp}^{2})$}\\ - \end{cases}\\ - \log \permShk_{s} &\thicksim \Nrml(-\sigma_{\permShk}^{2}/2,\sigma_{\permShk}^{2}) - \end{aligned}\end{gathered}\end{equation} - where $\pZero$ is the probability of unemployment (and unemployment shocks are turned off after retirement).
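-A minimal sketch of one standard equiprobable discretization of this distribution (the function and the number of points are our choices, not necessarily those of the estimation code):
-\begin{verbatim}
-import numpy as np
-from scipy.stats import norm
-
-def discretize_lognormal(sigma, n):
-    # n equiprobable points for log x ~ N(-sigma^2/2, sigma^2); each point is
-    # the conditional mean of its probability bin, so E[x] = 1 is preserved
-    # (uses E[x] = exp(mu + sigma^2/2) = 1 when mu = -sigma^2/2)
-    edges = norm.ppf(np.linspace(0.0, 1.0, n + 1))  # standard-normal bin edges
-    bot, top = edges[:-1], edges[1:]
-    return n * (norm.cdf(top - sigma) - norm.cdf(bot - sigma))
-
-# transitory shock: 0 with probability p0, theta/(1-p0) otherwise (mean one)
-p0, sigma_tran = 0.005, 0.1
-theta_pts = discretize_lognormal(sigma_tran, 7) / (1 - p0)
-xi_vals = np.insert(theta_pts, 0, 0.0)
-xi_probs = np.insert(np.full(7, (1 - p0) / 7), 0, p0)
-\end{verbatim}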
- -The parameter values for the shocks are taken from Carroll~\citeyearpar{carroll:brookings}, $\pZero=0.5/100$, $\sigma _{\TranShkEmp }=0.1$, and $\sigma_{\permShk}=0.1$.\footnote{Note that $\sigma _{\TranShkEmp}=0.1$ is smaller than the estimate for college graduates in - Carroll and Samwick~\citeyearpar{carroll&samwick:nature} ($=0.197=\sqrt{0.039}$) which is used by Cagetti~\citeyearpar{cagettiWprofiles}. The reason for this choice is that Carroll and Samwick~\citeyearpar{carroll&samwick:nature} themselves argue that their estimate of $\sigma_{\TranShkEmp }$ is almost certainly increased by measurement error.} The income growth profile $G_{t}$ is from Carroll~\citeyearpar{carrollBSLCPIH} and the values of $\Alive_{t}$ and $\hat{\beta}_{t}$ are obtained from Cagetti~\citeyearpar{cagettiWprofiles} (Figure \ref{fig:TimeVaryingParam}).\footnote{The income growth profile is the one used by Carroll for operatives. Cagetti computes the time-varying discount factor by educational groups using the methodology proposed by Attanasio et al.~\citeyearpar{AttanasioBanksMeghirWeber} and the survival probabilities from the 1995 Life Tables (National Center for Health Statistics 1998).} The gross interest rate is assumed to equal $1.03$. The model parameters are included in Table \ref{table:StrEstParams}. - -\hypertarget{PlotTimeVaryingParam}{} -\begin{figure}[h] - \includegraphics[width=6in]{./Figures/PlotTimeVaryingParam} - \caption{Time Varying Parameters} - \label{fig:TimeVaryingParam} -\end{figure} - -\begin{table}[h] - \caption{Parameter Values}\label{table:StrEstParams} - \begin{center} - \begin{tabular}{ccl} - \hline\hline - $\sigma _{\TranShkEmp}$ & $0.1$ & Carroll~\citeyearpar{carroll:brookings} - \\ $\sigma _{\permShk}$ & $0.1$ & Carroll~\citeyearpar{carroll:brookings} - \\ $\pZero$ & $0.005$ & Carroll~\citeyearpar{carroll:brookings} - \\ $G_{s}$ & figure \ref{fig:TimeVaryingParam} & Carroll~\citeyearpar{carrollBSLCPIH} - \\ $\hat{\beta}_{s},\Alive_{s}$ & figure \ref{fig:TimeVaryingParam} & Cagetti~\citeyearpar{cagettiWprofiles} - \\$R$ & $1.03$ & Cagetti~\citeyearpar{cagettiWprofiles}\\ - \hline - \end{tabular} - \end{center} -\end{table} - -The structural estimation of the parameters $\beth$ and $\rho$ is carried out using -the procedure specified in the following section, which is then implemented in -the \texttt{StructEstimation.py} file. This file consists of two main components. The -first section defines the objects required to execute the structural estimation procedure, -while the second section executes the procedure and various optional -experiments with their corresponding commands. The next section elaborates on the procedure -and its accompanying code implementation in greater detail. - -\subsection{Estimation} - -When economists say that they are performing ``structural estimation'' -of a model like this, they mean that they have devised a -formal procedure for searching for values for the parameters $\beth$ -and $\rho$ at which some measure of the model's outcome (like -``median wealth by age'') is as close as possible to an empirical measure -of the same thing. Here, we choose to match the median of the -wealth to permanent income ratio across 7 age groups, from age $26-30$ -up to $56-60$.\footnote{\cite{cagettiWprofiles} - matches wealth levels rather than wealth to income ratios.
We - believe it is more appropriate to match ratios both because the - ratios are the state variable in the theory and because empirical - moments for ratios of wealth to income are not influenced by the - method used to remove the effects of inflation and productivity - growth.} The choice of matching the medians rather than the means is -motivated by the fact that the wealth distribution is much more -concentrated at the top than the model is capable of explaining using a single -set of parameter values. This means that in practice one must pick -some portion of the population that one wants to match well; since the -model has little hope of capturing the behavior of Bill Gates, but -might conceivably match the behavior of Homer Simpson, we choose to -match medians rather than means. - -As explained in section \ref{sec:normalization}, it is convenient to work with the normalized version of the model which can be written in Bellman form as: - \begin{equation*}\begin{gathered}\begin{aligned} - v_{t}(m_{t}) & = \max_{{c}_{t}}~~~ \uFunc(c_{t})+\beth\Alive_{t+1}\hat{\beta}_{t+1} - \Ex_{t}[(\permShk_{t+1}G_{t+1})^{1-\rho}v_{t+1}(m_{t+1})] \\ - & \text{s.t.} \nonumber \\ - a_{t} & = m_{t}-c_{t} \nonumber - \\ m_{t+1} & = a_{t}\underbrace{\left(\frac{R}{\permShk_{t+1}G_{t+1}}\right)}_{\equiv \mathcal{R}_{t+1}}+ ~\TranShkEmp_{t+1} - \end{aligned}\end{gathered}\end{equation*} -with the first order condition: - \begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(c_{t}) & = \beth\Alive_{t+1}\hat{\beta}_{t+1}R \Ex_{t}\left[\uFunc^{c}\left(\permShk_{t+1}G_{t+1}\cFunc_{t+1}\left(a_{t}\mathcal{R}_{t+1}+\TranShkEmp_{t+1}\right)\right)\right]\label{eq:FOCLifeCycle} - . - \end{aligned}\end{gathered}\end{equation} - -The first substantive {step} in this estimation procedure is -to solve for the consumption functions at each age. We need to -discretize the shock distribution and solve for the policy -functions by backward induction using equation (\ref{eq:FOCLifeCycle}) -following the procedure in sections \ref{sec:solving-the-next} and -`Recursion.' The latter routine -is slightly complicated by the fact that we are considering a -life-cycle model and therefore the growth rate of permanent income, -the probability of death, the time-varying discount factor and the -distribution of shocks will be different across the years. We thus -must ensure that at each backward iteration the right parameter -values are used. - -Correspondingly, the first part of the \texttt{StructEstimation.py} file begins by defining the agent type by inheriting from the baseline agent type \texttt{IndShockConsumerType}, with the modification to include time-varying discount factors. Next, an instance of this ``life-cycle'' consumer is created for the estimation procedure. The number of periods for the life cycle of a given agent is set and, following Cagetti~\citeyearpar{cagettiWprofiles}, we initialize the wealth to income ratio of agents at age $25$ by randomly assigning the values $0.17$, $0.50$, and $0.83$ with equal probability. In particular, we consider a population of agents at age 25 and follow their consumption and wealth accumulation dynamics until they reach the age of $60$, using the appropriate age-specific consumption functions and the age-varying parameters. The simulated medians are obtained by taking the medians of the wealth to income ratio of the $7$ age groups.
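-As a sketch of the bookkeeping involved (hypothetical array names and shapes; the actual file delegates this to the toolkit's simulation machinery):
-\begin{verbatim}
-import numpy as np
-
-rng = np.random.default_rng(0)
-N = 10_000
-ages = np.arange(25, 61)                      # simulate ages 25 through 60
-w0 = rng.choice([0.17, 0.50, 0.83], size=N)   # equal-probability initial ratios
-
-# wealth_panel[t, i]: wealth-to-income ratio of agent i at ages[t], to be
-# filled in by simulating forward with the age-specific consumption functions
-wealth_panel = np.empty((len(ages), N))
-wealth_panel[0] = w0
-
-def group_medians(panel, ages):
-    # median ratio for the 7 five-year age groups 26-30, 31-35, ..., 56-60
-    return np.array([np.median(panel[(ages >= lo) & (ages <= lo + 4)])
-                     for lo in range(26, 60, 5)])
-\end{verbatim}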
- -To complete the creation of the consumer type needed for the simulation, a history of shocks is drawn for each agent across all periods by invoking the \texttt{make\_shock\_history} function. This involves discretizing the shock distribution into as many points as the number of agents we want to simulate and then randomly permuting this shock vector once for each period to be simulated. In this way, we obtain a time-varying shock history for each agent. This is much more time-efficient than drawing a shock for each agent from the shock distribution at each point in time, and also ensures a stable distribution of shocks across the simulation periods even for a small number of agents. (Similarly, in order to speed up the process, at each backward iteration we compute the consumption function and other variables as a vector at once.) - -With the age-varying consumption functions derived from the life-cycle agent, we can proceed to generate simulated data and compute the corresponding medians. Estimating the model involves comparing these simulated medians with empirical medians, measuring the model's success by calculating the difference between the two. However, before performing the necessary steps of solving and simulating the model to generate simulated moments, it's important to note a difficulty in producing the target moments using the available data. - -Specifically, defining $\xi$ as the set of parameters -to be estimated (in the current case $\xi =\{\rho ,\beth\}$), we could search for -the parameter values which solve - \begin{equation} - \begin{gathered} - \begin{aligned} - \min_{\xi} \sum_{\tau=1}^{7} |\Shr^{\tau} -\mathbf{s}^{\tau}(\xi)| \label{eq:naivePowell} - \end{aligned} - \end{gathered} - \end{equation} -where $\Shr^{\tau }$ and $\mathbf{s}^{\tau}$ are respectively the empirical -and simulated medians of the wealth to permanent income ratio for age group $\tau$. -A drawback of proceeding in this way is that it treats the empirically -estimated medians as though they reflected perfect measurements of the -truth. Imagine, however, that one of the age groups happened to have -(in the consumer survey) four times as many data observations as -another age group; then we would expect the median to be more -precisely estimated for the age group with more observations; yet -\eqref{eq:naivePowell} assigns equal importance to a deviation between -the model and the data for all age groups. - -We can get around this problem (and a variety of others) by instead minimizing a slightly more complex object: - \begin{equation} - \min_{\xi}\sum_{i}^{N}\weight _{i}\left|\Shr_{i}^{\tau }-\mathbf{s}^{\tau}(\xi )\right|\label{eq:StructEstim} - \end{equation} -where $\weight_{i}$ is the weight of household $i$ in the entire -population,\footnote{The Survey of Consumer Finances includes many - more high-wealth households than exist in the population as a whole; - therefore if one wants to produce population-representative - statistics, one must be careful to weight each observation by the - factor that reflects its ``true'' weight in the population.} and -$\Shr_{i}^{\tau }$ is the empirical wealth to permanent income -ratio of household $i$ whose head belongs to age group -$\tau$. $\weight _{i}$ is needed because unequal weights are assigned to -the observations in the Survey of Consumer Finances (SCF). The -absolute value is used since the formula is based on the fact that the -median is the value that minimizes the sum of the absolute deviations -from itself.
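-A sketch of the two pieces this objective requires (the signatures here are hypothetical; the file's \texttt{weighted\_median} and moment routines may differ in detail):
-\begin{verbatim}
-import numpy as np
-
-def weighted_median(values, weights):
-    # value at which the cumulative weight first reaches half the total
-    order = np.argsort(values)
-    cum = np.cumsum(weights[order])
-    return values[order][np.searchsorted(cum, 0.5 * cum[-1])]
-
-def smm_distance(xi, ratios, weights, groups, simulate_medians):
-    # weighted absolute deviation of each SCF household's wealth ratio from
-    # the simulated median of its age group; groups[i] is in {0,...,6}
-    sim = simulate_medians(xi)        # length-7 vector, one per age group
-    return np.sum(weights * np.abs(ratios - sim[groups]))
-\end{verbatim}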
- -% In the absence of observation specific weights, equation (\ref{eq:MinStructEstim}) can be simplified to require the minimization of the distance between the empirical and simulated medians. - -With this in mind, we turn our attention to the computation -of the weighted median wealth target moments for each age cohort -using data from the 2004 Survey of Consumer Finances on household -wealth. The objects necessary to accomplish this task are \texttt{weighted\_median} and -\texttt{get\_targeted\_moments}. The actual data are taken from several waves of the SCF and the medians -and means for each age category are plotted in figure \ref{fig:MeanMedianSCF}. -More details on the SCF data are included in appendix \ref{app:scf-data}. - -\hypertarget{PlotMeanMedianSCFcollegeGrads}{} -\begin{figure} - % \includegraphics[width=6in]{./Figures/PlotMeanMedianSCF}} % weird mean value - \includegraphics[width=6in]{./Figures/PlotMeanMedianSCFcollegeGrads} - \caption{Wealth to Permanent Income Ratios from SCF (means (dashed) and medians (solid))} - \label{fig:MeanMedianSCF} -\end{figure} - -We now turn our attention to the two key functions in this section of the code file. The first, \texttt{simulate\_moments}, executes the solving (\texttt{solve}) and simulation (\texttt{simulation}) steps for the defined life-cycle agent. Subsequently, the function uses the agents' tracked levels of wealth based on their optimal consumption behavior to compute and store the simulated median wealth to income ratio for each age cohort. The second function, \texttt{smmObjectiveFxn}, calls the \texttt{simulate\_moments} function to create the objective function described in (\ref{eq:StructEstim}), which is necessary to perform the SMM estimation. - - -% \begin{equation}\begin{gathered}\begin{aligned} -% \lefteqn{ \texttt{GapEmpiricalSimulatedMedians$[\CRRA,\beth]$:=}} \nonumber \\ -% &[&\texttt{ConstructcFuncLife$[\CRRA,\beth]$;}\nonumber\\ -% &\texttt{Simulate;}\nonumber\\ -% &\sum\limits_{i}^{N}\weight _{i}\left|\Shr_{i}^{\tau }-\mathbf{s}^{\tau}(\xi )\right| \nonumber\\ -% &];&\nonumber -% \end{aligned}\end{gathered}\end{equation} - -Thus, for a given pair of the parameters to be estimated, the single -call to the function \texttt{smmObjectiveFxn} executes the following: -\begin{enumerate} -\item solves for the consumption functions for the life-cycle agent -\item simulates the data and computes the simulated medians -\item returns the value of equation (\ref{eq:StructEstim}) -\end{enumerate} - -We delegate the task of finding the coefficients that minimize the \texttt{smmObjectiveFxn} function to the \texttt{minimize\_nelder\_mead} function, which is defined elsewhere and called in the second part of this file. This task can be quite slow and rather problematic if the \texttt{smmObjectiveFxn} function has very flat regions or sharp features. It is thus wise to verify the accuracy of the solution, for example by experimenting with a variety of alternative starting values for the parameter search. - -The final object defined in this first part of the \texttt{StructEstimation.py} -file is \texttt{calculateStandardErrorsByBootstrap}.
As the name suggests, the -purpose of this function is to compute the standard errors by bootstrap.\footnote{For a - treatment of the advantages of the bootstrap see - Horowitz~\citeyearpar{horowitzBootstrap}.} This involves: -\begin{enumerate} -\item drawing new shocks for the simulation -\item drawing a random sample (with replacement) of actual data from the SCF -\item obtaining new estimates for $\rho$ and $\beth$ -\end{enumerate} -We repeat the above procedure several times (\texttt{Bootstrap}) and -take the standard deviation for each of the estimated parameters across the various bootstrap iterations. - -\subsubsection{An Aside to Computing Sensitivity Measures}\label{subsubsec:sensmeas} - - -A common drawback of structural estimation procedures is a lack of transparency in their estimates. As \cite{andrews2017measuring} notes, a researcher employing such structural empirical methods may be interested in how alternative assumptions (such as misspecification or measurement bias in the data) would ``change the moments of the data that the estimator uses as inputs, and how changes in these moments affect the estimates.'' The authors provide a measure of sensitivity for a given estimator that makes it easy to map the effects of different assumptions on the moments into predictable bias in the estimates for non-linear models. - -In the language of \cite{andrews2017measuring}, section \ref{sec:structural-estimation} is aimed at providing an estimator $\xi =\{\rho ,\beth\}$ that has some true value $\xi_0 $ by assumption. Under the assumption $a_0$ of the researcher, the empirical targets computed from the SCF are measured accurately. These moments of the data are precisely what determine our estimate $\hat{\xi}$, which minimizes (\ref{eq:StructEstim}). Under alternative assumptions $a$, such that a given cohort is mismeasured in the survey, a different estimate is computed. Using the plug-in estimate provided by the authors, we can see quantitatively how our estimate changes under these alternative assumptions $a$ which correspond to mismeasurement in the median wealth to income ratio for a given age cohort. - -\subsection{Results} -The second part of the file \texttt{StructEstimation.py} -defines a function \texttt{main} which produces our $\rho$ and -$\beth$ estimates with standard errors using 10,000 simulated -agents by setting the positional arguments \texttt{estimate\_model} and -\texttt{compute\_standard\_errors} to true.\footnote{The procedure is: First we calculate the $\rho$ and - $\beth$ estimates as the minimizer of equation - (\ref{eq:StructEstim}) using the actual SCF data.
Then, we apply the - \texttt{Bootstrap} function several times to obtain the standard - error of our estimates.} Results are reported in Table -\ref{tab:EstResults}.\footnote{Differently from Cagetti~\citeyearpar{cagettiWprofiles}, who estimates a different set of - parameters for college graduates, high school graduates and high - school dropouts, we perform the structural estimation on - the full population.} - - - \begin{table}[h] - \caption{Estimation Results}\label{tab:EstResults} - \center - \begin{tabular}{cc} - \hline - $\rho $ & $\beth$\\ - \hline - $3.69$ & $0.88$\\ - $(0.047)$ & $(0.002)$\\ - \hline - \end{tabular} - \end{table} - -The literature on consumption and saving behavior over the lifecycle in the presence of labor income uncertainty\footnote{For example, see \cite{gpLifecycle} for an exposition of this.} warns us to be careful in disentangling the effects of time preference and risk aversion when describing the optimal behavior of households in this setting. Since the precautionary saving motive dominates in the early stages of life, the coefficient of relative risk aversion (as well as expected labor income growth) has a larger effect on optimal consumption and saving behavior through its magnitude relative to the interest rate. Over time, life-cycle considerations (such as saving for retirement) become more important and the time preference factor plays a larger role in determining optimal behavior for this cohort. - -Using the positional argument \texttt{compute\_sensitivity}, Figure \ref{fig:PlotSensitivityMeasure} provides a plot of the plug-in estimate of the sensitivity measure described in section \ref{subsubsec:sensmeas}. As you can see from the figure, the inverse relationship between $\rho$ and $\beth$ over the life-cycle is retained by the sensitivity measure. Specifically, under the alternative assumption that \textit{a particular cohort is mismeasured in the SCF dataset}, the y-axis shows that our estimates of $\rho$ and $\beth$ change in a predictable way. - -Suppose that there are not enough observations of the oldest cohort of households in the sample. Suppose further that the researcher predicts that adding more observations of these households to correct this mismeasurement would correspond to a higher median wealth to income ratio for this cohort. In this case, our estimate of the time preference factor should increase: the behavior of these older households is driven by their time preference, so a higher value of $\beth$ is required to match the affected wealth to income targets under this alternative assumption. Since risk aversion is less important in explaining the behavior of this cohort, a lower value of $\rho$ is required to match the affected empirical moments. - -To recap, the sensitivity measure not only matches our intuition about the inverse relationship between $\rho$ and $\beth$ over the life-cycle, but provides a quantitative estimate of what would happen to our estimates of these parameters under the alternative assumption that the data is mismeasured in some way.
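-For readers who want to compute the measure themselves, here is a sketch of the plug-in formula of \cite{andrews2017measuring} for a minimum-distance estimator with weighting matrix $W$ (our absolute-deviation objective is not smooth, so read this as the smooth-objective approximation; all names below are ours):
-\begin{verbatim}
-import numpy as np
-
-def sensitivity(xi_hat, moment_fn, W, h=1e-4):
-    # plug-in Lambda = -(G'WG)^{-1} G'W, with G the finite-difference
-    # Jacobian of the simulated moments with respect to the parameters
-    g0 = moment_fn(xi_hat)
-    G = np.column_stack([(moment_fn(xi_hat + h * e) - g0) / h
-                         for e in np.eye(len(xi_hat))])
-    return -np.linalg.solve(G.T @ W @ G, G.T @ W)
-
-# each column of the result maps a small perturbation of one age group's
-# target moment into the implied change in the estimates of (rho, beth)
-\end{verbatim}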
- -\hypertarget{PlotSensitivityMeasure}{} -\begin{figure} - \includegraphics[width=6in]{./Figures/Sensitivity.pdf} - \caption{Sensitivity of Estimates $\{\rho,\beth\}$ to Alternative Mismeasurement Assumptions.} - \label{fig:PlotSensitivityMeasure} -\end{figure} - -By setting the positional argument \texttt{make\_contour\_plot} to true, Figure \ref{fig:PlotContourMedianStrEst} shows the contour plot of the \texttt{smmObjectiveFxn} function and the parameter estimates. The contour plot shows equally spaced isoquants of the \texttt{smmObjectiveFxn} function, i.e.\ the pairs of $\rho$ and $\beth$ which lead to the same deviations between simulated and empirical medians (equivalent values of equation (\ref{eq:StructEstim})). Interestingly, there is a large, rather flat region; or, more formally speaking, there exists a broad set of parameter pairs which lead to similar simulated wealth to income ratios. Intuitively, the flatter and larger this region is, the harder it is for the structural estimation procedure to precisely identify the parameters. - - -\hypertarget{PlotContourMedianStrEst}{} -\begin{figure} - \includegraphics[width=6in]{./Figures/SMMcontour.pdf} - \caption{Contour Plot (larger values are shown lighter) with $\{\rho,\beth\}$ Estimates (red dot).} - \label{fig:PlotContourMedianStrEst} -\end{figure} - diff --git a/docs/sec_structural-estimation-input.tex b/docs/sec_structural-estimation-input.tex deleted file mode 100644 index 4ff95a3b3..000000000 --- a/docs/sec_structural-estimation-input.tex +++ /dev/null @@ -1,297 +0,0 @@ -\hypertarget{structural-estimation}{} -\section{Structural Estimation}\label{sec:structural-estimation} - -This section describes how to use the methods developed above to -structurally estimate a life-cycle consumption model, following -closely the work of -\cite{cagettiWprofiles}.\footnote{Similar structural - estimation exercises have also been performed by - \cite{palumbo:medical} and \cite{gpLifecycle}.} The key idea of -structural estimation is to look for the parameter values (for the -time preference rate, relative risk aversion, or other parameters) -which lead to the best possible match between simulated and empirical -moments. %(The code for the structural estimation is in the self-contained subfolder \texttt{StructuralEstimation} in the Matlab and {\Mma} directories.) - -\hypertarget{life-cycle-model}{} -\subsection{Life Cycle Model}\label{subsec:life-cycle-model} -\newcommand{\byage}{\hat} - -Realistic calibration of a life cycle model needs to take into account a few things that we omitted from the bare-bones model described above. For example, the whole point of the life cycle model is that life is finite, so we need to include a realistic treatment of life expectancy; this is done easily enough, by assuming that utility accrues only if you live, so effectively the rising mortality rate with age is treated as an extra reason for discounting the future. Similarly, we may want to capture the demographic evolution of the household (e.g., arrival and departure of kids). A common way to handle that, too, is by modifying the discount factor (arrival of a kid might increase the total utility of the household by, say, 0.2, so if the `pure' time preference factor were $1.0$ the `household-size-adjusted' discount factor might be 1.2).
We therefore modify the model presented above to allow age-varying discount factors that capture both mortality and family-size changes (we just adopt the factors used by \cite{cagettiWprofiles} directly), with the probability of remaining alive between $t$ and $t+n$ captured by $\Alive$ and with $\hat{\DiscFac}$ now reflecting all the age-varying discount factor adjustments (mortality, family-size, etc). Using $\beth$ (the Hebrew cognate of $\beta$) for the `pure' time preference factor, the value function for the revised problem is - \begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{\prd}(\pLvl_{\prd},\mLvl_{\prd}) & = \max_{\{\cFunc\}_{\prd}^{T}}~~ \uFunc(\cLvl_{\prd})+\ExEndPrd\left[\sum_{n=1}^{T-t}\beth^{n} \Alive_{\prd}^{t+n}\hat{\DiscFac}_{\prd}^{t+n} \uFunc(\cLvl_{t+n}) \right] \label{eq:lifecyclemax} - \end{aligned}\end{gathered} \end{equation} -subject to the constraints - \begin{equation*}\begin{gathered}\begin{aligned} - \aLvl_{\prd} & = \mLvl_{\prd}-\cLvl_{\prd} - \\ \pLvl_{\prd+1} & = \PermGroFac_{\prd+1}\pLvl_{\prd}\permShk_{\prd+1} - \\ \yLvl_{\prd+1} & = \pLvl_{\prd+1}\TranShkEmp _{\prd+1} - \\ \mLvl_{\prd+1} & = \Rfree \aLvl_{\prd}+\yLvl_{\prd+1} - \end{aligned}\end{gathered}\end{equation*} -where - \begin{equation*}\begin{gathered}\begin{aligned} - \Alive _{\prd}^{t+n} &:\text{probability to }\Alive\text{ive until age $t+n$ given alive at age $t$} - \\ \hat{\DiscFac}_{\prd}^{t+n} &:\text{age-varying discount factor between ages $t$ and $t+n$} - \\ \permShk_{\prd} &:\text{mean-one shock to permanent income} - \\ \beth &:\text{time-invariant `pure' discount factor} - \end{aligned}\end{gathered}\end{equation*} -and all the other variables are defined as in section \ref{sec:the-problem}. - -Households start life at age $s=25$ and live with probability 1 until retirement -($s=65$). Thereafter the survival probability shrinks every year and -agents are dead by $s=91$ as assumed by Cagetti. % Note that in addition to a typical time-invariant discount factor $\beth$, there is a time-varying discount factor $\hat{\DiscFac}_{s}$ in (\ref{eq:lifecyclemax}) which can be used to capture the effect of age-varying demographic variables (e.g.\ changes in family size). - - Transitory and permanent shocks are distributed as follows: - \begin{equation}\begin{gathered}\begin{aligned} - \Xi_{s} & = - \begin{cases} - 0\phantom{/(1-\pZero)} & \text{with probability $\pZero>0$} \\ - \TranShkEmp_{s}/(1-\pZero) & \text{with probability $(1-\pZero)$, where $\log \TranShkEmp_{s}\thicksim \Nrml(-\sigma_{\TranShkEmp}^{2}/2,\sigma_{\TranShkEmp}^{2})$}\\ - \end{cases}\\ - \log \permShk_{s} &\thicksim \Nrml(-\sigma_{\permShk}^{2}/2,\sigma_{\permShk}^{2}) - \end{aligned}\end{gathered}\end{equation} - where $\pZero$ is the probability of unemployment (and unemployment shocks are turned off after retirement). - -The parameter values for the shocks are taken from Carroll~\citeyearpar{carroll:brookings}, $\pZero=0.5/100$, $\sigma _{\TranShkEmp }=0.1$, and $\sigma_{\permShk}=0.1$.\footnote{Note that $\sigma _{\TranShkEmp}=0.1$ is smaller than the estimate for college graduates in - Carroll and Samwick~\citeyearpar{carroll&samwick:nature} ($=0.197=\sqrt{0.039}$) which is used by Cagetti~\citeyearpar{cagettiWprofiles}.
The reason for this choice is that Carroll and Samwick~\citeyearpar{carroll&samwick:nature} themselves argue that their estimate of $\sigma_{\TranShkEmp }$ is almost certainly increased by measurement error.} The income growth profile $\PermGroFac_{\prd}$ is from Carroll~\citeyearpar{carrollBSLCPIH} and the values of $\Alive_{\prd}$ and $\hat{\DiscFac}_{\prd}$ are obtained from Cagetti~\citeyearpar{cagettiWprofiles} (Figure \ref{fig:TimeVaryingParam}).\footnote{The income growth profile is the one used by Carroll for operatives. Cagetti computes the time-varying discount factor by educational groups using the methodology proposed by Attanasio et al.~\citeyearpar{AttanasioBanksMeghirWeber} and the survival probabilities from the 1995 Life Tables (National Center for Health Statistics 1998).} The gross interest rate is assumed to equal $1.03$. The model parameters are included in Table \ref{table:StrEstParams}. - -\hypertarget{PlotTimeVaryingParam}{} -\begin{figure}[h] - \includegraphics[width=6in]{./Figures/PlotTimeVaryingParam} - \caption{Time Varying Parameters} - \label{fig:TimeVaryingParam} -\end{figure} - -\begin{table}[h] - \caption{Parameter Values}\label{table:StrEstParams} - \begin{center} - \begin{tabular}{ccl} - \hline\hline - $\sigma _{\TranShkEmp}$ & $0.1$ & Carroll~\citeyearpar{carroll:brookings} - \\ $\sigma _{\permShk}$ & $0.1$ & Carroll~\citeyearpar{carroll:brookings} - \\ $\pZero$ & $0.005$ & Carroll~\citeyearpar{carroll:brookings} - \\ $\PermGroFac_{s}$ & figure \ref{fig:TimeVaryingParam} & Carroll~\citeyearpar{carrollBSLCPIH} - \\ $\hat{\DiscFac}_{s},\Alive_{s}$ & figure \ref{fig:TimeVaryingParam} & Cagetti~\citeyearpar{cagettiWprofiles} - \\$\Rfree$ & $1.03$ & Cagetti~\citeyearpar{cagettiWprofiles}\\ - \hline - \end{tabular} - \end{center} -\end{table} - -The structural estimation of the parameters $\beth$ and $\CRRA$ is carried out using -the procedure specified in the following section, which is then implemented in -the \texttt{StructEstimation.py} file. This file consists of two main components. The -first section defines the objects required to execute the structural estimation procedure, -while the second section executes the procedure and various optional -experiments with their corresponding commands. The next section elaborates on the procedure -and its accompanying code implementation in greater detail. - -\subsection{Estimation} - -When economists say that they are performing ``structural estimation'' -of a model like this, they mean that they have devised a -formal procedure for searching for values for the parameters $\beth$ -and $\CRRA$ at which some measure of the model's outcome (like -``median wealth by age'') is as close as possible to an empirical measure -of the same thing. Here, we choose to match the median of the -wealth to permanent income ratio across 7 age groups, from age $26-30$ -up to $56-60$.\footnote{\cite{cagettiWprofiles} - matches wealth levels rather than wealth to income ratios. We - believe it is more appropriate to match ratios both because the - ratios are the state variable in the theory and because empirical - moments for ratios of wealth to income are not influenced by the - method used to remove the effects of inflation and productivity - growth.} The choice of matching the medians rather than the means is -motivated by the fact that the wealth distribution is much more -concentrated at the top than the model is capable of explaining using a single -set of parameter values.
This means that in practice one must pick -some portion of the population that one wants to match well; since the -model has little hope of capturing the behavior of Bill Gates, but -might conceivably match the behavior of Homer Simpson, we choose to -match medians rather than means. - -As explained in section \ref{sec:normalization}, it is convenient to work with the normalized version of the model which can be written in Bellman form as: - \begin{equation*}\begin{gathered}\begin{aligned} - \vFunc_{\prd}(m_{\prd}) & = \max_{{c}_{\prd}}~~~ \uFunc(c_{\prd})+\beth\Alive_{\prd+1}\hat{\DiscFac}_{\prd+1} - \Ex_{\prd}[(\permShk_{\prd+1}\PermGroFac_{\prd+1})^{1-\CRRA}\vFunc_{\prd+1}(m_{\prd+1})] \\ - & \text{s.t.} \nonumber \\ - a_{\prd} & = m_{\prd}-c_{\prd} \nonumber - \\ m_{\prd+1} & = a_{\prd}\underbrace{\left(\frac{\Rfree}{\permShk_{\prd+1}\PermGroFac_{\prd+1}}\right)}_{\equiv \RNrm_{\prd+1}}+ ~\TranShkEmp_{\prd+1} - \end{aligned}\end{gathered}\end{equation*} -with the first order condition: - \begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(c_{\prd}) & = \beth\Alive_{\prd+1}\hat{\DiscFac}_{\prd+1}\Rfree \Ex_{\prd}\left[\uFunc^{c}\left(\permShk_{\prd+1}\PermGroFac_{\prd+1}\cFunc_{\prd+1}\left(a_{\prd}\RNrm_{\prd+1}+\TranShkEmp_{\prd+1}\right)\right)\right]\label{eq:FOCLifeCycle} - . - \end{aligned}\end{gathered}\end{equation} - -The first substantive {step} in this estimation procedure is -to solve for the consumption functions at each age. We need to -discretize the shock distribution and solve for the policy -functions by backward induction using equation (\ref{eq:FOCLifeCycle}) -following the procedure in sections \ref{sec:solving-the-next} and -`Recursion.' The latter routine -is slightly complicated by the fact that we are considering a -life-cycle model and therefore the growth rate of permanent income, -the probability of death, the time-varying discount factor and the -distribution of shocks will be different across the years. We thus -must ensure that at each backward iteration the right parameter -values are used. - -Correspondingly, the first part of the \texttt{StructEstimation.py} file begins by defining the agent type by inheriting from the baseline agent type \texttt{IndShockConsumerType}, with the modification to include time-varying discount factors. Next, an instance of this ``life-cycle'' consumer is created for the estimation procedure. The number of periods for the life cycle of a given agent is set and, following Cagetti~\citeyearpar{cagettiWprofiles}, we initialize the wealth to income ratio of agents at age $25$ by randomly assigning the values $0.17$, $0.50$, and $0.83$ with equal probability. In particular, we consider a population of agents at age 25 and follow their consumption and wealth accumulation dynamics until they reach the age of $60$, using the appropriate age-specific consumption functions and the age-varying parameters. The simulated medians are obtained by taking the medians of the wealth to income ratio of the $7$ age groups. - -To complete the creation of the consumer type needed for the simulation, a history of shocks is drawn for each agent across all periods by invoking the \texttt{make\_shock\_history} function. This involves discretizing the shock distribution into as many points as the number of agents we want to simulate and then randomly permuting this shock vector once for each period to be simulated. In this way, we obtain a time-varying shock history for each agent.
This is much more time-efficient than drawing a shock for each agent from the shock distribution at each point in time, and also ensures a stable distribution of shocks across the simulation periods even for a small number of agents. (Similarly, in order to speed up the process, at each backward iteration we compute the consumption function and other variables as a vector at once.) - -With the age-varying consumption functions derived from the life-cycle agent, we can proceed to generate simulated data and compute the corresponding medians. Estimating the model involves comparing these simulated medians with empirical medians, measuring the model's success by calculating the difference between the two. However, before performing the necessary steps of solving and simulating the model to generate simulated moments, it's important to note a difficulty in producing the target moments using the available data. - -Specifically, defining $\xi$ as the set of parameters -to be estimated (in the current case $\xi =\{\CRRA ,\beth\}$), we could search for -the parameter values which solve - \begin{equation} - \begin{gathered} - \begin{aligned} - \min_{\xi} \sum_{\tau=1}^{7} |\Shr^{\tau} -\mathbf{s}^{\tau}(\xi)| \label{eq:naivePowell} - \end{aligned} - \end{gathered} - \end{equation} -where $\Shr^{\tau }$ and $\mathbf{s}^{\tau}$ are respectively the empirical -and simulated medians of the wealth to permanent income ratio for age group $\tau$. -A drawback of proceeding in this way is that it treats the empirically -estimated medians as though they reflected perfect measurements of the -truth. Imagine, however, that one of the age groups happened to have -(in the consumer survey) four times as many data observations as -another age group; then we would expect the median to be more -precisely estimated for the age group with more observations; yet -\eqref{eq:naivePowell} assigns equal importance to a deviation between -the model and the data for all age groups. - -We can get around this problem (and a variety of others) by instead minimizing a slightly more complex object: - \begin{equation} - \min_{\xi}\sum_{i}^{N}\weight _{i}\left|\Shr_{i}^{\tau }-\mathbf{s}^{\tau}(\xi )\right|\label{eq:StructEstim} - \end{equation} -where $\weight_{i}$ is the weight of household $i$ in the entire -population,\footnote{The Survey of Consumer Finances includes many - more high-wealth households than exist in the population as a whole; - therefore if one wants to produce population-representative - statistics, one must be careful to weight each observation by the - factor that reflects its ``true'' weight in the population.} and -$\Shr_{i}^{\tau }$ is the empirical wealth to permanent income -ratio of household $i$ whose head belongs to age group -$\tau$. $\weight _{i}$ is needed because unequal weights are assigned to -the observations in the Survey of Consumer Finances (SCF). The -absolute value is used since the formula is based on the fact that the -median is the value that minimizes the sum of the absolute deviations -from itself. - -% In the absence of observation specific weights, equation (\ref{eq:MinStructEstim}) can be simplified to require the minimization of the distance between the empirical and simulated medians. - -With this in mind, we turn our attention to the computation -of the weighted median wealth target moments for each age cohort -using data from the 2004 Survey of Consumer Finances on household -wealth.
The objects necessary to accomplish this task are \texttt{weighted\_median} and -\texttt{get\_targeted\_moments}. The actual data are taken from several waves of the SCF and the medians -and means for each age category are plotted in figure \ref{fig:MeanMedianSCF}. -More details on the SCF data are included in appendix \ref{app:scf-data}. - -\hypertarget{PlotMeanMedianSCFcollegeGrads}{} -\begin{figure} - % \includegraphics[width=6in]{./Figures/PlotMeanMedianSCF}} % weird mean value - \includegraphics[width=6in]{./Figures/PlotMeanMedianSCFcollegeGrads} - \caption{Wealth to Permanent Income Ratios from SCF (means (dashed) and medians (solid))} - \label{fig:MeanMedianSCF} -\end{figure} - -We now turn our attention to the two key functions in this section of the code file. The first, \texttt{simulate\_moments}, executes the solving (\texttt{solve}) and simulation (\texttt{simulation}) steps for the defined life-cycle agent. Subsequently, the function uses the agents' tracked levels of wealth based on their optimal consumption behavior to compute and store the simulated median wealth to income ratio for each age cohort. The second function, \texttt{smmObjectiveFxn}, calls the \texttt{simulate\_moments} function to create the objective function described in (\ref{eq:StructEstim}), which is necessary to perform the SMM estimation. - - -% \begin{equation}\begin{gathered}\begin{aligned} -% \lefteqn{ \texttt{GapEmpiricalSimulatedMedians$[\CRRA,\beth]$:=}} \nonumber \\ -% &[&\texttt{ConstructcFuncLife$[\CRRA,\beth]$;}\nonumber\\ -% &\texttt{Simulate;}\nonumber\\ -% &\sum\limits_{i}^{N}\weight _{i}\left|\Shr_{i}^{\tau }-\mathbf{s}^{\tau}(\xi )\right| \nonumber\\ -% &];&\nonumber -% \end{aligned}\end{gathered}\end{equation} - -Thus, for a given pair of the parameters to be estimated, the single -call to the function \texttt{smmObjectiveFxn} executes the following: -\begin{enumerate} -\item solves for the consumption functions for the life-cycle agent -\item simulates the data and computes the simulated medians -\item returns the value of equation (\ref{eq:StructEstim}) -\end{enumerate} - -We delegate the task of finding the coefficients that minimize the \texttt{smmObjectiveFxn} function to the \texttt{minimize\_nelder\_mead} function, which is defined elsewhere and called in the second part of this file. This task can be quite slow and rather problematic if the \texttt{smmObjectiveFxn} function has very flat regions or sharp features. It is thus wise to verify the accuracy of the solution, for example by experimenting with a variety of alternative starting values for the parameter search. - -The final object defined in this first part of the \texttt{StructEstimation.py} -file is \texttt{calculateStandardErrorsByBootstrap}. As the name suggests, the -purpose of this function is to compute the standard errors by bootstrap.\footnote{For a - treatment of the advantages of the bootstrap see - Horowitz~\citeyearpar{horowitzBootstrap}.} This involves: -\begin{enumerate} -\item drawing new shocks for the simulation -\item drawing a random sample (with replacement) of actual data from the SCF -\item obtaining new estimates for $\CRRA$ and $\beth$ -\end{enumerate} -We repeat the above procedure several times (\texttt{Bootstrap}) and -take the standard deviation for each of the estimated parameters across the various bootstrap iterations.
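-Schematically (hypothetical names and a deliberately small replication count; the actual routine differs in its details):
-\begin{verbatim}
-import numpy as np
-
-def bootstrap_se(estimate_fn, scf_rows, n_boot=50, seed=0):
-    # re-estimate the parameters on resampled SCF rows with fresh simulation
-    # shocks; the bootstrap SE is the standard deviation across replications
-    rng = np.random.default_rng(seed)
-    draws = []
-    for _ in range(n_boot):
-        resample = scf_rows[rng.integers(len(scf_rows), size=len(scf_rows))]
-        draws.append(estimate_fn(resample, shock_seed=int(rng.integers(2**31))))
-    return np.std(np.array(draws), axis=0)
-\end{verbatim}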
- -\subsubsection{An Aside on Computing Sensitivity Measures}\label{subsubsec:sensmeas} - - -A common drawback of structural estimation procedures is a lack of transparency about what drives their estimates. As \cite{andrews2017measuring} note, a researcher employing such structural empirical methods may be interested in how alternative assumptions (such as misspecification or measurement bias in the data) would ``change the moments of the data that the estimator uses as inputs, and how changes in these moments affect the estimates.'' The authors provide a sensitivity measure for a given estimator that makes it easy to map the effects of alternative assumptions about the moments into predictable bias in the estimates, even for non-linear models. - -In the language of \cite{andrews2017measuring}, section \ref{sec:structural-estimation} is aimed at providing an estimate of $\xi =\{\CRRA ,\beth\}$, which has some true value $\xi_0$ by assumption. Under the researcher's assumption $a_0$, the empirical targets computed from the SCF are measured accurately. These moments of the data are precisely what determine our estimate $\hat{\xi}$, which minimizes (\ref{eq:StructEstim}). Under an alternative assumption $a$, such that a given cohort is mismeasured in the survey, a different estimate is computed. Using the plug-in estimate provided by the authors, we can see quantitatively how our estimate changes under alternative assumptions $a$ that correspond to mismeasurement of the median wealth to income ratio for a given age cohort. - -\subsection{Results} -The second part of the file \texttt{StructEstimation.py} -defines a function \texttt{main} which produces our $\CRRA$ and -$\beth$ estimates with standard errors using 10,000 simulated -agents by setting the positional arguments \texttt{estimate\_model} and -\texttt{compute\_standard\_errors} to true.\footnote{The procedure is: first we calculate the $\CRRA$ and - $\beth$ estimates as the minimizers of equation - (\ref{eq:StructEstim}) using the actual SCF data; then we apply the - \texttt{Bootstrap} function several times to obtain the standard - errors of our estimates.} Results are reported in Table -\ref{tab:EstResults}.\footnote{Unlike Cagetti - ~\citeyearpar{cagettiWprofiles}, who estimates a separate set of - parameters for college graduates, high school graduates, and high - school dropouts, we perform the structural estimation on - the full population.} - - - \begin{table}[h] - \caption{Estimation Results}\label{tab:EstResults} - \centering - \begin{tabular}{cc} - \hline - $\CRRA $ & $\beth$\\ - \hline - $3.69$ & $0.88$\\ - $(0.047)$ & $(0.002)$\\ - \hline - \end{tabular} - \end{table} - -The literature on consumption and saving behavior over the life cycle in the presence of labor income uncertainty\footnote{For example, see \cite{gpLifecycle} for an exposition of this.} warns us to be careful to disentangle the effects of time preference and risk aversion when describing the optimal behavior of households in this setting. Since the precautionary saving motive dominates in the early stages of life, the coefficient of relative risk aversion (along with expected labor income growth) has the larger effect on optimal consumption and saving behavior early in life. Over time, life-cycle considerations (such as saving for retirement) become more important, and the time preference factor plays a larger role in determining the optimal behavior of older cohorts.
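Returning to the sensitivity measure of section~\ref{subsubsec:sensmeas}: a minimal sketch of the plug-in estimate of \cite{andrews2017measuring} is $\Lambda = -(G'WG)^{-1}G'W$, where $G$ is the Jacobian of the simulated moments with respect to the parameters. In the sketch below $G$ is approximated by finite differences; the \texttt{simulate\_moments} signature and the step size are assumptions, not the code in \texttt{StructEstimation.py}:
\begin{verbatim}
import numpy as np

def sensitivity_measure(xi_hat, simulate_moments, W=None, h=1e-4):
    # Lambda = -(G'WG)^{-1} G'W, with G = d g(xi)/d xi at the estimate.
    g0 = simulate_moments(xi_hat)        # vector of simulated medians
    k, n = len(xi_hat), len(g0)
    W = np.eye(n) if W is None else W    # moment-weighting matrix
    G = np.empty((n, k))
    for j in range(k):                   # one-sided finite differences
        xi = np.array(xi_hat, dtype=float)
        xi[j] += h
        G[:, j] = (simulate_moments(xi) - g0) / h
    return -np.linalg.solve(G.T @ W @ G, G.T @ W)   # k x n matrix
\end{verbatim}
Each column of the resulting $2\times 7$ matrix shows how the $\{\CRRA,\beth\}$ estimates would move in response to a perturbation of one age group's target moment.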
- -Using the positional argument \texttt{compute\_sensitivity}, Figure \ref{fig:PlotSensitivityMeasure} provides a plot of the plug-in estimate of the sensitivity measure described in section~\ref{subsubsec:sensmeas}. As the figure shows, the inverse relationship between $\rho$ and $\beth$ over the life-cycle is preserved by the sensitivity measure. Specifically, under the alternative assumption that \textit{a particular cohort is mismeasured in the SCF dataset}, the figure shows that our estimates of $\rho$ and $\beth$ change in a predictable way. - -Suppose that there are not enough observations of the oldest cohort of households in the sample. Suppose further that the researcher predicts that correcting this mismeasurement by adding more observations of these households would imply a higher median wealth to income ratio for this cohort. In this case, our estimate of the time preference factor should increase: the behavior of these older households is driven by their time preference, so a higher value of $\beth$ is required to match the affected wealth to income targets under this alternative assumption. Since risk aversion is less important in explaining the behavior of this cohort, a lower value of $\rho$ is required to match the affected empirical moments. - -To recap, the sensitivity measure not only matches our intuition about the inverse relationship between $\rho$ and $\beth$ over the life-cycle, but also provides a quantitative estimate of what would happen to our estimates of these parameters under the alternative assumption that the data are mismeasured in some way. - -\hypertarget{PlotSensitivityMeasure}{} -\begin{figure} - \includegraphics[width=6in]{./Figures/Sensitivity.pdf} - \caption{Sensitivity of Estimates $\{\CRRA,\beth\}$ under Alternative Mismeasurement Assumptions.} - \label{fig:PlotSensitivityMeasure} -\end{figure} - -By setting the positional argument \texttt{make\_contour\_plot} to true, Figure \ref{fig:PlotContourMedianStrEst} shows the contour plot of the \texttt{smmObjectiveFxn} function and the parameter estimates. The contour plot shows equally spaced isoquants of the \texttt{smmObjectiveFxn} function, i.e.\ the pairs of $\CRRA$ and $\beth$ which lead to the same deviations between simulated and empirical medians (equivalent values of equation (\ref{eq:StructEstim})). Interestingly, there is a large, rather flat region; more formally, there exists a broad set of parameter pairs which lead to similar simulated wealth to income ratios. Intuitively, the flatter and larger this region, the harder it is for the structural estimation procedure to precisely identify the parameters. - - -\hypertarget{PlotContourMedianStrEst}{} -\begin{figure} - \includegraphics[width=6in]{./Figures/SMMcontour.pdf} - \caption{Contour Plot (larger values are shown lighter) with $\{\CRRA,\beth\}$ Estimates (red dot).} - \label{fig:PlotContourMedianStrEst} -\end{figure} - diff --git a/docs/sec_the-infinite-horizon-input-clean.tex b/docs/sec_the-infinite-horizon-input-clean.tex deleted file mode 100644 index eda356696..000000000 --- a/docs/sec_the-infinite-horizon-input-clean.tex +++ /dev/null @@ -1,102 +0,0 @@ -\hypertarget{the-infinite-horizon}{} -\section{The Infinite Horizon}\label{sec:the-infinite-horizon} - -All of the solution methods presented so far have involved period-by-period iteration from an assumed last period of life, as is appropriate for life cycle problems.
However, if the parameter values for the problem satisfy certain conditions (detailed in \cite{BufferStockTheory}), the consumption rules (and the rest of the problem) will converge to a fixed rule as the horizon (remaining lifetime) gets large, as illustrated in Figure~\ref{fig:PlotCFuncsConverge}. Furthermore, Deaton~\citeyearpar{deatonLiqConstr}, Carroll~\citeyearpar{carroll:brookings,carrollBSLCPIH} and others have argued that the `buffer-stock' saving behavior that emerges under some further restrictions on parameter values is a good approximation of the behavior of typical consumers over much of the lifetime. Methods for finding the converged functions are therefore of interest, and are dealt with in this section. - -Of course, the simplest such method is to solve the problem as -specified above for a large number of periods. This is feasible, but -there are much faster methods. - -\subsection{Convergence} - -In solving an infinite-horizon problem, it is necessary to have some -metric that determines when to stop because a solution that is `good -enough' has been found. - -A natural metric is defined by the unique `target' level of wealth that \cite{BufferStockTheory} proves -will exist in problems of this kind \href{https://llorracc.github.io/BufferStockTheory#GICNrm}{under certain conditions}: The $\mTrgNrm$ such that -\begin{equation} - \Ex_t [{m}_{t+1}/m_t] = 1 \mbox{~if~} m_t = \mTrgNrm \label{eq:mTrgNrmet} -\end{equation} -where the accent is meant to signify that this is the value -that other $m$'s `point to.' - -Given a consumption rule $\cFunc(m)$ it is straightforward to find -the corresponding $\mTrgNrm$. So for our problem, a solution is declared -to have converged if the following criterion is met: -$\left|\mTrgNrm_{t+1}-\mTrgNrm_{t}\right| < \epsilon$, where $\epsilon$ is -a very small number and defines our degree of convergence tolerance. - -Similar criteria can obviously be specified for other problems. -However, it is always wise to plot successive function differences and -to experiment a bit with convergence criteria to verify that the -function has converged for all practical purposes. - -\begin{comment} % at suggestion of WW, this section was removed as unnecessary for the current model, which solves for the converged rule very fast - \subsection{The Last Period} - - For the last period of a finite-horizon lifetime, in the absence of a - bequest motive it is obvious that the optimal policy is to spend - everything. However, in an infinite-horizon problem there is no last - period, and the policy of spending everything is obviously very far - from optimal. Generally speaking, it is much better to start off with - a `last-period' consumption rule and value function equal to those - corresponding to the infinite-horizon solution to the perfect - foresight problem (assuming such a solution is known). - - For the perfect foresight infinite horizon consumption problem, - the solution is - \begin{equation}\begin{gathered}\begin{aligned} - \bar{\cFunc}(m_{t}) & = \overbrace{(1-R^{-1}(R - \beta)^{1/\rho})}^{\equiv - \underline{\kappa}}\left[{m}_{t}-1+\left(\frac{1}{1-1/R}\right)\right] - \label{eq:pfinfhorc} - \end{aligned}\end{gathered}\end{equation} - where $\underline{\kappa}$ is the MPC in the - infinite-horizon perfect foresight problem. In our baseline problem, - we set $G = \pLvl_{t} = 1$. 
It is straightforward to show that the - infinite-horizon perfect-foresight value function and marginal value - function are given by - \begin{equation}\begin{gathered}\begin{aligned} - \bar{v}(m_{t}) - & = \left(\frac{\bar{\cFunc}(m_{t})^{1-\rho}}{ - (1-\rho)\underline{\kappa} }\right) - \\ \bar{v}^{m}(m_{t}) & = (\bar{\cFunc}(m_{t}))^{-\rho} - \\ \Opt{v}^{m}(a_{t}) & = \beta R G_{t+1}^{-\rho} \bar{v}^{m}(\mathcal{R}_{t+1} a_{t}+1). - \end{aligned}\end{gathered}\end{equation} - - % WW delete the text on 2011-06-21 because we no longer start from the infinite horizon perfect foresight solution. - % If we choose to pursue that starting point, we need to derive the optimist's and pessimist's consumption function, - % when the last period is given by the infinite horizon perfect-foresight solution. That will change the program significantly. - % In our case, with \epsilon being 10^(-4), iteration requires only 51 periods, and 0.032 minutes. -\end{comment} - -\begin{comment}% At suggestion of WW this section was deleted because the technique is obvious and can be captured by the footnote that has been added - \subsection{Coarse Then Fine \code{aVec} } - - The speed of each iteration is directly proportional to the number - of gridpoints at which the problem must be solved. Therefore - reducing the number of points in \code{aVec} can increase - the speed of solution greatly. Of course, this also decreases the - accuracy of the solution. However, once the converged solution is - obtained for a coarse \code{aVec}, the density of the grid - can be increased and iteration can continue until a converged - solution is found for the finer \code{aVec}. - % WW delete the text on 2011-06-21 because we no longer need a finer \code{aVec}. I add a footnote in next subsection instead. - - \subsection{Coarse then Fine \texttt{$\TranShkEmp$Vec}} - - The speed of solution is roughly proportionate\footnote{It is also - true that the speed of each iteration is directly proportional to - the number of gridpoints in \code{aVec}, at which the problem must - be solved. However given our method of moderation, now the problem - could be solved very precisely based on five gridpoints only. Hence - we do not pursue the process of ``Coarse then Fine \code{aVec}.''} - to the number of points used in approximating the distribution of - shocks. At least 3 gridpoints should probably be used as an initial - minimum, and my experience is that increasing the number of gridpoints - beyond 7 generally yields only very small changes in the solution. The program - \texttt{multiperiodCon\_infhor.m} - begins with three gridpoints, and then solves for successively finer - \texttt{$\TranShkEmp$Vec}. -\end{comment} diff --git a/docs/sec_the-infinite-horizon-input.tex b/docs/sec_the-infinite-horizon-input.tex deleted file mode 100644 index f423fa5bc..000000000 --- a/docs/sec_the-infinite-horizon-input.tex +++ /dev/null @@ -1,102 +0,0 @@ -\hypertarget{the-infinite-horizon}{} -\section{The Infinite Horizon}\label{sec:the-infinite-horizon} - -All of the solution methods presented so far have involved period-by-period iteration from an assumed last period of life, as is appropriate for life cycle problems. However, if the parameter values for the problem satisfy certain conditions (detailed in \cite{BufferStockTheory}), the consumption rules (and the rest of the problem) will converge to a fixed rule as the horizon (remaining lifetime) gets large, as illustrated in Figure~\ref{fig:PlotCFuncsConverge}. 
Furthermore, Deaton~\citeyearpar{deatonLiqConstr}, Carroll~\citeyearpar{carroll:brookings,carrollBSLCPIH} and others have argued that the `buffer-stock' saving behavior that emerges under some further restrictions on parameter values is a good approximation of the behavior of typical consumers over much of the lifetime. Methods for finding the converged functions are therefore of interest, and are dealt with in this section. - -Of course, the simplest such method is to solve the problem as -specified above for a large number of periods. This is feasible, but -there are much faster methods. - -\subsection{Convergence} - -In solving an infinite-horizon problem, it is necessary to have some -metric that determines when to stop because a solution that is `good -enough' has been found. - -A natural metric is defined by the unique `target' level of wealth that \cite{BufferStockTheory} proves -will exist in problems of this kind \href{https://llorracc.github.io/BufferStockTheory#GICNrm}{under certain conditions}: The $\mTrgNrm$ such that -\begin{equation} - \Ex_t [{\mNrm}_{\prd+1}/\mNrm_t] = 1 \mbox{~if~} \mNrm_t = \mTrgNrm \label{eq:mTrgNrmet} -\end{equation} -where the accent is meant to signify that this is the value -that other $\mNrm$'s `point to.' - -Given a consumption rule $\cFunc(\mNrm)$ it is straightforward to find -the corresponding $\mTrgNrm$. So for our problem, a solution is declared -to have converged if the following criterion is met: -$\left|\mTrgNrm_{\prd+1}-\mTrgNrm_{\prd}\right| < \epsilon$, where $\epsilon$ is -a very small number and defines our degree of convergence tolerance. - -Similar criteria can obviously be specified for other problems. -However, it is always wise to plot successive function differences and -to experiment a bit with convergence criteria to verify that the -function has converged for all practical purposes. - -\begin{comment} % at suggestion of WW, this section was removed as unnecessary for the current model, which solves for the converged rule very fast - \subsection{The Last Period} - - For the last period of a finite-horizon lifetime, in the absence of a - bequest motive it is obvious that the optimal policy is to spend - everything. However, in an infinite-horizon problem there is no last - period, and the policy of spending everything is obviously very far - from optimal. Generally speaking, it is much better to start off with - a `last-period' consumption rule and value function equal to those - corresponding to the infinite-horizon solution to the perfect - foresight problem (assuming such a solution is known). - - For the perfect foresight infinite horizon consumption problem, - the solution is - \begin{equation}\begin{gathered}\begin{aligned} - \bar{\cFunc}(m_{\prd}) & = \overbrace{(1-\Rfree^{-1}(\Rfree - \DiscFac)^{1/\CRRA})}^{\equiv - \underline{\MPC}}\left[{m}_{\prd}-1+\left(\frac{1}{1-1/\Rfree}\right)\right] - \label{eq:pfinfhorc} - \end{aligned}\end{gathered}\end{equation} - where $\underline{\MPC}$ is the MPC in the - infinite-horizon perfect foresight problem. In our baseline problem, - we set $\PermGroFac = \pLvl_{\prd} = 1$. 
It is straightforward to show that the - infinite-horizon perfect-foresight value function and marginal value - function are given by - \begin{equation}\begin{gathered}\begin{aligned} - \bar{\vFunc}(m_{\prd}) - & = \left(\frac{\bar{\cFunc}(m_{\prd})^{1-\CRRA}}{ - (1-\CRRA)\underline{\MPC} }\right) - \\ \bar{\vFunc}^{m}(m_{\prd}) & = (\bar{\cFunc}(m_{\prd}))^{-\CRRA} - \\ \Opt{\vFunc}^{m}(a_{\prd}) & = \DiscFac \Rfree \PermGroFac_{\prd+1}^{-\CRRA} \bar{\vFunc}^{m}(\RNrm_{\prd+1} a_{\prd}+1). - \end{aligned}\end{gathered}\end{equation} - - % WW delete the text on 2011-06-21 because we no longer start from the infinite horizon perfect foresight solution. - % If we choose to pursue that starting point, we need to derive the optimist's and pessimist's consumption function, - % when the last period is given by the infinite horizon perfect-foresight solution. That will change the program significantly. - % In our case, with \epsilon being 10^(-4), iteration requires only 51 periods, and 0.032 minutes. -\end{comment} - -\begin{comment}% At suggestion of WW this section was deleted because the technique is obvious and can be captured by the footnote that has been added - \subsection{Coarse Then Fine \code{aVec} } - - The speed of each iteration is directly proportional to the number - of gridpoints at which the problem must be solved. Therefore - reducing the number of points in \code{aVec} can increase - the speed of solution greatly. Of course, this also decreases the - accuracy of the solution. However, once the converged solution is - obtained for a coarse \code{aVec}, the density of the grid - can be increased and iteration can continue until a converged - solution is found for the finer \code{aVec}. - % WW delete the text on 2011-06-21 because we no longer need a finer \code{aVec}. I add a footnote in next subsection instead. - - \subsection{Coarse then Fine \texttt{$\TranShkEmp$Vec}} - - The speed of solution is roughly proportionate\footnote{It is also - true that the speed of each iteration is directly proportional to - the number of gridpoints in \code{aVec}, at which the problem must - be solved. However given our method of moderation, now the problem - could be solved very precisely based on five gridpoints only. Hence - we do not pursue the process of ``Coarse then Fine \code{aVec}.''} - to the number of points used in approximating the distribution of - shocks. At least 3 gridpoints should probably be used as an initial - minimum, and my experience is that increasing the number of gridpoints - beyond 7 generally yields only very small changes in the solution. The program - \texttt{multiperiodCon\_infhor.m} - begins with three gridpoints, and then solves for successively finer - \texttt{$\TranShkEmp$Vec}. -\end{comment} diff --git a/docs/sec_the-problem-input-clean.tex b/docs/sec_the-problem-input-clean.tex deleted file mode 100644 index 48f931afd..000000000 --- a/docs/sec_the-problem-input-clean.tex +++ /dev/null @@ -1,46 +0,0 @@ - -\hypertarget{the-problem}{} -\section{The Problem}\label{sec:the-problem} - -The usual analysis of dynamic stochastic programming problems packs a great many events (intertemporal choice, stochastic shocks, intertemporal returns, income growth, the taking of expectations, time discounting, and more) into a complex decision in which the agent makes an optimal choice simultaneously taking all these elements into account. 
For the dissection here, we will be careful to break down everything that happens into distinct operations so that each element can be scrutinized and understood in isolation. - -We are interested in the behavior of a consumer who begins {period} $t$ with a certain amount of `capital' $\kLvl_{t}$, which is immediately rewarded by a return factor $R_{t}$ with the proceeds deposited in a \textbf{b}ank \textbf{b}alance: -\begin{equation}\begin{gathered}\begin{aligned}\label{eq:bLvl} - \bLvl_{t} & = \kLvl_{t}R_{t}. - \end{aligned}\end{gathered}\end{equation} - -Simultaneously with the realization of the capital return, the consumer also receives noncapital income $\yLvl_{t}$, which is determined by multiplying the consumer's `permanent income' $\pLvl_{t}$ by a transitory shock $\TranShkEmp_{t}$: -\begin{equation}\begin{gathered}\begin{aligned} - \yLvl_{t} & = \pLvl_{t}\TranShkEmp_{t} \label{eq:yLvl} - \end{aligned}\end{gathered}\end{equation} -whose expectation is 1 (that is, before realization of the transitory shock, the consumer's expectation is that actual income will on average be equal to permanent income $\pLvl_{t}$). - -The combination of bank balances $\bLvl$ and income $\yLvl$ defines the consumer's `market resources' (sometimes called `cash-on-hand,' following~\cite{deatonUnderstandingC}): -\begin{equation}\begin{gathered}\begin{aligned} - \mLvl_{t} & = \bLvl_{t}+\yLvl_{t} \label{eq:mLvl}, - \end{aligned}\end{gathered}\end{equation} -available to be spent on consumption $\cLvl_{t}$ for a consumer subject to a liquidity constraint that requires $\cLvl \leq \mLvl$ (we are not imposing such a constraint yet - see subsection~\ref{subsec:LiqConstrSelfImposed} below for further discussion). - -The consumer's goal is to maximize discounted utility from consumption over the rest of a lifetime ending at date $T$: -% chktex-file 36 - \begin{equation}\label{eq:MaxProb} - \max~\Ex_{t}\left[\sum_{n=0}^{T-t}\beta^{n} \uFunc(\cLvl_{t+n})\right]. - \end{equation} -Income evolves according to: - \begin{equation}\begin{gathered}\begin{aligned} - \pLvl_{t+1} = G_{t+1}\pLvl_{t} & \text{~~ -- permanent labor income dynamics} \label{eq:permincgrow} - \\ \log ~ \TranShkEmp_{t+n} \sim ~\Nrml(-\std_{\TranShkEmp}^{2}/2,\std_{\TranShkEmp}^{2}) & \text{~~ -- lognormal transitory shocks}~\forall~n>0 . - \end{aligned}\end{gathered}\end{equation} - -Equation \eqref{eq:permincgrow} indicates that we are allowing for a predictable average profile of income growth over the lifetime $\{G\}_{0}^{T}$ (to capture typical career wage paths, pension arrangements, etc).\footnote{For expositional and pedagogical purposes, this equation assumes that there are no shocks to permanent income (though they are trivial to add). A large literature finds that, in reality, permanent (or at least extremely highly persistent) shocks exist and are quite large; such shocks therefore need to be incorporated into any `serious' model (that is, one that hopes to match and explain empirical data), but the treatment of permanent shocks clutters the exposition without adding much to the intuition, so permanent shocks are omitted from the analysis until the last section of the notes, which shows how to match the model with empirical micro data. For a full treatment of the theory including permanent shocks, see \cite{BufferStockTheory}.} Finally, the utility function is of the Constant Relative Risk Aversion (CRRA) form, $\uFunc(\bullet) = \bullet^{1-\rho}/(1-\rho)$.
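As a check on the notation, the within-period accounting in equations \eqref{eq:bLvl}--\eqref{eq:mLvl} and the CRRA utility function translate directly into code; this sketch uses illustrative variable names rather than any particular implementation:
\begin{verbatim}
def u(c, rho):
    # CRRA utility u(c) = c**(1 - rho) / (1 - rho)
    return c ** (1.0 - rho) / (1.0 - rho)

def market_resources(k, R, p, theta):
    b = k * R      # bank balances: capital plus its return (eq:bLvl)
    y = p * theta  # noncapital income: permanent income x shock (eq:yLvl)
    return b + y   # market resources m (eq:mLvl)
\end{verbatim}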
- -It is well known that this problem can be rewritten in recursive (Bellman) form: - \begin{equation}\begin{gathered}\begin{aligned} - v_{t}(\mLvl_{t},\pLvl_{t}) & = \max_{\cCtrl}~ \uFunc(\cCtrl) + \beta \Ex_{t}[ v_{t+1}(\mLvl_{t+1},\pLvl_{t+1})]\label{eq:vrecurse} - \end{aligned}\end{gathered}\end{equation} -subject to the Dynamic Budget Constraint (DBC) implicitly defined by equations~\eqref{eq:bLvl}-\eqref{eq:mLvl} and to the transition equation that defines next period's initial capital as this period's end-of-period assets: -\begin{equation}\begin{gathered}\begin{aligned} - \kLvl_{t+1} & = \aLvl_{t}. \label{eq:transitionstate} - \end{aligned}\end{gathered}\end{equation} - - diff --git a/docs/sec_the-problem-input.tex b/docs/sec_the-problem-input.tex deleted file mode 100644 index 482664ddd..000000000 --- a/docs/sec_the-problem-input.tex +++ /dev/null @@ -1,46 +0,0 @@ - -\hypertarget{the-problem}{} -\section{The Problem}\label{sec:the-problem} - -The usual analysis of dynamic stochastic programming problems packs a great many events (intertemporal choice, stochastic shocks, intertemporal returns, income growth, the taking of expectations, time discounting, and more) into a complex decision in which the agent makes an optimal choice simultaneously taking all these elements into account. For the dissection here, we will be careful to break down everything that happens into distinct operations so that each element can be scrutinized and understood in isolation. - -We are interested in the behavior of a consumer who begins {period} $\prd$ with a certain amount of `capital' $\kLvl_{\prd}$, which is immediately rewarded by a return factor $\Rfree_{\prd}$ with the proceeds deposited in a \textbf{b}ank \textbf{b}alance: -\begin{equation}\begin{gathered}\begin{aligned}\label{eq:bLvl} - \bLvl_{\prd} & = \kLvl_{\prd}\Rfree_{\prd}. - \end{aligned}\end{gathered}\end{equation} - -Simultaneously with the realization of the capital return, the consumer also receives noncapital income $\yLvl_{\prd}$, which is determined by multiplying the consumer's `permanent income' $\pLvl_{\prd}$ by a transitory shock $\TranShkEmp_{\prd}$: -\begin{equation}\begin{gathered}\begin{aligned} - \yLvl_{\prd} & = \pLvl_{\prd}\TranShkEmp_{\prd} \label{eq:yLvl} - \end{aligned}\end{gathered}\end{equation} -whose expectation is 1 (that is, before realization of the transitory shock, the consumer's expectation is that actual income will on average be equal to permanent income $\pLvl_{\prd}$). - -The combination of bank balances $\bLvl$ and income $\yLvl$ defines the consumer's `market resources' (sometimes called `cash-on-hand,' following~\cite{deatonUnderstandingC}): -\begin{equation}\begin{gathered}\begin{aligned} - \mLvl_{\prd} & = \bLvl_{\prd}+\yLvl_{\prd} \label{eq:mLvl}, - \end{aligned}\end{gathered}\end{equation} -available to be spent on consumption $\cLvl_{\prd}$ for a consumer subject to a liquidity constraint that requires $\cLvl \leq \mLvl$ (we are not imposing such a constraint yet - see subsection~\ref{subsec:LiqConstrSelfImposed} below for further discussion). - -The consumer's goal is to maximize discounted utility from consumption over the rest of a lifetime ending at date $\trmT$: -% chktex-file 36 - \begin{equation}\label{eq:MaxProb} - \max~\Ex_{\prd}\left[\sum_{n=0}^{\trmT-\prd}\DiscFac^{n} \uFunc(\cLvl_{\prd+n})\right].
- \end{equation} -Income evolves according to: - \begin{equation}\begin{gathered}\begin{aligned} - \pLvl_{\prd+1} = \PermGroFac_{\prd+1}\pLvl_{\prd} & \text{~~ -- permanent labor income dynamics} \label{eq:permincgrow} - \\ \log ~ \TranShkEmp_{\prd+n} \sim ~\Nrml(-\std_{\TranShkEmp}^{2}/2,\std_{\TranShkEmp}^{2}) & \text{~~ -- lognormal transitory shocks}~\forall~n>0 . - \end{aligned}\end{gathered}\end{equation} - -Equation \eqref{eq:permincgrow} indicates that we are allowing for a predictable average profile of income growth over the lifetime $\{\PermGroFac\}_{0}^{T}$ (to capture typical career wage paths, pension arrangements, etc).\footnote{For expositional and pedagogical purposes, this equation assumes that there are no shocks to permanent income (though they are trivial to add). A large literature finds that, in reality, permanent (or at least extremely highly persistent) shocks exist and are quite large; such shocks therefore need to be incorporated into any `serious' model (that is, one that hopes to match and explain empirical data), but the treatment of permanent shocks clutters the exposition without adding much to the intuition, so permanent shocks are omitted from the analysis until the last section of the notes, which shows how to match the model with empirical micro data. For a full treatment of the theory including permanent shocks, see \cite{BufferStockTheory}.} Finally, the utility function is of the Constant Relative Risk Aversion (CRRA) form, $\uFunc(\bullet) = \bullet^{1-\CRRA}/(1-\CRRA)$. - -It is well known that this problem can be rewritten in recursive (Bellman) form: - \begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{\prd}(\mLvl_{\prd},\pLvl_{\prd}) & = \max_{\cCtrl}~ \uFunc(\cCtrl) + \DiscFac \Ex_{\prd}[ \vFunc_{\prd+1}(\mLvl_{\prd+1},\pLvl_{\prd+1})]\label{eq:vrecurse} - \end{aligned}\end{gathered}\end{equation} -subject to the Dynamic Budget Constraint (DBC) implicitly defined by equations~\eqref{eq:bLvl}-\eqref{eq:mLvl} and to the transition equation that defines next period's initial capital as this period's end-of-period assets: -\begin{equation}\begin{gathered}\begin{aligned} - \kLvl_{\prd+1} & = \aLvl_{\prd}. \label{eq:transitionstate} - \end{aligned}\end{gathered}\end{equation} - - diff --git a/docs/sec_the-usual-theory-input.tex b/docs/sec_the-usual-theory-input.tex deleted file mode 100644 index 2bfd8ded4..000000000 --- a/docs/sec_the-usual-theory-input.tex +++ /dev/null @@ -1,78 +0,0 @@ - -\hypertarget{the-usual-theory}{} -\section{The Usual Theory, and a Bit More Notation}\label{sec:the-usual-theory} - -%\renewcommand{\prd}{t} - -\subsection{Periods, Stages, Steps} - -For the problem specified in \eqref{eq:vNormed}, the agent has only one decision to make in each {period} (how much to consume). %Many problems (including the portfolio choice example elaborated below) may have several distinct decision {stages} within the {period}, which requires a notation that permits the handoff of the solution to one {stage} to its successor. Here, we have -This simplifies matters because there is no need to distinguish between the next {stage} and the problem of the following {period}, so we can conflate the two. (See the portfolio choice example below for the notation and analysis of multi-{stage} problems.)
- -\subsection{Steps} - -Generically, we want to think of the Bellman solution as having three {steps}: -\begin{enumerate} -\item \textbf{\Arrival}: Incoming state variables (e.g., $\kNrm$) are known, but any shocks associated with the period have not been realized and decision(s) have not yet been made -\item \textbf{\Decision}: All exogenous variables (like income shocks, rate of return shocks, and predictable income growth $\PermGroFac$) have been realized (so that, e.g., $\mNrm$'s value is known) and the agent solves the optimization problem -\item \textbf{\Continuation}: After all decisions have been made, their consequences are measured by evaluation of the continuing-value function at the values of the `outgoing' state variables (sometimes called `post-state' variables). -\end{enumerate} - -In the standard treatment in the literature, the (implicit) default assumption is that the {step} where the agent is solving a decision problem is the unique {step} at which the problem is defined. This is what was done above, when (for example) in \eqref{eq:vNormed} we related the value $\vFunc$ of the current decision to the expectation of the future value $\vFunc_{\prd+1}$. Here, instead, we want to encapsulate the current {stage}'s problem as a standalone object, which is solved by taking as given an exogenously-provided continuation-value function (in our case, $\vEndStp(a)$). - -When we want to refer to a specific {step} in the one {stage} of $\prd$, we will do so by supplementing the {step} with an indicator which tracks the {step} (and we need not denote the {stage} within the {period} because we have assumed there is only one {stage} in the {period}): -\begin{center} -% \mbox{% - \begin{tabular}{r|c|c|l|l} - Step & Indicator & State & Usage & Explanation \\ \hline - {\Arrival} & $\arvl$ prefix & $\kNrm$ & $\vBegStp({\kNrm})$ & value at entry to $\prd$ (before shocks) \\ - {\Decision} & (blank/none) & $\mNrm$ & $\vMidStp(\mNrm)$ & value of $\prd$-decision (after shocks) \\ - {\Continuation} & $\cntn$ suffix & $\aNrm$ & $\vEndStp(\aNrm)$ & value at exit (after decision) - \end{tabular} -% } -\end{center} - -Notice that different {step}s of the {stage} have distinct state variables. $\kNrm$ is the state at the beginning of the {stage/period} because the shocks that yield $\mNrm$ from $\kNrm$ have not yet been realized. The state variable for the continuation {step} is $\aNrm$ because after the consumption decision has been made the model assumes that all that matters is where you have ended up, not how you got there. - -We can now restate the problem \eqref{eq:vNormed} with our new notation: -\begin{equation}\begin{gathered}\begin{aligned} - \vFunc(\mNrm) & = \max_{\cNrm} ~~ \uFunc(\cNrm)+ \vEndStp(\mNrm-\cNrm) -\end{aligned}\end{gathered}\end{equation} -whose first order condition with respect to $\cNrm$ is -\begin{equation}\begin{gathered}\begin{aligned} - \uFunc^{c}(\cNrm) &= \vEndStp^{\kNrm}(\mNrm-\cNrm) \label{eq:FOCnew} -\end{aligned}\end{gathered}\end{equation} -which is mathematically equivalent to the usual Euler equation for consumption. - -We will revert to this formulation when we turn to section~\ref{subsec:egm}. - -\hypertarget{summing-up}{} -\subsection{Summing Up}\label{subsec:summing-up} -For future reference, it will be useful here to write the full expressions for the distinct value functions at the {\Arrival} ($\BegMark$) and {\Decision} {steps}.
% this is said two par ahead: (Recalling that the continuation-value function $\vEndStp(a)$ is provided to the solution algorithm as an input). - -There is no need to use our {period}-identifying notation for the model's variables; $\kNrm$, for example, will have only one unique value over the course of the {period} and therefore a notation like $\kNrm_{\EndStp}$ would be pointless; the same is true of all other variables. - -Recall that the continuation value function $\vFunc_{\EndStp}(\aNrm)=\DiscFac \vFunc_{\BegStpNxt}(\aNrm)$ is provided as an input to the current {stage} Bellman problem. Since within the scope of the solution of the current {stage} there is only one such continuation value function, in the solution context there is no point in keeping the {period} subscript when we write this function. The same point applies to all variables and functions in the {stage}. Given the continuation value function $\vEndStp$, the problem within the {stage} can be written with only the {step} indicators: - \begin{equation}\begin{gathered}\begin{aligned} - \vFunc_{\arvl}(\kNrm) & = \Ex_{\arvl}[\vFunc(\overbrace{\kNrm \RNrm + \TranShkEmp}^{\mNrm})] \label{eq:vBegStp} - \end{aligned}\end{gathered}\end{equation} - \begin{equation}\begin{gathered}\begin{aligned} - \vFunc(\mNrm) & = \max_{\cNrm}~~\uFunc(\cNrm) +\Ex[ \vFunc_{\cntn}(\overbrace{\mNrm-\cNrm}^{\aNrm})] \label{eq:vMid} - \end{aligned}\end{gathered}\end{equation} -% and -% \begin{equation}\begin{gathered}\begin{aligned} -% \vEndStp(\aNrm_{\prd}) & = \DiscFac \vBegStpNxt(\overbrace{\kNrm_{\prd+1}}^{\aNrm_{\prd}}) \label{eq:vEndtdefn} -% \end{aligned}\end{gathered}\end{equation} - -\begin{comment} - \subsection{Implementation in Python} - - The code implementing the tasks outlined in each of the sections to come is available in the \texttt{\href{https://econ-ark.org/materials/SolvingMicroDSOPs}{SolvingMicroDSOPs}} Jupyter notebook, written in \href{https://python.org}{Python}. The notebook imports various modules, including the standard \texttt{numpy} and \texttt{scipy} modules used for numerical methods in Python, as well as some user-defined modules designed to provide numerical solutions to the consumer's problem from the previous section. Before delving into the computational exercise, it is essential to touch on the practicality of these custom modules. - - \subsubsection{Useful auxiliary files} - - In this exercise, two primary user-defined modules are frequently imported and utilized. The first is the \texttt{gothic\_class} module, which contains functions describing the end-of-period value functions found in equations \eqref{eq:vBegStp}--\eqref{eq:vEnd} (and the corresponding first and second derivatives). %The advantage of defining functions in the code which decompose the consumer's optimal behavior in a given period will become evident in section \ref{subsec:transformation} - - The \texttt{resources} module is also used repeatedly throughout the notebook. This file has three primary objectives: (i) providing functions that discretize the continuous distributions from the theoretical model that describe the uncertainty a consumer faces, (ii) defining the utility function over consumption under a number of specifications, and (iii) enhancing the grid of end-of-period assets for which functions (such as those from the \texttt{gothic\_class} module) will be defined. These objectives will be discussed in greater detail and with respect to the numerical methods used to solve the problem in subsequent sections of this document.
-\end{comment} - diff --git a/docs/sec_titlepage-input-clean.tex b/docs/sec_titlepage-input-clean.tex deleted file mode 100644 index 2098af374..000000000 --- a/docs/sec_titlepage-input-clean.tex +++ /dev/null @@ -1,55 +0,0 @@ -% Redefine \onlyinsubfile command defined in local.sty file: -% This lets any submaterial called from this doc know that it is not standalone -%\renewcommand{\onlyinsubfile}[1]{}\renewcommand{\notinsubfile}[1]{#1} - -\pagenumbering{roman} - -\title{Solution Methods for Microeconomic \\ Dynamic Stochastic Optimization Problems} - -\author{Christopher D. Carroll\authNum} - -\keywords{Dynamic Stochastic Optimization, Method of Simulated Moments, Structural Estimation, Indirect Inference} -\jelclass{E21, F41} - -\date{2024-04-20} -\maketitle -\footnotesize - -\noindent Note: The GitHub repo {\SMDSOPrepo} associated with this document contains Python code that produces all results, from scratch, except for the last section on indirect inference. The numerical results have been confirmed by showing that the answers that the raw Python code produces correspond to the answers produced by tools available in the {\ARKurl} toolkit, more specifically those in the {\HARKrepo}, which has full {\HARKdocs}. The MSM results at the end have been superseded by tools in the {\EMDSOPrepo}. - -\normalsize - -\hypertarget{abstract}{} -\begin{abstract} - These notes describe tools for solving microeconomic dynamic stochastic optimization problems, and show how to use those tools for efficiently estimating a standard life cycle consumption/saving model using microeconomic data. No attempt is made at a systematic overview of the many possible technical choices; instead, I present a specific set of methods that have proven useful in my own work (and explain why other popular methods, such as value function iteration, are a bad idea). Paired with these notes is Python code that solves the problems described in the text. -\end{abstract} - -% \ifthenelse{\boolean{Web}}{}{ -\begin{footnotesize} - \begin{center} - \begin{tabbing} - \texttt{~~~~PDF:~} \= \= {\urlPDF} \\ - \texttt{~Slides:~} \> \> {\urlSlides} \\ - \texttt{~~~~Web:~} \> \> {\urlHTML} \\ - \texttt{~~~Code:~} \> \> {\urlCode} \\ - \texttt{Archive:~} \> \> {\urlRepo} \\ - \texttt{~~~~~~~~~} \> \> \textit{(Contains LaTeX code for this document and software producing figures and results)} - \end{tabbing} - \end{center} -\end{footnotesize} -% } -\begin{authorsinfo} - \name{Carroll: Department of Economics, Johns Hopkins University, Baltimore, MD, \\ - \href{mailto:ccarroll@jhu.edu}{\texttt{ccarroll@jhu.edu}}} -\end{authorsinfo} - -\thanksFooter{The notes were originally written for my Advanced Topics in Macroeconomic Theory class at Johns Hopkins University; instructors elsewhere are welcome to use them for teaching purposes. Relative to earlier drafts, this version incorporates several improvements related to new results in the paper \href{http://econ-ark.github.io/BufferStockTheory}{``Theoretical Foundations of Buffer Stock Saving''} (especially tools for approximating the consumption and value functions).
Like the last major draft, it also builds on material in ``The Method of Endogenous Gridpoints for Solving Dynamic Stochastic Optimization Problems'' published in \textit{Economics Letters}, available at \url{http://www.econ2.jhu.edu/people/ccarroll/EndogenousArchive.zip}, and it includes sample code for a method of simulated moments estimation of the life cycle model \textit{\`a la} \cite{gpLifecycle} and Cagetti~\citeyearpar{cagettiWprofiles}. Background derivations, notation, and related subjects are treated in my class notes for first year macro, available at \url{http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/consumption}. I am grateful to several generations of graduate students for helping me to refine these notes, to Marc Chan for help in updating the text and software to be consistent with \cite{carrollEGM}, to Kiichi Tokuoka for drafting the section on structural estimation, to Damiano Sandri for exceptionally insightful help in revising and updating the method of simulated moments estimation section, and to Weifeng Wu and Metin Uyanik for revising to be consistent with the `method of moderation' and other improvements. All errors are my own. This document can be cited as \cite{SolvingMicroDSOPs} in the references.} - -\titlepagefinish -%\setcounter{page}{1} - -\thispagestyle{empty} -\ifpdf % The table of contents does not work if not in pdf mode -\tableofcontents \addtocontents{toc}{\vspace{1em}}\newpage -\fi -\newpage\pagenumbering{arabic} diff --git a/docs/sec_titlepage-input.tex b/docs/sec_titlepage-input.tex deleted file mode 100644 index 2098af374..000000000 --- a/docs/sec_titlepage-input.tex +++ /dev/null @@ -1,55 +0,0 @@ -% Redefine \onlyinsubfile command defined in local.sty file: -% This lets any submaterial called from this doc know that it is not standalone -%\renewcommand{\onlyinsubfile}[1]{}\renewcommand{\notinsubfile}[1]{#1} - -\pagenumbering{roman} - -\title{Solution Methods for Microeconomic \\ Dynamic Stochastic Optimization Problems} - -\author{Christopher D. Carroll\authNum} - -\keywords{Dynamic Stochastic Optimization, Method of Simulated Moments, Structural Estimation, Indirect Inference} -\jelclass{E21, F41} - -\date{2024-04-20} -\maketitle -\footnotesize - -\noindent Note: The GitHub repo {\SMDSOPrepo} associated with this document contains Python code that produces all results, from scratch, except for the last section on indirect inference. The numerical results have been confirmed by showing that the answers that the raw Python code produces correspond to the answers produced by tools available in the {\ARKurl} toolkit, more specifically those in the {\HARKrepo}, which has full {\HARKdocs}. The MSM results at the end have been superseded by tools in the {\EMDSOPrepo}. - -\normalsize - -\hypertarget{abstract}{} -\begin{abstract} - These notes describe tools for solving microeconomic dynamic stochastic optimization problems, and show how to use those tools for efficiently estimating a standard life cycle consumption/saving model using microeconomic data. No attempt is made at a systematic overview of the many possible technical choices; instead, I present a specific set of methods that have proven useful in my own work (and explain why other popular methods, such as value function iteration, are a bad idea). Paired with these notes is Python code that solves the problems described in the text.
-\end{abstract} - -% \ifthenelse{\boolean{Web}}{}{ -\begin{footnotesize} - \begin{center} - \begin{tabbing} - \texttt{~~~~PDF:~} \= \= {\urlPDF} \\ - \texttt{~Slides:~} \> \> {\urlSlides} \\ - \texttt{~~~~Web:~} \> \> {\urlHTML} \\ - \texttt{~~~Code:~} \> \> {\urlCode} \\ - \texttt{Archive:~} \> \> {\urlRepo} \\ - \texttt{~~~~~~~~~} \> \> \textit{(Contains LaTeX code for this document and software producing figures and results)} - \end{tabbing} - \end{center} -\end{footnotesize} -% } -\begin{authorsinfo} - \name{Carroll: Department of Economics, Johns Hopkins University, Baltimore, MD, \\ - \href{mailto:ccarroll@jhu.edu}{\texttt{ccarroll@jhu.edu}}} -\end{authorsinfo} - -\thanksFooter{The notes were originally written for my Advanced Topics in Macroeconomic Theory class at Johns Hopkins University; instructors elsewhere are welcome to use them for teaching purposes. Relative to earlier drafts, this version incorporates several improvements related to new results in the paper \href{http://econ-ark.github.io/BufferStockTheory}{``Theoretical Foundations of Buffer Stock Saving''} (especially tools for approximating the consumption and value functions). Like the last major draft, it also builds on material in ``The Method of Endogenous Gridpoints for Solving Dynamic Stochastic Optimization Problems'' published in \textit{Economics Letters}, available at \url{http://www.econ2.jhu.edu/people/ccarroll/EndogenousArchive.zip}, and it includes sample code for a method of simulated moments estimation of the life cycle model \textit{\`a la} \cite{gpLifecycle} and Cagetti~\citeyearpar{cagettiWprofiles}. Background derivations, notation, and related subjects are treated in my class notes for first year macro, available at \url{http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/consumption}. I am grateful to several generations of graduate students for helping me to refine these notes, to Marc Chan for help in updating the text and software to be consistent with \cite{carrollEGM}, to Kiichi Tokuoka for drafting the section on structural estimation, to Damiano Sandri for exceptionally insightful help in revising and updating the method of simulated moments estimation section, and to Weifeng Wu and Metin Uyanik for revising to be consistent with the `method of moderation' and other improvements. All errors are my own. This document can be cited as \cite{SolvingMicroDSOPs} in the references.} - -\titlepagefinish -%\setcounter{page}{1} - -\thispagestyle{empty} -\ifpdf % The table of contents does not work if not in pdf mode -\tableofcontents \addtocontents{toc}{\vspace{1em}}\newpage -\fi -\newpage\pagenumbering{arabic} diff --git a/docs/tmp.pdf b/docs/tmp.pdf deleted index 966e9bf1b..000000000 Binary files a/docs/tmp.pdf and /dev/null differ diff --git a/docs/tmp.tex b/docs/tmp.tex deleted index 18a81cd21..000000000 --- a/docs/tmp.tex +++ /dev/null @@ -1,37 +0,0 @@ -\documentclass{article} -\usepackage[utf8]{inputenc} -\usepackage{./.econtexRoot} % Set paths (like, \LaTeXInputs) - -\usepackage{econark} -\usepackage{\LaTeXInputs/local-macros} % econark-generic -\usepackage{SolvingMicroDSOPs-private} -\usepackage{amssymb} -\usepackage{unicode-math} - -\begin{document} -Hello, World!
Here is a Unicode character: ü -%\newcommand{\umlaut}{ü} -%\newcommand{\ü}{umlaut} - -%\ü - -\umlaut\umlaut - -%$\Ex$ -\end{document} \endinput - - -\documentclass{article} -\usepackage{./.econtexRoot} % Set paths (like, \LaTeXInputs) - -\usepackage[utf8]{inputenc} -\usepackage{econark} -\usepackage{\LaTeXInputs/local-macros} % econark-generic - -\usepackage{SolvingMicroDSOPs-private} - -\begin{document} -\[ - \Ex -\] -\end{document}\endinput diff --git a/docs/unicode-subs-declare.sty b/docs/unicode-subs-declare.sty deleted file mode 100644 index d6d41541f..000000000 --- a/docs/unicode-subs-declare.sty +++ /dev/null @@ -1,17 +0,0 @@ -% If you encounter the unicode character, substitute the LaTeX on compile -\DeclareUnicodeCharacter{1D53C}{\mathbb{E}} % 𝔼 -\DeclareUnicodeCharacter{1D41A}{\mathbf{a}} % 𝐚 -\DeclareUnicodeCharacter{1D41B}{\mathbf{b}} % 𝐛 -\DeclareUnicodeCharacter{1D41C}{\mathbf{c}} % 𝐜 -\DeclareUnicodeCharacter{1D429}{\mathbf{p}} % 𝐩 -\DeclareUnicodeCharacter{1D424}{\mathbf{k}} % 𝐤 -\DeclareUnicodeCharacter{1D426}{\mathbf{m}} % 𝐦 -\DeclareUnicodeCharacter{1D42F}{\mathbf{v}} % 𝐯 -\DeclareUnicodeCharacter{1D432}{\mathbf{y}} % 𝐲 -\DeclareUnicodeCharacter{1D4A2}{\mathcal{G}} % 𝒢 -\DeclareUnicodeCharacter{211B}{\mathcal{R}} % ℛ -\DeclareUnicodeCharacter{1D69E}{\mathrm{u}} % 𝚞 -\DeclareUnicodeCharacter{1D69F}{\mathrm{v}} % 𝚟 -\DeclareUnicodeCharacter{03B2}{\beta} % β -\DeclareUnicodeCharacter{03C3}{\sigma} % σ -\DeclareUnicodeCharacter{03C1}{\rho} % ρ diff --git a/docs/unicode-subs-declare.tex b/docs/unicode-subs-declare.tex deleted file mode 100644 index d6d41541f..000000000 --- a/docs/unicode-subs-declare.tex +++ /dev/null @@ -1,17 +0,0 @@ -% If you encounter the unicode character, substitute the LaTeX on compile -\DeclareUnicodeCharacter{1D53C}{\mathbb{E}} % 𝔼 -\DeclareUnicodeCharacter{1D41A}{\mathbf{a}} % 𝐚 -\DeclareUnicodeCharacter{1D41B}{\mathbf{b}} % 𝐛 -\DeclareUnicodeCharacter{1D41C}{\mathbf{c}} % 𝐜 -\DeclareUnicodeCharacter{1D429}{\mathbf{p}} % 𝐩 -\DeclareUnicodeCharacter{1D424}{\mathbf{k}} % 𝐤 -\DeclareUnicodeCharacter{1D426}{\mathbf{m}} % 𝐦 -\DeclareUnicodeCharacter{1D42F}{\mathbf{v}} % 𝐯 -\DeclareUnicodeCharacter{1D432}{\mathbf{y}} % 𝐲 -\DeclareUnicodeCharacter{1D4A2}{\mathcal{G}} % 𝒢 -\DeclareUnicodeCharacter{211B}{\mathcal{R}} % ℛ -\DeclareUnicodeCharacter{1D69E}{\mathrm{u}} % 𝚞 -\DeclareUnicodeCharacter{1D69F}{\mathrm{v}} % 𝚟 -\DeclareUnicodeCharacter{03B2}{\beta} % β -\DeclareUnicodeCharacter{03C3}{\sigma} % σ -\DeclareUnicodeCharacter{03C1}{\rho} % ρ diff --git a/sec_notation-input-clean.tex b/sec_notation-input-clean.tex index 8494388ec..00c7fce06 100644 --- a/sec_notation-input-clean.tex +++ b/sec_notation-input-clean.tex @@ -70,9 +70,7 @@ \subsection{The Decision Problem in the New Notation}\label{subsec:decision-prob Having defined these notational conventions, we are now ready to move to substance. -\begin{comment} % - % \subsection{Implementation in Python} The code implementing the tasks outlined in each of the sections to come is available in the \texttt{\href{https://econ-ark.org/materials/SolvingMicroDSOPs}{SolvingMicroDSOPs}} Jupyter notebook, written in \href{https://python.org}{Python}. The notebook imports various modules, including the standard \texttt{numpy} and \texttt{scipy} modules used for numerical methods in Python, as well as some user-defined modules designed to provide numerical solutions to the consumer's problem from the previous section.
Before delving into the computational exercise, it is essential to touch on the practicality of these custom modules. diff --git a/sec_notation-input.tex b/sec_notation-input.tex index bc6f11dd8..8bce9af2a 100644 --- a/sec_notation-input.tex +++ b/sec_notation-input.tex @@ -70,9 +70,7 @@ \subsection{The Decision Problem in the New Notation}\label{subsec:decision-prob Having defined these notational conventions, we are now ready to move to substance. -\begin{comment} % - % \subsection{Implementation in Python} The code implementing the tasks outlined in each of the sections to come is available in the \texttt{\href{https://econ-ark.org/materials/SolvingMicroDSOPs}{SolvingMicroDSOPs}} Jupyter notebook, written in \href{https://python.org}{Python}. The notebook imports various modules, including the standard \texttt{numpy} and \texttt{scipy} modules used for numerical methods in Python, as well as some user-defined modules designed to provide numerical solutions to the consumer's problem from the previous section. Before delving into the computational exercise, it is essential to touch on the practicality of these custom modules. diff --git a/sec_notation.tex b/sec_notation.tex index 195ffb815..958b69ef1 100644 --- a/sec_notation.tex +++ b/sec_notation.tex @@ -76,9 +76,7 @@ \subsection{The Decision Problem in the New Notation}\label{subsec:decision-prob Having defined these notational conventions, we are now ready to move to substance. -\begin{comment} % - % \subsection{Implementation in Python} The code implementing the tasks outlined in each of the sections to come is available in the \texttt{\href{https://econ-ark.org/materials/SolvingMicroDSOPs}{SolvingMicroDSOPs}} Jupyter notebook, written in \href{https://python.org}{Python}. The notebook imports various modules, including the standard \texttt{numpy} and \texttt{scipy} modules used for numerical methods in Python, as well as some user-defined modules designed to provide numerical solutions to the consumer's problem from the previous section. Before delving into the computational exercise, it is essential to touch on the practicality of these custom modules. diff --git a/sec_solving-the-next-input-clean.tex b/sec_solving-the-next-input-clean.tex index 5019c9af3..d2a8ef97e 100644 --- a/sec_solving-the-next-input-clean.tex +++ b/sec_solving-the-next-input-clean.tex @@ -118,7 +118,6 @@ \subsection{The Approximate Consumption and Value Functions} \hypertarget{an-interpolated-consumption-function}{} \subsection{An Interpolated Consumption Function} \label{subsec:LinInterp} -We can now apply our solution to \eqref{eq:vEndTm1} to each of the values in $\vctr{m}$, generating a corresponding optimal $\vctr{c}$. This is called `sampling' the consumption function. Using the ordered pairs $\{\vctr{m},\vctr{c}\}$ we can create a piecewise linear `interpolating function' (a `spline') which, when applied to any input $\vctr{m}[1] \leq m\leq \vctr{m}[-1]$, will yield the value of $c$ that corresponds to a linear `connect-the-dots' interpolation of the value of $c$ from the values of the two nearest computed $\{m,c\}$ points.\footnote{For a useful treatment of various kinds of interpolation appropriate for different questions, see } % This is accomplished in ``An Interpolated Consumption Function,'' which generates an interpolating function that we designate $\Aprx{\cFunc}_{\MidPrdLsT}(m)$.
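For instance, \texttt{numpy}'s one-dimensional linear interpolator performs exactly this `connect-the-dots' operation; the gridpoint values below are made up for illustration:
\begin{verbatim}
import numpy as np

mVec = np.array([0.5, 1.0, 2.0, 4.0])  # sampled market-resource gridpoints
cVec = np.array([0.4, 0.7, 1.1, 1.6])  # optimal consumption at each point
c_interp = lambda m: np.interp(m, mVec, cVec)
print(c_interp(1.5))  # 0.9: halfway between (1.0, 0.7) and (2.0, 1.1)
\end{verbatim}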
%When called with an $\mNrm$ that is equal to one of the points in $\code{{{\mVec}\_int}}$, $\Aprx{\cFunc}_{\prdT-1}$ returns the associated value of $\vctr{c}_{\code{\prdT-1}}$, and when called with a value of $\mNrm$ that is not exactly equal to one of the \texttt{mVec\_int}, returns the value of $c$ that reflects a linear interpolation between the $\vctr{c}_{\code{\prdT-1}}$ points associated with the two \texttt{mVec\_int} points immediately above and below $\mNrm$. @@ -212,7 +211,6 @@ \subsection{Value Function versus First Order Condition}\label{subsec:vVsuP} \label{fig:PlotuPrimeVSOPrime} \end{figure} -In the notebook, the ``Value Function versus the First Order Condition'' section completes the task of finding the values of consumption which satisfy the first order condition in \eqref{eq:FOCTm1} using the \href{https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brentq.html}{\texttt{brentq}} function from the \texttt{scipy} package.% The downward-sloping curve in Figure \ref{fig:PlotuPrimeVSOPrime} @@ -301,7 +299,6 @@ \subsection{Transformation}\label{subsec:transformation} In the case we are now considering with no uncertainty and no liquidity constraints, the optimizing consumer does not care whether a unit of income is scheduled to be received in the future period $t$ or the current period $t-1$; there is perfect certainty that the income will be received, so the consumer treats its PDV as equivalent to a unit of current wealth. Total resources available at the point when the consumption decision is made are therefore of two types: current market resources $m$ and `human wealth' (the PDV of future income) of $h_{t-1}=1$ (because it is the value of human wealth as of the end of the period, there is only one more period of income of 1 left). -The well-known optimal solution is to spend half of total lifetime resources in period $t-1$ and the remainder in period $t (=T)$. Since total resources are known with certainty to be $m+h_{t-1}= m+1$, and since $v_{\MidStg}^{m}(m) = u^{c}(c)$, this implies that \begin{equation} v^{m}_{\MidPrdLsT}(m) = \left(\frac{m+1}{2}\right)^{-\rho} \label{eq:vPLin}. \end{equation} diff --git a/sec_solving-the-next-input.tex b/sec_solving-the-next-input.tex index c299f6ec8..1131b4fe6 100644 --- a/sec_solving-the-next-input.tex +++ b/sec_solving-the-next-input.tex @@ -118,7 +118,6 @@ \subsection{The Approximate Consumption and Value Functions} \hypertarget{an-interpolated-consumption-function}{} \subsection{An Interpolated Consumption Function} \label{subsec:LinInterp} -We can now apply our solution to \eqref{eq:vEndTm1} to each of the values in $\vctr{m}$, generating a corresponding optimal $\vctr{c}$. This is called `sampling' the consumption function. Using the ordered pairs $\{\vctr{m},\vctr{c}\}$ we can create a piecewise linear `interpolating function' (a `spline') which, when applied to any input $\vctr{m}[1] \leq m\leq \vctr{m}[-1]$, will yield the value of $\cNrm$ that corresponds to a linear `connect-the-dots' interpolation of the value of $\cNrm$ from the values of the two nearest computed $\{\mNrm,\cNrm\}$ points.\footnote{For a useful treatment of various kinds of interpolation appropriate for different questions, see } % This is accomplished in ``An Interpolated Consumption Function,'' which generates an interpolating function that we designate $\Aprx{\cFunc}_{\MidPrdLsT}(\mNrm)$.
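The \texttt{brentq} root-finding step mentioned above can be sketched as follows; the end-of-period marginal value function \texttt{vP\_end} and the bracketing interval are illustrative assumptions, not the notebook's actual objects:
\begin{verbatim}
from scipy.optimize import brentq

rho = 2.0
uP = lambda c: c ** (-rho)               # marginal utility u'(c)
vP_end = lambda a: (a + 0.1) ** (-rho)   # stand-in marginal value of assets

def c_from_FOC(m, eps=1e-9):
    # Solve u'(c) = vP_end(m - c) for c on (0, m).  uP(c) explodes as
    # c -> 0 while vP_end is large near a = 0, so for moderate m the
    # bracket (eps, m - eps) contains a sign change for brentq to find.
    foc = lambda c: uP(c) - vP_end(m - c)
    return brentq(foc, eps, m - eps)
\end{verbatim}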
@@ -301,7 +299,6 @@ \subsection{Transformation}\label{subsec:transformation}
 
 In the case we are now considering with no uncertainty and no liquidity constraints, the optimizing consumer does not care whether a unit of income is scheduled to be received in the future period $t$ or the current period $t-1$; there is perfect certainty that the income will be received, so the consumer treats its PDV as equivalent to a unit of current wealth.  Total resources available at the point when the consumption decision is made therefore comprise two types: current market resources $m$ and `human wealth' (the PDV of future income) of $h_{t-1}=1$ (because it is the value of human wealth as of the end of the period, there is only one more period of income of 1 left).
-The well-known optimal solution is to spend half of total lifetime resources in period $t-1$ and the remainder in period $t (=T)$.
 Since total resources are known with certainty to be $m+h_{t-1}=m+1$, and since $v_{\MidStg}^{m}(m) = u^{c}(c)$, this implies that
 \begin{equation}
   v^{m}_{\MidPrdLsT}(m) = \left(\frac{m+1}{2}\right)^{-\rho} \label{eq:vPLin}.
 \end{equation}
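The deleted sentence invokes the `spend half' result without the intermediate step. Assuming, as the text's $h_{t-1}=1$ and the half-split itself imply, that the interest and discount factors are both 1, the step behind \eqref{eq:vPLin} is short:
\[
  u^{c}(c_{t-1}) = u^{c}(c_{t}) \;\Rightarrow\; c_{t-1} = c_{t},
  \qquad
  c_{t-1} + c_{t} = m + 1 \;\Rightarrow\; c_{t-1} = \frac{m+1}{2},
\]
so that $v^{m}_{\MidPrdLsT}(m) = u^{c}\!\left(\frac{m+1}{2}\right) = \left(\frac{m+1}{2}\right)^{-\rho}$, which is exactly \eqref{eq:vPLin}.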
diff --git a/sec_solving-the-next-input.tex b/sec_solving-the-next-input.tex
index c299f6ec8..1131b4fe6 100644
--- a/sec_solving-the-next-input.tex
+++ b/sec_solving-the-next-input.tex
@@ -118,7 +118,6 @@ \subsection{The Approximate Consumption and Value Functions}
 
 \hypertarget{an-interpolated-consumption-function}{}
 \subsection{An Interpolated Consumption Function}
 \label{subsec:LinInterp}
-We can now apply our solution to \eqref{eq:vEndTm1} to each of the values in $\vctr{m}$, generating a corresponding optimal $\vctr{c}$.  This is called `sampling' the consumption function.  Using the ordered pairs $\{\vctr{m},\vctr{c}\}$ we can create a piecewise linear `interpolating function' (a `spline') which, when applied to any input $\vctr{m}[1] \leq \mNrm \leq \vctr{m}[-1]$, will yield the value of $\cNrm$ that corresponds to a linear `connect-the-dots' interpolation of the value of $\cNrm$ from the values of the two nearest computed $\{\mNrm,\cNrm\}$ points.\footnote{For a useful treatment of various kinds of interpolation appropriate for different questions, see } % This is accomplished in ``An Interpolated Consumption Function,'' which generates an interpolating function that we designate $\Aprx{\cFunc}_{\MidPrdLsT}(\mNrm)$.
 %When called with an $\mNrm$ that is equal to one of the points in $\code{{{\mVec}\_int}}$, $\Aprx{\cFunc}_{\prdT-1}$ returns the associated value of $\vctr{c}_{\code{\prdT-1}}$, and when called with a value of $\mNrm$ that is not exactly equal to one of the \texttt{mVec\_int} points, returns the value of $c$ that reflects a linear interpolation between the $\vctr{c}_{\code{\prdT-1}}$ points associated with the two \texttt{mVec\_int} points immediately above and below $\mNrm$.
 
@@ -212,7 +211,6 @@ \subsection{Value Function versus First Order Condition}\label{subsec:vVsuP}
 \label{fig:PlotuPrimeVSOPrime}
 \end{figure}
 
-In the notebook, the ``Value Function versus the First Order Condition'' section completes the task of finding the values of consumption which satisfy the first order condition in \eqref{eq:FOCTm1} using the \href{https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brentq.html}{\texttt{brentq}} function from the \texttt{scipy} package.%
 The downward-sloping curve in Figure \ref{fig:PlotuPrimeVSOPrime}
@@ -301,7 +299,6 @@ \subsection{Transformation}\label{subsec:transformation}
 
 In the case we are now considering with no uncertainty and no liquidity constraints, the optimizing consumer does not care whether a unit of income is scheduled to be received in the future period $\prdT$ or the current period $\prdT-1$; there is perfect certainty that the income will be received, so the consumer treats its PDV as equivalent to a unit of current wealth.  Total resources available at the point when the consumption decision is made therefore comprise two types: current market resources $\mNrm$ and `human wealth' (the PDV of future income) of $\hNrm_{\prdT-1}=1$ (because it is the value of human wealth as of the end of the period, there is only one more period of income of 1 left).
-The well-known optimal solution is to spend half of total lifetime resources in period $\prdT-1$ and the remainder in period $\prdT (=\trmT)$.
 Since total resources are known with certainty to be $\mNrm+\hNrm_{\prdT-1}=\mNrm+1$, and since $\vFunc_{\MidStg}^{m}(\mNrm) = \uFunc^{c}(\cNrm)$, this implies that
 \begin{equation}
   \vFunc^{m}_{\MidPrdLsT}(\mNrm) = \left(\frac{\mNrm+1}{2}\right)^{-\CRRA} \label{eq:vPLin}.
 \end{equation}
diff --git a/sec_solving-the-next.tex b/sec_solving-the-next.tex
index 45fc72184..c7bf91ec1 100644
--- a/sec_solving-the-next.tex
+++ b/sec_solving-the-next.tex
@@ -134,7 +134,6 @@ \subsection{The Approximate Consumption and Value Functions}
 
 \hypertarget{an-interpolated-consumption-function}{}
 \subsection{An Interpolated Consumption Function}
 \label{subsec:LinInterp}
-We can now apply our solution to \eqref{eq:vEndTm1} to each of the values in $\vctr{m}$, generating a corresponding optimal $\vctr{c}$.  This is called `sampling' the consumption function.  Using the ordered pairs $\{\vctr{m},\vctr{c}\}$ we can create a piecewise linear `interpolating function' (a `spline') which, when applied to any input $\vctr{m}[1] \leq \mNrm \leq \vctr{m}[-1]$, will yield the value of $\cNrm$ that corresponds to a linear `connect-the-dots' interpolation of the value of $\cNrm$ from the values of the two nearest computed $\{\mNrm,\cNrm\}$ points.\footnote{For a useful treatment of various kinds of interpolation appropriate for different questions, see } % This is accomplished in ``An Interpolated Consumption Function,'' which generates an interpolating function that we designate $\Aprx{\cFunc}_{\MidPrdLsT}(\mNrm)$.
 %When called with an $\mNrm$ that is equal to one of the points in $\code{{{\mVec}\_int}}$, $\Aprx{\cFunc}_{\prdT-1}$ returns the associated value of $\vctr{c}_{\code{\prdT-1}}$, and when called with a value of $\mNrm$ that is not exactly equal to one of the \texttt{mVec\_int} points, returns the value of $c$ that reflects a linear interpolation between the $\vctr{c}_{\code{\prdT-1}}$ points associated with the two \texttt{mVec\_int} points immediately above and below $\mNrm$.
 
@@ -231,7 +230,6 @@ \subsection{Value Function versus First Order Condition}\label{subsec:vVsuP}
 \label{fig:PlotuPrimeVSOPrime}
 \end{figure}
 
-In the notebook, the ``Value Function versus the First Order Condition'' section completes the task of finding the values of consumption which satisfy the first order condition in \eqref{eq:FOCTm1} using the \href{https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brentq.html}{\texttt{brentq}} function from the \texttt{scipy} package.%
 The downward-sloping curve in Figure \ref{fig:PlotuPrimeVSOPrime}
@@ -320,7 +318,6 @@ \subsection{Transformation}\label{subsec:transformation}
 
 In the case we are now considering with no uncertainty and no liquidity constraints, the optimizing consumer does not care whether a unit of income is scheduled to be received in the future period $\prdT$ or the current period $\prdT-1$; there is perfect certainty that the income will be received, so the consumer treats its PDV as equivalent to a unit of current wealth.  Total resources available at the point when the consumption decision is made therefore comprise two types: current market resources $\mNrm$ and `human wealth' (the PDV of future income) of $\hNrm_{\prdT-1}=1$ (because it is the value of human wealth as of the end of the period, there is only one more period of income of 1 left).
-The well-known optimal solution is to spend half of total lifetime resources in period $\prdT-1$ and the remainder in period $\prdT (=\trmT)$.
 Since total resources are known with certainty to be $\mNrm+\hNrm_{\prdT-1}=\mNrm+1$, and since $\vFunc_{\MidStg}^{m}(\mNrm) = \uFunc^{c}(\cNrm)$, this implies that
 \begin{equation}
   \vFunc^{m}_{\MidPrdLsT}(\mNrm) = \left(\frac{\mNrm+1}{2}\right)^{-\CRRA} \label{eq:vPLin}.
 \end{equation}
diff --git a/tmp.pdf b/tmp.pdf
deleted file mode 100644
index 966e9bf1b..000000000
Binary files a/tmp.pdf and /dev/null differ
diff --git a/tmp.tex b/tmp.tex
deleted file mode 100644
index 18a81cd21..000000000
--- a/tmp.tex
+++ /dev/null
@@ -1,37 +0,0 @@
-\documentclass{article}
-\usepackage[utf8]{inputenc}
-\usepackage{./.econtexRoot} % Set paths (like, \LaTeXInputs)
-
-\usepackage{econark}
-\usepackage{\LaTeXInputs/local-macros} % econark-generic
-\usepackage{SolvingMicroDSOPs-private}
-\usepackage{amssymb}
-\usepackage{unicode-math}
-
-\begin{document}
-Hello, World!
-Here is a Unicode character: ü
-%\newcommand{\umlaut}{ü}
-%\newcommand{\ü}{umlaut}
-
-%\ü
-
-\umlaut\umlaut
-
-%$\Ex$
-\end{document} \endinput
-
-
-\documentclass{article}
-\usepackage{./.econtexRoot} % Set paths (like, \LaTeXInputs)
-
-\usepackage[utf8]{inputenc}
-\usepackage{econark}
-\usepackage{\LaTeXInputs/local-macros} % econark-generic
-
-\usepackage{SolvingMicroDSOPs-private}
-
-\begin{document}
-\[
-  \Ex
-\]
-\end{document}\endinput
diff --git a/unicode-subs-declare.sty b/unicode-subs-declare.sty
deleted file mode 100644
index d6d41541f..000000000
--- a/unicode-subs-declare.sty
+++ /dev/null
@@ -1,17 +0,0 @@
-% If you encounter the unicode character, substitute the LaTeX on compile
-\DeclareUnicodeCharacter{1D53C}{\mathbb{E}} % 𝔼
-\DeclareUnicodeCharacter{1D41A}{\mathbf{a}} % 𝐚
-\DeclareUnicodeCharacter{1D41B}{\mathbf{b}} % 𝐛
-\DeclareUnicodeCharacter{1D41C}{\mathbf{c}} % 𝐜
-\DeclareUnicodeCharacter{1D429}{\mathbf{p}} % 𝐩
-\DeclareUnicodeCharacter{1D424}{\mathbf{k}} % 𝐤
-\DeclareUnicodeCharacter{1D426}{\mathbf{m}} % 𝐦
-\DeclareUnicodeCharacter{1D42F}{\mathbf{v}} % 𝐯
-\DeclareUnicodeCharacter{1D432}{\mathbf{y}} % 𝐲
-\DeclareUnicodeCharacter{1D4A2}{\mathcal{G}} % 𝒢
-\DeclareUnicodeCharacter{211B}{\mathcal{R}} % ℛ
-\DeclareUnicodeCharacter{1D69E}{\mathrm{u}} % 𝚞
-\DeclareUnicodeCharacter{1D69F}{\mathrm{v}} % 𝚟
-\DeclareUnicodeCharacter{03B2}{\beta} % β
-\DeclareUnicodeCharacter{03C3}{\sigma} % σ
-\DeclareUnicodeCharacter{03C1}{\rho} % ρ
diff --git a/unicode-subs-declare.tex b/unicode-subs-declare.tex
deleted file mode 100644
index d6d41541f..000000000
--- a/unicode-subs-declare.tex
+++ /dev/null
@@ -1,17 +0,0 @@
-% If you encounter the unicode character, substitute the LaTeX on compile
-\DeclareUnicodeCharacter{1D53C}{\mathbb{E}} % 𝔼
-\DeclareUnicodeCharacter{1D41A}{\mathbf{a}} % 𝐚
-\DeclareUnicodeCharacter{1D41B}{\mathbf{b}} % 𝐛
-\DeclareUnicodeCharacter{1D41C}{\mathbf{c}} % 𝐜
-\DeclareUnicodeCharacter{1D429}{\mathbf{p}} % 𝐩
-\DeclareUnicodeCharacter{1D424}{\mathbf{k}} % 𝐤
-\DeclareUnicodeCharacter{1D426}{\mathbf{m}} % 𝐦
-\DeclareUnicodeCharacter{1D42F}{\mathbf{v}} % 𝐯
-\DeclareUnicodeCharacter{1D432}{\mathbf{y}} % 𝐲
-\DeclareUnicodeCharacter{1D4A2}{\mathcal{G}} % 𝒢
-\DeclareUnicodeCharacter{211B}{\mathcal{R}} % ℛ
-\DeclareUnicodeCharacter{1D69E}{\mathrm{u}} % 𝚞
-\DeclareUnicodeCharacter{1D69F}{\mathrm{v}} % 𝚟
-\DeclareUnicodeCharacter{03B2}{\beta} % β
-\DeclareUnicodeCharacter{03C3}{\sigma} % σ
-\DeclareUnicodeCharacter{03C1}{\rho} % ρ
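The two files deleted above are identical copies (one .sty, one .tex) of a substitution table: each \DeclareUnicodeCharacter line tells pdfLaTeX's inputenc machinery to replace a literal Unicode character in the source with a LaTeX macro at read time. A minimal self-contained illustration of the mechanism, independent of this repository's packages:

    \documentclass{article}
    \usepackage[utf8]{inputenc}
    \usepackage{amssymb} % provides \mathbb
    % Whenever the source contains the character 𝔼 (U+1D53C), read it as \mathbb{E}
    \DeclareUnicodeCharacter{1D53C}{\mathbb{E}}
    \begin{document}
    $𝔼[x]$ % typesets exactly as $\mathbb{E}[x]$ would
    \end{document}

Note that the replacement text must be valid wherever the character appears; \mathbb{E} is a math-mode command, so after this declaration 𝔼 may only be used inside math.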