diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 0000000..9456aac
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,22 @@
+name: Documenter
+on:
+  push:
+    branches: [main]
+    tags: [v*]
+  pull_request:
+
+jobs:
+  Documenter:
+    permissions:
+      contents: write
+    name: Documentation
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: julia-actions/cache@v1
+        with:
+          cache-compiled: "true"
+      - uses: julia-actions/julia-buildpkg@v1
+      - uses: julia-actions/julia-docdeploy@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 0000000..21a3a1a
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,30 @@
+name: Tests
+on:
+  push:
+    branches: ['main']
+    tags: '*'
+  pull_request:
+
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        julia-version: ['1']
+        julia-arch: [x86]
+        os: [ubuntu-latest]
+    steps:
+      - uses: actions/checkout@v2
+      - uses: julia-actions/setup-julia@latest
+        with:
+          version: ${{ matrix.julia-version }}
+      - uses: julia-actions/julia-buildpkg@v1
+      - uses: julia-actions/cache@v1
+        with:
+          cache-compiled: "true"
+      - uses: julia-actions/julia-runtest@v1
+      - uses: julia-actions/julia-processcoverage@v1
+      - uses: codecov/codecov-action@v1
+        with:
+          file: lcov.info
+
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 29126e4..824eb5f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,6 @@
+# emacs backups
+*~
+
 # Files generated by invoking Julia with --code-coverage
 *.jl.cov
 *.jl.*.cov
diff --git a/Project.toml b/Project.toml
index 30ff52e..6695fee 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,15 @@
+name = "ECON622_BLP"
+uuid = "04c43c4b-146f-4579-99e2-63279d378c20"
+authors = ["Paul Schrimpf "]
+version = "0.0.1"
+
 [deps]
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
+Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9"
+EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869"
 FastGaussQuadrature = "442a2c76-b920-505d-bb47-c5924d526838"
 HCubature = "19dc6840-f33b-545b-b366-655c7e3ffd49"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56"
 Sobol = "ed01d8cd-4d21-5b2a-85b4-cc3bdc58bad4"
 SparseGrids = "bafbe729-afc6-5148-bb4f-226bf3d46895"
diff --git a/docs/Project.toml b/docs/Project.toml
new file mode 100644
index 0000000..dfa65cd
--- /dev/null
+++ b/docs/Project.toml
@@ -0,0 +1,2 @@
+[deps]
+Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
diff --git a/docs/make.jl b/docs/make.jl
new file mode 100644
index 0000000..a01415d
--- /dev/null
+++ b/docs/make.jl
@@ -0,0 +1,18 @@
+using Documenter, ECON622_BLP
+makedocs(;
+    modules=[ECON622_BLP],
+    format=Documenter.HTML(),
+    pages=[
+        "Home" => "index.md",
+        "Function Reference" => "functions.md"
+    ],
+    repo=Remotes.GitHub("UBCECON","ECON622_BLP.jl"),
+    sitename="ECON622_BLP.jl",
+    #doctest=false,
+    authors="Paul Schrimpf "
+    #assets=String[],
+    #plugins=[bib]
+)
+
+
+deploydocs(repo="github.com/UBCECON/ECON622_BLP.jl.git", branch="gh-pages")
diff --git a/docs/src/functions.md b/docs/src/functions.md
new file mode 100644
index 0000000..33ba8a3
--- /dev/null
+++ b/docs/src/functions.md
@@ -0,0 +1,10 @@
+# Function Reference
+
+```@autodocs
+Modules = [ECON622_BLP]
+```
+
+## Index
+
+```@index
+```
diff --git a/docs/src/index.md b/docs/src/index.md
new file mode 100644
index 0000000..5deed5d
--- /dev/null
+++ b/docs/src/index.md
@@ -0,0 +1,10 @@
+# ECON622_BLP.jl
+
+Random coefficients demand models in the style of Berry, Levinsohn, and Pakes (1995). Created as an example for teaching ECON622.
+
+```@contents
+```
+
+# Model
+
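+A sketch of the share equation that `share` implements (the outside option's
+utility is normalized to zero, and ``F_\nu`` is the distribution of the random
+tastes ``\nu``):
+
+```math
+s_j(\delta, \Sigma) = \int \frac{\exp(\delta_j + x_j^\top \Sigma \nu)}{1 + \sum_{k=1}^{J} \exp(\delta_k + x_k^\top \Sigma \nu)} \, dF_\nu(\nu), \qquad j = 1, \dots, J.
+```
+
+`delta` computes the inverse mapping, solving this system for ``\delta`` given observed shares ``s``.
+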
+# Another Section
diff --git a/src/ECON622_BLP.jl b/src/ECON622_BLP.jl
new file mode 100644
index 0000000..78151bb
--- /dev/null
+++ b/src/ECON622_BLP.jl
@@ -0,0 +1,123 @@
+module ECON622_BLP
+
+import NLsolve
+
+export share, delta, delta!, Integrate
+
+include("integrate.jl")
+
+@doc raw"""
+    share(δ, Σ, x, ∫)
+
+Computes shares in the random coefficients logit model with mean utilities `δ`, observed characteristics `x`, and taste covariance `Σ`, integrating over the distribution of the random tastes `ν` with `∫`.
+Assumes there is an outside option with utility 0. The outside option has share `1-sum(s)`.
+
+# Arguments
+
+- `δ` vector of length `J`
+- `Σ` `K` by `K` matrix
+- `x` `J` by `K` array
+- `∫` AbstractIntegrator for integrating over the distribution of `ν`
+
+# Returns
+
+- vector of length `J` consisting of $s_1$, ..., $s_J$
+"""
+function share(δ, Σ, x, ∫::Integrate.AbstractIntegrator)
+    J,K = size(x)
+    (length(δ) == J) || error("length(δ)=$(length(δ)) != size(x,1)=$J")
+    (K,K) == size(Σ) || error("size(x,2)=$K != size(Σ)=$(size(Σ))")
+    function shareν(ν)
+        s = δ + x*Σ*ν
+        smax = max(0, maximum(s))
+        return(exp.(s .- smax)./(sum(exp, s .- smax) + exp(-smax)))
+    end
+    return(∫(shareν))
+end
+
+function delta(s, Σ, x, ∫)
+    δ = similar(s)
+    delta!(δ, s, Σ, x, ∫)
+    return(δ)
+end
+
+# delta! is separate from delta so that a custom reverse rule can be defined for Enzyme
+function delta!(δ, s, Σ, x, ∫)
+    eqns! = let xi=x, Σi=Σ, ∫i=∫
+        function(F,δ)
+            F .= s - share(δ,Σi,xi,∫i)
+        end
+    end
+    δ0 = log.(s) .- log(1-sum(s))
+    sol = NLsolve.nlsolve(eqns!, δ0, autodiff=:forward, method=:trust_region)
+    if (sol.residual_norm > 1e-4)
+        @warn "Possible problem in delta!(...)\n$sol"
+    end
+    δ .= sol.zero
+    return nothing
+end
+
+
+import EnzymeCore
+import EnzymeCore.EnzymeRules
+import EnzymeCore: Const, Duplicated, BatchDuplicated, Active, BatchDuplicatedNoNeed
+import EnzymeCore.EnzymeRules: ConfigWidth, AugmentedReturn, needs_primal, needs_shadow
+import Enzyme # needed for Enzyme.jacobian
+import LinearAlgebra: lu
+
+function EnzymeRules.forward(f::Const{typeof(delta)}, ::Type{<:Duplicated},
+                             s::Duplicated, Σ::Const, x::Const, ∫::Const)
+    δ = f.val(s.val, Σ.val, x.val, ∫.val)
+    sf(d) = share(d, Σ.val, x.val, ∫.val)
+    J = Enzyme.jacobian(Enzyme.Forward, sf, δ)
+    dδ = J \ s.dval
+    return Duplicated(δ, dδ)
+end
+
+function EnzymeRules.forward(f::Const{typeof(delta)}, RT::Type{<:BatchDuplicated},
+                             s::BatchDuplicated, Σ::Const, x::Const, ∫::Const)
+    δ = f.val(s.val, Σ.val, x.val, ∫.val)
+    sf(d) = share(d, Σ.val, x.val, ∫.val)
+    J = lu(Enzyme.jacobian(Enzyme.Forward, sf, δ))
+    out = BatchDuplicated(δ, map(ds->J \ ds, s.dval))
+    return out
+end
+
+function EnzymeRules.forward(f::Const{typeof(delta)}, RT::Type{<:BatchDuplicatedNoNeed},
+                             s::BatchDuplicated{T,N}, Σ::Const, x::Const, ∫::Const) where {T, N}
+    δ = f.val(s.val, Σ.val, x.val, ∫.val)
+    sf(d) = share(d, Σ.val, x.val, ∫.val)
+    J = lu(Enzyme.jacobian(Enzyme.Forward, sf, δ))
+    out = NTuple{N,T}(J \ ds for ds in s.dval)
+    return out
+end
+
+
+function EnzymeRules.augmented_primal(config::ConfigWidth{W}, f::Const{typeof(delta!)}, RT::Type{<:Const},
+                                      δ::Duplicated, s::Duplicated, Σ::Const, x::Const, ∫) where W
+    f.val(δ.val, s.val, Σ.val, x.val, ∫.val)
+    tape = (δ = copy(δ.val), s = copy(s.val), Σ = copy(Σ.val))
+    @assert !needs_primal(config) && !needs_shadow(config)
+    return AugmentedReturn(nothing, nothing, tape)
+end
+
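+# A sketch of why the reverse rule below is valid: delta! solves share(δ) = s,
+# so the implicit function theorem gives ∂δ/∂s = (∂share/∂δ)⁻¹. The rule
+# recomputes J = ∂share/∂δ at the solution stored in the tape and accumulates
+# s.dval .+= J \ δ.dval. For this logit share map J is symmetric (each
+# integrand Jacobian is diag(s)-s*s'), so J \ δ.dval equals the transposed
+# solve J' \ δ.dval that a reverse rule requires in general.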
+function EnzymeRules.reverse(config::ConfigWidth{W}, f::Const{typeof(delta!)}, dret::Type{<:Const}, tape,
+                             δ::Duplicated, s::Duplicated, Σ::Const, x::Const, ∫) where W
+    sf(d) = share(d, tape.Σ, x.val, ∫.val)
+    J = lu(Enzyme.jacobian(Enzyme.Forward, sf, tape.δ))
+    s.dval .+= J \ δ.dval
+    return(nothing, nothing, nothing, nothing, nothing)
+end
+
+# As the name suggests, this function is broken: the last line indexes one
+# element past the end of `x`, so it throws a BoundsError for an ordinary array.
+function broken!(x)
+    out = 0
+    for i ∈ eachindex(x)
+        out += exp(x[i])
+    end
+    x[1] = x[length(x)+1]
+    return(out)
+end
+
+
+
+end
\ No newline at end of file
diff --git a/src/blp.jl b/src/blp.jl
deleted file mode 100644
index b8a8022..0000000
--- a/src/blp.jl
+++ /dev/null
@@ -1,51 +0,0 @@
-module BLP
-
-import NLsolve
-
-include("integrate.jl")
-
-@doc raw"""
-    share(δ, Σ, dFν, x)
-
-Computes shares in random coefficient logit with mean tastes `δ`, observed characteristics `x`, unobserved taste distribution `dFν`, and taste covariances `Σ`.
-Assumes there is an outside option with u=0. The outside option has share `1-sum(s)`
-
-# Arguments
-
-- `δ` vector of length `J`
-- `Σ` `K` by `K` matrix
-- `x` `J` by `K` array
-- `∫` AbstractIntegrator for integrating over distribution of `ν`
-
-# Returns
-
-- vector of length `J` consisting of $s_1$, ..., $s_J$
-"""
-function share(δ, Σ, x, ∫::Integrate.AbstractIntegrator)
-    J,K = size(x)
-    (length(δ) == J) || error("length(δ)=$(length(δ)) != size(x,1)=$J")
-    (K,K) == size(Σ) || error("size(x,2)=$K != size(Σ)=$(size(Σ))")
-    function shareν(ν)
-        s = δ .+ x*Σ*ν
-        smax=max(0,maximum(s))
-        s .-= smax
-        s .= exp.(s)
-        s ./= (sum(s) + exp(0-smax))
-        return(s)
-    end
-    return(∫(shareν))
-end
-
-function delta(s, Σ, x, ∫)
-    function eqns!(F,δ)
-        F .= s - share(δ,Σ,x,∫)
-    end
-    δ0 = log.(s) .- log(1-sum(s))
-    sol=NLsolve.nlsolve(eqns!, δ0, autodiff=:forward, method=:trust_region)
-    if (sol.residual_norm > 1e-4)
-        @warn "Possible problem in delta(s, ...)\n".*"$sol"
-    end
-    return(sol.zero)
-end
-
-end
\ No newline at end of file
diff --git a/src/integrate.jl b/src/integrate.jl
index 26b38b6..4c92b85 100644
--- a/src/integrate.jl
+++ b/src/integrate.jl
@@ -2,25 +2,30 @@ module Integrate
 using Distributions
 import Sobol: skip, SobolSeq
-import Base.Iterators: take, Repeated
+import Base.Iterators: take, Repeated, product, repeated
 import HCubature: hcubature
 import LinearAlgebra: cholesky
+using FastGaussQuadrature, LinearAlgebra
+using SparseGrids
+
 abstract type AbstractIntegrator end
 
-(∫::AbstractIntegrator)(f::Function) = sum(w*f(x) for (w,x) in zip(∫.w, ∫.x))
+(∫::AbstractIntegrator)(f::Function) = #sum(∫.w[i]*f(∫.x[i]) for i in eachindex(∫.x))
+    sum(w*f(x) for (x,w) in zip(∫.x, ∫.w))
 
 struct FixedNodeIntegrator{Tx,Tw} <: AbstractIntegrator
     x::Tx
     w::Tw
 end
 
-MonteCarloIntegrator(distribution::Distribution, ndraw=100)=FixedNodeIntegrator([rand(distribution) for i=1:ndraw], Repeated(1/ndraw))
+MonteCarloIntegrator(distribution::Distribution, ndraw=100)=
+    FixedNodeIntegrator([rand(distribution) for i=1:ndraw], fill(1/ndraw, ndraw))
 
 function QuasiMonteCarloIntegrator(distribution::UnivariateDistribution, ndraws=100)
     ss = skip(SobolSeq(1), ndraw)
     x = [quantile(distribution, x) for x in take(ss,ndraw)]
-    w = Repeated(1/ndraw)
+    w = fill(1/ndraw, ndraw) #repeated(1/ndraw, ndraw)
    FixedNodeIntegrator(x,w)
 end
@@ -28,10 +33,26 @@
 function QuasiMonteCarloIntegrator(distribution::AbstractMvNormal, ndraw=100)
     ss = skip(SobolSeq(length(distribution)), ndraw)
     L = cholesky(distribution.Σ).L
     x = [L*quantile.(Normal(), x) for x in take(ss,ndraw)]
-    w = Repeated(1/ndraw)
+    w = fill(1/ndraw, ndraw) #repeated(1/ndraw, ndraw)
     FixedNodeIntegrator(x,w)
 end
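+# Gauss-Hermite quadrature for Gaussian expectations (a sketch of the change of
+# variables used below): if ν ~ N(μ, Σ) with Σ = L*L', then
+#   E[f(ν)] = π^(-d/2) ∫ f(√2*L*z + μ) exp(-z'z) dz,
+# so tensor products of 1-d Gauss-Hermite nodes and weights, with nodes mapped
+# through z ↦ √2*L*z + μ and weights divided by π^(d/2), approximate E[f(ν)].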
+function QuadratureIntegrator(dx::AbstractMvNormal; ndraw=100)
+    n = Int(ceil(ndraw^(1/length(dx))))
+    x, w = gausshermite(n)
+    L = cholesky(dx.Σ).L
+    x = vec([√2*L*vcat(xs...) + dx.μ for xs in product(repeated(x,length(dx))...)])
+    w = vec([prod(ws)/π^(length(dx)/2) for ws in product(repeated(w, length(dx))...)])
+    FixedNodeIntegrator(x,w)
+end
+
+function SparseGridIntegrator(dx::AbstractMvNormal; order=5)
+    X, W = sparsegrid(length(dx), order, gausshermite, sym=true)
+    L = cholesky(dx.Σ).L
+    X = [√2*L*x + dx.μ for x ∈ X]
+    W /= π^(length(dx)/2)
+    FixedNodeIntegrator(X,W)
+end
 
 struct AdaptiveIntegrator{FE,FT,FJ,A,L} <: AbstractIntegrator
     eval::FE
@@ -41,8 +62,6 @@ struct AdaptiveIntegrator{FE,FT,FJ,A,L} <: AbstractIntegrator
     limits::L
 end
 
-(∫::AdaptiveIntegrator)(f::Function) = ∫.eval(t->f(∫.transform(t))*∫.detJ(t), ∫.limits...; ∫.args...)[1]
-
 function AdaptiveIntegrator(dist::AbstractMvNormal; eval=hcubature, options=())
     D = length(dist)
     x(t) = t./(1 .- t.^2)
@@ -52,4 +71,6 @@ function AdaptiveIntegrator(dist::AbstractMvNormal; eval=hcubature, options=())
     AdaptiveIntegrator(hcubature,x,Dx,args, limits)
 end
 
+(∫::AdaptiveIntegrator)(f::Function) = ∫.eval(t->f(∫.transform(t))*∫.detJ(t), ∫.limits...; ∫.args...)[1]
+
 end
\ No newline at end of file
diff --git a/test/Project.toml b/test/Project.toml
new file mode 100644
index 0000000..00a2580
--- /dev/null
+++ b/test/Project.toml
@@ -0,0 +1,10 @@
+[deps]
+AbbreviatedStackTraces = "ac637c84-cc71-43bf-9c33-c1b4316be3d4"
+Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
+Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9"
+FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41"
+ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
+Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
diff --git a/test/runtests.jl b/test/runtests.jl
index b0374c4..6735f14 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,39 +1,66 @@
-using Test
+using Test, TestItemRunner
 
-@testset "integrator" begin
-    # include("../src/integrate.jl") # for interactive execution
-    using Distributions
+#=
+ # Just do this once
+import Pkg
+Pkg.activate()
+Pkg.develop(path=normpath(joinpath(@__DIR__, "..")))
+Pkg.activate(@__DIR__)
+ =#
+
+
+@testitem "integrator" begin
+    import ECON622_BLP as BLP
+    import ECON622_BLP: Integrate
+    using Distributions, LinearAlgebra
+    # includet("../src/integrate.jl") # for interactive execution
+
     dimx = 3
     A = rand(dimx,dimx)
-    Σ = A*A'
+    Σ = A*A' + I
     dx = MvNormal(zeros(dimx), Σ)
-    ∫a = Integrate.AdaptiveIntegrator(dx, options=(rtol=1e-6,initdiv=3))
+    ∫a = Integrate.AdaptiveIntegrator(dx, options=(rtol=1e-4,initdiv=3))
     V = ∫a(x->x*x')
     @test isapprox(V, Σ, rtol=1e-5)
+    f(x) = exp(x[1])/sum(exp.(x))
     val = ∫a(f)
-    for N ∈ [1_000, 10_000, 100_000]
+    for N ∈ [1_000, 10_000]
         ∫mc = Integrate.MonteCarloIntegrator(dx, N)
         ∫qmc = Integrate.QuasiMonteCarloIntegrator(dx,N)
         @test isapprox(∫mc(x->x*x'), Σ, rtol=10/sqrt(N))
         @test isapprox(∫qmc(x->x*x'), Σ, rtol=10/sqrt(N))
+
+        @test isapprox(∫mc(f),val,rtol=2/sqrt(N))
+        @test isapprox(∫qmc(f),val,rtol=2/sqrt(N))
+    end
+    for N ∈ [25, 100, 200]
+        ∫ = Integrate.QuadratureIntegrator(dx,ndraw=N)
+        n = ceil(N^(1/dimx))
+        @test isapprox(∫(x->x*x'), Σ, rtol=1/2^n)
+        @test isapprox(∫(f),val,rtol=1/2^n)
+    end
-        f(x) = exp(x[1])/sum(exp.(x))
-        @test isapprox(∫mc(f),val,rtol=1/sqrt(N))
-        @test isapprox(∫qmc(f),val,rtol=1/sqrt(N))
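+    # Sparse grids combine the same 1-d Gauss-Hermite rules as the tensor-product
+    # quadrature above but use far fewer nodes; the tolerances below are loose
+    # heuristics in the node count n = length(∫.w).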
+    for order ∈ [3, 5, 8, 10]
+        ∫ = Integrate.SparseGridIntegrator(dx,order=order)
+        n = length(∫.w)
+        @test isapprox(∫(x->x*x'), Σ, rtol=1/n)
+        @test isapprox(∫(f),val,rtol=log(n)/n)
     end
 end
 
-@testset "share=δ⁻¹"
-    # include("../src/blp.jl")
-    using LinearAlgebra
+@testitem "share=δ⁻¹" begin
+    import ECON622_BLP as BLP
+    import ECON622_BLP: Integrate
+    using Distributions, LinearAlgebra
+
     J = 4
     dimx=2
     dx = MvNormal(dimx, 1.0)
     Σ = [1 0.5; 0.5 1]
-    N = 1_000
-    ∫ = BLP.Integrate.QuasiMonteCarloIntegrator(dx, N)
+    N = 50
+    ∫ = Integrate.QuasiMonteCarloIntegrator(dx, N)
     X = [(-1.).^(1:J) 1:J]
     δ = collect((1:J)./J)
     s = BLP.share(δ,Σ,X,∫)
@@ -45,10 +72,56 @@ end
     X = rand(J, dimx)
     dx = MvNormal(dimx, 1.0)
     Σ = I + ones(dimx,dimx)
-    ∫ = BLP.Integrate.QuasiMonteCarloIntegrator(dx, N)
+    ∫ = Integrate.QuasiMonteCarloIntegrator(dx, N)
     δ = 1*rand(J)
     s = BLP.share(δ,Σ,X,∫)
     d = BLP.delta(s, Σ, X, ∫)
-    @test isapprox(d, δ, rtol=1e-6)
-
+    @test isapprox(d, δ, rtol=1e-6)
+
+    @testset "hard delta" begin
+        δ = 5*rand(J)
+        @show s = BLP.share(δ,Σ,X,∫)
+        d = BLP.delta(s, Σ, X, ∫)
+        @test_skip isapprox(d, δ, rtol=1e-4)
+    end
+end
+
+@testitem "differentiability" begin
+    import ECON622_BLP as BLP
+    import ECON622_BLP: Integrate
+    using Distributions, LinearAlgebra
+
+    import FiniteDiff, ForwardDiff, Enzyme, Zygote
+    import Enzyme: Active, Const, Duplicated
+
+    J = 4
+    dimx=2
+    dx = MvNormal(dimx, 1.0)
+    Σ = [1 0.5; 0.5 1]
+    ∫ = Integrate.QuasiMonteCarloIntegrator(dx, 40)
+    X = [(-1.).^(1:J) 1:J]
+    δ = collect((1:J)./J)
+    s = BLP.share(δ, Σ, X, ∫)
+    sharefn = d->BLP.share(d,Σ,X,∫)
+    @testset "share function" begin
+        Jfd = FiniteDiff.finite_difference_jacobian(sharefn, δ)
+        @test isapprox(Jfd, ForwardDiff.jacobian(sharefn, δ), rtol=1e-4)
+        @test isapprox(Jfd, Zygote.jacobian(sharefn, δ)[1], rtol=1e-4)
+        Enzyme.API.runtimeActivity!(true)
+        Jer = Enzyme.jacobian(Enzyme.Reverse, sharefn, δ, Val(J))
+        @test isapprox(Jfd, Jer, rtol=1e-4)
+        Jef = Enzyme.jacobian(Enzyme.Forward, sharefn, δ, Val(J))
+        @test isapprox(Jfd, Jef, rtol=1e-4)
+    end
+
+    @testset "delta function" begin
+        dfn = s->BLP.delta(s,Σ,X,∫)
+        Jfd = FiniteDiff.finite_difference_jacobian(dfn, s)
+        @test isapprox(Jfd, ForwardDiff.jacobian(dfn, s), rtol=1e-4)
+        @test isapprox(Jfd, Enzyme.jacobian(Enzyme.Forward, dfn, s), rtol=1e-4)
+        @test isapprox(Jfd, Enzyme.jacobian(Enzyme.Reverse, dfn, s, Val(J)), rtol=1e-4)
+    end
+end
+
+@run_package_tests
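+# TestItemRunner's @run_package_tests macro discovers and runs all of the @testitem blocks defined above.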