Skip to content

Commit

Permalink
Add WrappedBlock, PEImpulseUpdate and fix issues
Browse files Browse the repository at this point in the history
  • Loading branch information
junyuan-chen committed Dec 27, 2023
1 parent d105061 commit 73a599d
Show file tree
Hide file tree
Showing 16 changed files with 444 additions and 69 deletions.
14 changes: 7 additions & 7 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -44,34 +44,34 @@ SeqJacNonlinearSystemsExt = "NonlinearSystems"
SeqJacRootsExt = "Roots"

[compat]
AdvancedMH = "0.7, 0.8"
AutoregressiveModels = "0.1"
BlockArrays = "0.16"
CSV = "0.9, 0.10"
CodecZlib = "0.7"
CommonSolve = "0.2"
ComponentArrays = "0.13, 0.14, 0.15"
DataFrames = "1"
Distributions = "0.25"
FFTW = "1"
FastLapackInterface = "1.2, 2"
FillArrays = "0.13, 1"
FiniteDiff = "2"
Graphs = "1.4"
JSON3 = "1"
LogDensityProblems = "2"
LoopVectorization = "0.12.101"
MCMCChains = "5, 6"
MacroTools = "0.5"
NLopt = "0.6, 1"
NLsolve = "4.5"
NonlinearSystems = "0.1.1"
Requires = "1"
Roots = "1.4, 2"
SplitApplyCombine = "1.2"
StaticArraysCore = "1.4"
StatsAPI = "1.2"
StatsBase = "0.33, 0.34"
StructArrays = "0.6"
Tables = "1"
TransformVariables = "0.8"
Expand Down
10 changes: 9 additions & 1 deletion src/SequenceJacobians.jl
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,8 @@ using FiniteDiff: JacobianCache, finite_difference_jacobian!, GradientCache,
finite_difference_gradient!, HessianCache, finite_difference_hessian!, default_relstep
using Graphs: AbstractGraph, Edge, SimpleDiGraphFromIterator, topological_sort_by_dfs
using LinearAlgebra: BLAS, LAPACK, I, UniformScaling, Diagonal, LU, lu!, lu, rmul!,
    Hermitian, cholesky!, ldiv!, inv!, norm, dot, stride1, diag, diagind,
    Bidiagonal
using LogDensityProblems: LogDensityOrder
using MacroTools
using MacroTools: postwalk
Expand Down Expand Up @@ -111,6 +112,8 @@ export supconverged,
block,
steadystate!,
AbstractBlockJacobian,
ShiftBlockJacobian,
MatrixBlockJacobian,
SimpleBlockJacobian,
jacobian,
transition!,
Expand Down Expand Up @@ -198,6 +201,9 @@ export supconverged,
CombinedBlock,
CombinedBlockJacobian,

WrappedBlock,
wrap,

@simple,
@implicit,

Expand All @@ -219,6 +225,7 @@ export supconverged,
aslongtable,

ImpulseUpdate,
PEImpulseUpdate,

BayesianModel,
TransformedBayesianModel,
Expand All @@ -245,6 +252,7 @@ include("hetblock.jl")
include("model.jl")
include("jacobian.jl")
include("combinedblock.jl")
include("wrappedblock.jl")
include("macros.jl")
include("allcov.jl")
include("shock.jl")
Expand Down
41 changes: 36 additions & 5 deletions src/bayesian.jl
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ struct BayesianModel{TF, NT, PR<:Tuple, SH<:Tuple, N1, N2, N3,
Ycache::Vector{TF}
nT::Int
Tobs::Int
x::Vector{TF}
dl::Vector{TF}
dlcache::GC
d2l::Matrix{TF}
Expand Down Expand Up @@ -262,6 +263,7 @@ function bayesian(gs::GMaps{TF}, shocks, observables,
allcovcache = FFTWAllCovCache(nT, Nobs, wexo, TF; allcovcachekwargs...)
V = Matrix{TF}(undef, nY, nY)
dl = Vector{TF}(undef, npara)
x = similar(dl)
dlcache = GradientCache{TF,Nothing,Nothing,Vector{TF},fdtype,TF,Val(true)}(
zero(TF), nothing, nothing, similar(dl))
d2l = Matrix{TF}(undef, npara, npara)
Expand All @@ -270,7 +272,7 @@ function bayesian(gs::GMaps{TF}, shocks, observables,
return BayesianModel(shockses, shockparas, strucparas, priors, lookuppara,
exovars, obs, lookupobs, Ref(paravals), paraaxis, u, gs, shocks, Z, G, GZ, SE,
allcovcache, V, measurement_error, Y, Ycache, nT, Tobs,
dl, dlcache, d2l, d2lcache, fdkwargs)
x, dl, dlcache, d2l, d2lcache, fdkwargs)
end

nshock(bm::BayesianModel) = typeof(bm).parameters[5]
Expand Down Expand Up @@ -452,14 +454,33 @@ end
# Grow the workspace vector held by a vector-backed GradientCache so that it
# can accommodate `N` parameters; returns the resized workspace.
function _resize_fdcache!(ca::GradientCache{<:Any,Nothing,Nothing,<:AbstractVector}, N::Int)
    return resize!(ca.c3, N)
end

"""
    _copypara!(tar::Vector, src::NamedTuple)

Flatten the values of `src` into `tar` in iteration order.
A `Real` entry fills a single slot; any other entry is copied
element-by-element with `copyto!` and advances the write position by its
length. Returns `tar`.
"""
function _copypara!(tar::Vector, src::NamedTuple)
    pos = 1
    for val in values(src)
        if val isa Real
            tar[pos] = val
            pos += 1
        else
            copyto!(tar, pos, val)
            pos += length(val)
        end
    end
    return tar
end

"""
    logposterior_and_gradient!(bm::BayesOrTrans, θ, grad::AbstractVector)

Evaluate the log posterior of `bm` at `θ` and fill `grad` with its
finite-difference gradient, resizing the shared gradient cache if the
(possibly transformed) parameter dimension has changed.
Returns the tuple `(l, grad)` with `l` the log posterior value.
"""
function logposterior_and_gradient!(bm::BayesOrTrans, θ, grad::AbstractVector)
    l = logposterior!(bm, θ)
    p = parent(bm)
    ca = p.dlcache
    # Transformation could change the dimension
    dimension(bm) <= length(ca.c3) || _resize_fdcache!(ca, dimension(bm))
    ca = _update_fdcache(ca, l)
    # Differencing requires an array of parameters; a non-array θ (e.g. a
    # NamedTuple of parameter values) is first flattened into the
    # preallocated buffer.
    # NOTE(review): the buffer is accessed as `bm.x` while the caches come
    # from `parent(bm)`; if `bm` is a transformed wrapper without property
    # forwarding, `parent(bm).x` may be intended — confirm.
    if θ isa AbstractArray
        finite_difference_gradient!(grad, bm, θ, ca; p.fdkwargs...)
    else
        _copypara!(bm.x, θ)
        finite_difference_gradient!(grad, bm, bm.x, ca; p.fdkwargs...)
    end
    return l, grad
end

Expand All @@ -479,7 +500,7 @@ function (bm::BayesOrTrans)(θ, grad::AbstractVector)
end

capabilities(::Type{<:BayesOrTrans}) = LogDensityOrder{1}()
dimension(bm::BayesianModel) = length(bm.priors)
dimension(bm::BayesianModel) = lastindex(bm.paraaxis)
logdensity(bm::BayesianModel, θ) = logposterior!(bm, θ)

# Automatic differentiation is not applicable and hence use FiniteDiff
Expand All @@ -498,7 +519,12 @@ function logdensity_and_gradient(bm::TransformedBayesianModel, θ)
ca = p.dlcache
dimension(bm) <= length(ca.c3) || _resize_fdcache!(ca, dimension(bm))
ca = _update_fdcache(ca, l)
finite_difference_gradient!(grad, f, θ, ca; p.fdkwargs...)
if θ isa AbstractArray
finite_difference_gradient!(grad, f, θ, ca; p.fdkwargs...)
else
_copypara!(bm.x, θ)
finite_difference_gradient!(grad, f, bm.x, ca; p.fdkwargs...)
end
return l, copy(grad)
end

Expand All @@ -516,7 +542,12 @@ function logdensity_hessian!(bm::BayesOrTrans, θ, h::AbstractMatrix)
_update_fdcache!(ca, θ)
# Share fdkwargs with gradient for simplicity but their keywords are not identical
f = Fix1(logdensity, bm)
finite_difference_hessian!(h, f, θ, ca; p.fdkwargs...)
if θ isa AbstractArray
finite_difference_hessian!(h, f, θ, ca; p.fdkwargs...)
else
_copypara!(bm.x, θ)
finite_difference_hessian!(h, f, bm.x, ca; p.fdkwargs...)
end
return h
end

Expand Down
6 changes: 5 additions & 1 deletion src/block.jl
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,9 @@ end

# Common supertype for Jacobians of model blocks; TF is the numeric element type.
abstract type AbstractBlockJacobian{TF} end

# Two representation families for block Jacobians; presumably shift-operator
# (time-displacement) based vs. explicit matrix based — TODO confirm from the
# concrete subtypes (e.g. SimpleBlockJacobian <: ShiftBlockJacobian below).
abstract type ShiftBlockJacobian{TF} <: AbstractBlockJacobian{TF} end
abstract type MatrixBlockJacobian{TF} <: AbstractBlockJacobian{TF} end

struct ArrayToArgs{CumWidths, StaticArgs}
function ArrayToArgs(cumwidths::NTuple{N,Int}, staticargs::Bool=true) where N
N >= 1 || throw(ArgumentError("length of cumwidths must be at least 1"))
Expand Down Expand Up @@ -145,7 +148,7 @@ end

const PseudoBlockMat{TF, P} = PseudoBlockMatrix{TF, P, Tuple{BlockedUnitRange{Vector{Int64}}, BlockedUnitRange{Vector{Int64}}}}

struct SimpleBlockJacobian{BLK<:SimpleBlock, TF, ins, PF<:PartialF, FD} <: AbstractBlockJacobian{TF}
struct SimpleBlockJacobian{BLK<:SimpleBlock, TF, ins, PF<:PartialF, FD} <: ShiftBlockJacobian{TF}
blk::BLK
J::PseudoBlockMat{TF}
x::Vector{TF}
Expand Down Expand Up @@ -178,6 +181,7 @@ function (j::SimpleBlockJacobian{BLK,TF,ins})(varvals::NamedTuple) where {BLK,TF
_tuple_copyto!(j.g.vals, _hascache(j.g) ? invals[2:end] : invals)
# A cache should not be reached from any source variable
_tuple_copyto!(j.x, map(k->getfield(varvals, k), ins))
# ! 1 allocation happens here?
finite_difference_jacobian!(j.J.blocks, j.g, j.x, j.fdcache)
return j
end
Expand Down
Loading

0 comments on commit 73a599d

Please sign in to comment.