-
Notifications
You must be signed in to change notification settings - Fork 227
Support DPPL 0.37 #2550
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Support DPPL 0.37 #2550
Changes from all commits
6e32434
7948bc5
7607955
05944ac
eed3b09
a344c70
9b134a7
6c62c76
fe93769
4ee5649
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -190,16 +190,7 @@ function DynamicPPL.initialstep( | |
# Create a Hamiltonian. | ||
metricT = getmetricT(spl.alg) | ||
metric = metricT(length(theta)) | ||
ldf = DynamicPPL.LogDensityFunction( | ||
model, | ||
vi, | ||
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we | ||
# need to pass in the sampler? (In fact LogDensityFunction defaults to | ||
# using leafcontext(model.context) so could we just remove the argument | ||
# entirely?) | ||
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)); | ||
adtype=spl.alg.adtype, | ||
) | ||
ldf = DynamicPPL.LogDensityFunction(model, vi; adtype=spl.alg.adtype) | ||
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf) | ||
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf) | ||
hamiltonian = AHMC.Hamiltonian(metric, lp_func, lp_grad_func) | ||
|
@@ -214,7 +205,7 @@ function DynamicPPL.initialstep( | |
theta = vi[:] | ||
|
||
# Cache current log density. | ||
log_density_old = getlogp(vi) | ||
log_density_old = getloglikelihood(vi) | ||
|
||
# Find good eps if not provided one | ||
if iszero(spl.alg.ϵ) | ||
|
@@ -242,10 +233,12 @@ function DynamicPPL.initialstep( | |
# Update `vi` based on acceptance | ||
if t.stat.is_accept | ||
vi = DynamicPPL.unflatten(vi, t.z.θ) | ||
vi = setlogp!!(vi, t.stat.log_density) | ||
# TODO(mhauru) Is setloglikelihood! the right thing here? | ||
vi = setloglikelihood!!(vi, t.stat.log_density) | ||
else | ||
vi = DynamicPPL.unflatten(vi, theta) | ||
vi = setlogp!!(vi, log_density_old) | ||
# TODO(mhauru) Is setloglikelihood! the right thing here? | ||
vi = setloglikelihood!!(vi, log_density_old) | ||
Comment on lines
-245
to
+241
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Not fully sure what to do here. There is an argument that we should re-evaluate the model anyway because |
||
end | ||
|
||
transition = Transition(model, vi, t) | ||
|
@@ -290,7 +283,8 @@ function AbstractMCMC.step( | |
vi = state.vi | ||
if t.stat.is_accept | ||
vi = DynamicPPL.unflatten(vi, t.z.θ) | ||
vi = setlogp!!(vi, t.stat.log_density) | ||
# TODO(mhauru) Is setloglikelihood! the right thing here? | ||
vi = setloglikelihood!!(vi, t.stat.log_density) | ||
end | ||
|
||
# Compute next transition and state. | ||
|
@@ -302,16 +296,7 @@ end | |
|
||
function get_hamiltonian(model, spl, vi, state, n) | ||
metric = gen_metric(n, spl, state) | ||
ldf = DynamicPPL.LogDensityFunction( | ||
model, | ||
vi, | ||
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we | ||
# need to pass in the sampler? (In fact LogDensityFunction defaults to | ||
# using leafcontext(model.context) so could we just remove the argument | ||
# entirely?) | ||
DynamicPPL.SamplingContext(spl, DynamicPPL.leafcontext(model.context)); | ||
adtype=spl.alg.adtype, | ||
) | ||
ldf = DynamicPPL.LogDensityFunction(model, vi; adtype=spl.alg.adtype) | ||
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf) | ||
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf) | ||
return AHMC.Hamiltonian(metric, lp_func, lp_grad_func) | ||
|
@@ -516,10 +501,6 @@ function DynamicPPL.assume( | |
return DynamicPPL.assume(dist, vn, vi) | ||
end | ||
|
||
function DynamicPPL.observe(::Sampler{<:Hamiltonian}, d::Distribution, value, vi) | ||
return DynamicPPL.observe(d, value, vi) | ||
end | ||
|
||
#### | ||
#### Default HMC stepsize and mass matrix adaptor | ||
#### | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
As established in (e.g.) TuringLang/DynamicPPL.jl#955 (comment), SamplingContext for Hamiltonians was never overloaded, so it is equivalent to just using DefaultContext in the LDF.