Running Pathfinder on Turing.jl models

This tutorial demonstrates how Turing can be used with Pathfinder.

We'll demonstrate with a regression example.

using AdvancedHMC, LinearAlgebra, Pathfinder, Random, Turing
Random.seed!(39)

# Bayesian linear regression: y ≈ α + β*x with Gaussian noise of scale σ.
@model function regress(x, y)
    α ~ Normal()                          # intercept prior, standard normal
    β ~ Normal()                          # slope prior, standard normal
    σ ~ truncated(Normal(); lower=0)      # noise-scale prior (half-normal)
    y .~ Normal.(α .+ β .* x, σ)          # elementwise Gaussian likelihood
end
# Simulate observations from the line y = 2x + 1.5 with noise of sd 0.2.
x = 0:0.1:10
y = [2 * xi + 1.5 + randn() * 0.2 for xi in x]
# Condition the model on the simulated data (`collect` materializes the range).
model = regress(collect(x), y)
DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}(Main.regress, (x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9  …  9.1, 9.2, 9.3, 9.4, 9.5, 9.6, 9.7, 9.8, 9.9, 10.0], y = [1.6664116869390373, 1.891497101913886, 1.7384305108256857, 2.2187296181842964, 2.3732885334405034, 2.769366183240351, 2.6402850816920136, 2.8316802176647675, 2.6147284441437417, 3.001320964172886  …  19.676131789844952, 20.12746375915987, 20.081681871812364, 20.2537624581252, 20.430828353298363, 20.75518547480482, 20.85047170077969, 21.424483267847467, 21.63606510912814, 21.12808223508428]), NamedTuple(), DynamicPPL.DefaultContext())

The first way we can use Turing with Pathfinder is via its mode estimation functionality. We can use Turing.optim_function to generate a SciMLBase.OptimizationFunction, which pathfinder and multipathfinder can take as inputs.

# Build a SciMLBase.OptimizationFunction for the MAP objective in unconstrained
# (transformed) parameter space, which Pathfinder requires.
fun = optim_function(model, MAP(); constrained=false)
(func = SciMLBase.OptimizationFunction{true, SciMLBase.NoAD, Turing.Optimisation.var"#l#4"{DynamicPPL.LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}}, Turing.Optimisation.var"#3#5"{DynamicPPL.LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, 
DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED_NO_TIME), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing}(Turing.Optimisation.var"#l#4"{DynamicPPL.LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}}(DynamicPPL.LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, 
σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}(DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}((α = DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(α => 1), [α], UnitRange{Int64}[1:1], [-0.306337066942797], Distributions.Normal{Float64}[Distributions.Normal{Float64}(μ=0.0, σ=1.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1])), β = DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(β => 1), [β], 
UnitRange{Int64}[1:1], [1.4263375219406673], Distributions.Normal{Float64}[Distributions.Normal{Float64}(μ=0.0, σ=1.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1])), σ = DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(σ => 1), [σ], UnitRange{Int64}[1:1], [-0.1589308206098669], Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}[Truncated(Distributions.Normal{Float64}(μ=0.0, σ=1.0); lower=0.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1]))), Base.RefValue{Float64}(-1802.3124239176739), Base.RefValue{Int64}(1)), DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}(Main.regress, (x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9  …  9.1, 9.2, 9.3, 9.4, 9.5, 9.6, 9.7, 9.8, 9.9, 10.0], y = [1.6664116869390373, 1.891497101913886, 1.7384305108256857, 2.2187296181842964, 2.3732885334405034, 2.769366183240351, 2.6402850816920136, 2.8316802176647675, 2.6147284441437417, 3.001320964172886  …  19.676131789844952, 20.12746375915987, 20.081681871812364, 20.2537624581252, 20.430828353298363, 20.75518547480482, 20.85047170077969, 21.424483267847467, 21.63606510912814, 21.12808223508428]), NamedTuple(), DynamicPPL.DefaultContext()), Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}(DynamicPPL.DefaultContext()))), SciMLBase.NoAD(), Turing.Optimisation.var"#3#5"{DynamicPPL.LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, 
typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}}(DynamicPPL.LogDensityFunction{DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}}(DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, 
Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}((α = DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(α => 1), [α], UnitRange{Int64}[1:1], [-0.306337066942797], Distributions.Normal{Float64}[Distributions.Normal{Float64}(μ=0.0, σ=1.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1])), β = DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(β => 1), [β], UnitRange{Int64}[1:1], [1.4263375219406673], Distributions.Normal{Float64}[Distributions.Normal{Float64}(μ=0.0, σ=1.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1])), σ = DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(σ => 1), [σ], UnitRange{Int64}[1:1], [-0.1589308206098669], Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, 
Float64, Nothing}[Truncated(Distributions.Normal{Float64}(μ=0.0, σ=1.0); lower=0.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1]))), Base.RefValue{Float64}(-1802.3124239176739), Base.RefValue{Int64}(1)), DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}(Main.regress, (x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9  …  9.1, 9.2, 9.3, 9.4, 9.5, 9.6, 9.7, 9.8, 9.9, 10.0], y = [1.6664116869390373, 1.891497101913886, 1.7384305108256857, 2.2187296181842964, 2.3732885334405034, 2.769366183240351, 2.6402850816920136, 2.8316802176647675, 2.6147284441437417, 3.001320964172886  …  19.676131789844952, 20.12746375915987, 20.081681871812364, 20.2537624581252, 20.430828353298363, 20.75518547480482, 20.85047170077969, 21.424483267847467, 21.63606510912814, 21.12808223508428]), NamedTuple(), DynamicPPL.DefaultContext()), Turing.Optimisation.OptimizationContext{DynamicPPL.DefaultContext}(DynamicPPL.DefaultContext()))), nothing, nothing, nothing, nothing, nothing, nothing, nothing, nothing, nothing, nothing, SciMLBase.DEFAULT_OBSERVED_NO_TIME, nothing, nothing, nothing, nothing, nothing, nothing, nothing, nothing, nothing), init = Turing.Optimisation.Init{DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, 
typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.constrained_space{false}}(DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}((α = DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(α => 1), [α], UnitRange{Int64}[1:1], [-0.306337066942797], Distributions.Normal{Float64}[Distributions.Normal{Float64}(μ=0.0, σ=1.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1])), β = DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(β => 1), [β], UnitRange{Int64}[1:1], [1.4263375219406673], Distributions.Normal{Float64}[Distributions.Normal{Float64}(μ=0.0, σ=1.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1])), σ = 
DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(σ => 1), [σ], UnitRange{Int64}[1:1], [-0.1589308206098669], Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}[Truncated(Distributions.Normal{Float64}(μ=0.0, σ=1.0); lower=0.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1]))), Base.RefValue{Float64}(-1802.3124239176739), Base.RefValue{Int64}(1)), DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}(Main.regress, (x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9  …  9.1, 9.2, 9.3, 9.4, 9.5, 9.6, 9.7, 9.8, 9.9, 10.0], y = [1.6664116869390373, 1.891497101913886, 1.7384305108256857, 2.2187296181842964, 2.3732885334405034, 2.769366183240351, 2.6402850816920136, 2.8316802176647675, 2.6147284441437417, 3.001320964172886  …  19.676131789844952, 20.12746375915987, 20.081681871812364, 20.2537624581252, 20.430828353298363, 20.75518547480482, 20.85047170077969, 21.424483267847467, 21.63606510912814, 21.12808223508428]), NamedTuple(), DynamicPPL.DefaultContext()), Turing.Optimisation.constrained_space{false}()), transform = Turing.Optimisation.ParameterTransform{DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, 
σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}, DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}, Turing.Optimisation.constrained_space{true}}(DynamicPPL.TypedVarInfo{@NamedTuple{α::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, β::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}, σ::DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}}, Float64}((α = DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:α, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:α, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(α => 1), [α], UnitRange{Int64}[1:1], [-0.306337066942797], Distributions.Normal{Float64}[Distributions.Normal{Float64}(μ=0.0, σ=1.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1])), β = DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:β, typeof(identity)}, Int64}, Vector{Distributions.Normal{Float64}}, Vector{AbstractPPL.VarName{:β, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(β => 1), [β], UnitRange{Int64}[1:1], 
[1.4263375219406673], Distributions.Normal{Float64}[Distributions.Normal{Float64}(μ=0.0, σ=1.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1])), σ = DynamicPPL.Metadata{Dict{AbstractPPL.VarName{:σ, typeof(identity)}, Int64}, Vector{Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}}, Vector{AbstractPPL.VarName{:σ, typeof(identity)}}, Vector{Float64}, Vector{Set{DynamicPPL.Selector}}}(Dict(σ => 1), [σ], UnitRange{Int64}[1:1], [-0.1589308206098669], Distributions.Truncated{Distributions.Normal{Float64}, Distributions.Continuous, Float64, Float64, Nothing}[Truncated(Distributions.Normal{Float64}(μ=0.0, σ=1.0); lower=0.0)], Set{DynamicPPL.Selector}[Set()], [0], Dict{String, BitVector}("del" => [0], "trans" => [1]))), Base.RefValue{Float64}(-1802.3124239176739), Base.RefValue{Int64}(1)), DynamicPPL.Model{typeof(Main.regress), (:x, :y), (), (), Tuple{Vector{Float64}, Vector{Float64}}, Tuple{}, DynamicPPL.DefaultContext}(Main.regress, (x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9  …  9.1, 9.2, 9.3, 9.4, 9.5, 9.6, 9.7, 9.8, 9.9, 10.0], y = [1.6664116869390373, 1.891497101913886, 1.7384305108256857, 2.2187296181842964, 2.3732885334405034, 2.769366183240351, 2.6402850816920136, 2.8316802176647675, 2.6147284441437417, 3.001320964172886  …  19.676131789844952, 20.12746375915987, 20.081681871812364, 20.2537624581252, 20.430828353298363, 20.75518547480482, 20.85047170077969, 21.424483267847467, 21.63606510912814, 21.12808223508428]), NamedTuple(), DynamicPPL.DefaultContext()), Turing.Optimisation.constrained_space{true}()))
# Dimension of the unconstrained parameter vector (here 3: α, β, σ).
dim = length(fun.init())
# Run single-path Pathfinder on the objective, passing the problem dimension.
pathfinder(fun.func; dim)
Single-path Pathfinder result
  tries: 1
  draws: 5
  fit iteration: 22 (total: 24)
  fit ELBO: -0.43 ± 0.09
  fit distribution: Distributions.MvNormal{Float64, Pathfinder.WoodburyPDMat{Float64, LinearAlgebra.Diagonal{Float64, Vector{Float64}}, Matrix{Float64}, Matrix{Float64}, Pathfinder.WoodburyPDFactorization{Float64, LinearAlgebra.Diagonal{Float64, Vector{Float64}}, LinearAlgebra.QRCompactWYQ{Float64, Matrix{Float64}, Matrix{Float64}}, LinearAlgebra.UpperTriangular{Float64, Matrix{Float64}}}}, Vector{Float64}}(
dim: 3
μ: [1.4756791506881866, 2.006048098205582, -1.5519095882027991]
Σ: [0.0017463392745146373 -0.00026068112202907415 -1.9065104284984736e-5; -0.00026068112202907415 5.218684490829844e-5 2.2395982072709613e-6; -1.9065104284984736e-5 2.2395982072709613e-6 0.004953445831188703]
)
# Multi-path Pathfinder: 8 independent single-path runs, resampled to 1,000 draws.
multipathfinder(fun.func, 1_000; dim, nruns=8)
Multi-path Pathfinder result
  runs: 8
  draws: 1000
  Pareto shape diagnostic: 0.29 (good)

However, for convenience, pathfinder and multipathfinder can take Turing models as inputs and produce MCMCChains.Chains objects as outputs.

# pathfinder also accepts the Turing model directly; request 1,000 draws.
result_single = pathfinder(model; ndraws=1_000)
Single-path Pathfinder result
  tries: 1
  draws: 1000
  fit iteration: 30 (total: 32)
  fit ELBO: -0.33 ± 0.11
  fit distribution: Distributions.MvNormal{Float64, Pathfinder.WoodburyPDMat{Float64, LinearAlgebra.Diagonal{Float64, Vector{Float64}}, Matrix{Float64}, Matrix{Float64}, Pathfinder.WoodburyPDFactorization{Float64, LinearAlgebra.Diagonal{Float64, Vector{Float64}}, LinearAlgebra.QRCompactWYQ{Float64, Matrix{Float64}, Matrix{Float64}}, LinearAlgebra.UpperTriangular{Float64, Matrix{Float64}}}}, Vector{Float64}}(
dim: 3
μ: [1.4756791541314018, 2.0060480976814277, -1.55190958821236]
Σ: [0.0017550399152274835 -0.0002618460201236413 -2.8299520917607275e-5; -0.0002618460201236413 5.233685435162255e-5 3.8375211120613815e-6; -2.8299520917607275e-5 3.8375211120613815e-6 0.004958165318687945]
)
# Multi-path Pathfinder on the model itself: 8 runs, 1,000 final draws.
result_multi = multipathfinder(model, 1_000; nruns=8)
Multi-path Pathfinder result
  runs: 8
  draws: 1000
  Pareto shape diagnostic: 0.79 (bad)

Here, the Pareto shape diagnostic is in the "bad" range, indicating that it is likely unsafe to use these draws directly to compute posterior estimates, though they can still be useful for initializing MCMC.

When passed a Model, Pathfinder also gives access to the posterior draws in a familiar MCMCChains.Chains object.

# Draws transformed back to the constrained (model) space, as an MCMCChains.Chains.
result_multi.draws_transformed
Chains MCMC chain (1000×3×1 Array{Float64, 3}):

Iterations        = 1:1:1000
Number of chains  = 1
Samples per chain = 1000
parameters        = α, β, σ

Summary Statistics
  parameters      mean       std      mcse    ess_bulk   ess_tail      rhat        Symbol   Float64   Float64   Float64     Float64    Float64   Float64    ⋯

           α    1.4731    0.0435    0.0014    938.8256   782.0770    1.0003    ⋯
           β    2.0065    0.0075    0.0002    938.5307   914.2359    0.9995    ⋯
           σ    0.2149    0.0144    0.0004   1047.5625   908.0965    1.0023    ⋯
                                                                1 column omitted

Quantiles
  parameters      2.5%     25.0%     50.0%     75.0%     97.5% 
      Symbol   Float64   Float64   Float64   Float64   Float64 

           α    1.3893    1.4464    1.4728    1.5016    1.5611
           β    1.9909    2.0019    2.0069    2.0111    2.0221
           σ    0.1880    0.2048    0.2148    0.2235    0.2435

We can also use these posterior draws to initialize MCMC sampling.

# Take the first four transformed draws as one starting parameter vector per chain.
init_params = [collect(row) for row in eachrow(result_multi.draws_transformed.value[1:4, :, 1])]
4-element Vector{Vector{Float64}}:
 [1.4772782591056148, 2.005452696361856, 0.21492050861813508]
 [1.4734264142728142, 2.008384318416601, 0.20187094916399045]
 [1.4777152637680282, 2.0065535871925806, 0.22426600320081969]
 [1.5111047121393704, 2.0040351048519147, 0.23346795852358765]
# Sample 4 NUTS chains in parallel, each initialized from one Pathfinder draw.
chns = sample(model, Turing.NUTS(), MCMCThreads(), 1_000, 4; init_params, progress=false)
Chains MCMC chain (1000×15×4 Array{Float64, 3}):

Iterations        = 501:1:1500
Number of chains  = 4
Samples per chain = 1000
Wall duration     = 7.01 seconds
Compute duration  = 5.35 seconds
parameters        = α, β, σ
internals         = lp, n_steps, is_accept, acceptance_rate, log_density, hamiltonian_energy, hamiltonian_energy_error, max_hamiltonian_energy_error, tree_depth, numerical_error, step_size, nom_step_size

Summary Statistics
  parameters      mean       std      mcse    ess_bulk    ess_tail      rhat      Symbol   Float64   Float64   Float64     Float64     Float64   Float64   ⋯

           α    1.4734    0.0427    0.0010   1850.9340   1851.2484    1.0048   ⋯
           β    2.0064    0.0074    0.0002   2000.4331   2143.0034    1.0030   ⋯
           σ    0.2166    0.0156    0.0003   2042.5095   2222.5054    1.0024   ⋯
                                                                1 column omitted

Quantiles
  parameters      2.5%     25.0%     50.0%     75.0%     97.5% 
      Symbol   Float64   Float64   Float64   Float64   Float64 

           α    1.3899    1.4453    1.4731    1.5018    1.5568
           β    1.9923    2.0014    2.0064    2.0113    2.0207
           σ    0.1895    0.2054    0.2157    0.2264    0.2501

To use Pathfinder's estimate of the metric and skip warm-up, at the moment one needs to use AdvancedHMC directly.

# Log density: negate the optimization objective (the objective is minimized,
# so its negation is the target log density).
function ℓπ(θ)
    return -fun.func.f(θ, nothing)
end
# Return the pair (log density, gradient of log density) at `x`, the form
# AdvancedHMC's Hamiltonian expects. The stored gradient is that of the
# negative log density, so it is negated in place.
function ∂ℓπ∂θ(x)
    grad = similar(x)
    fun.func.grad(grad, x, nothing)
    grad .*= -1
    return ℓπ(x), grad
end

# Number of posterior draws to keep and number of step-size adaptation steps.
# Warm-up can be short because the metric comes from Pathfinder, not adaptation.
ndraws = 1_000
nadapts = 50
# Covariance of the first Pathfinder run's fitted distribution, used as the
# inverse metric. NOTE(review): assumes pathfinder_results[1] is a suitable
# fit; the tutorial uses the first run without further selection.
inv_metric = result_multi.pathfinder_results[1].fit_distribution.Σ
# Low-rank-update Euclidean metric that exploits the structure of Pathfinder's
# covariance estimate.
metric = Pathfinder.RankUpdateEuclideanMetric(inv_metric)
hamiltonian = Hamiltonian(metric, ℓπ, ∂ℓπ∂θ)
# Heuristic initial step size found from the first Pathfinder draw.
ϵ = find_good_stepsize(hamiltonian, init_params[1])
integrator = Leapfrog(ϵ)
# Multinomial-sampling NUTS kernel with generalised no-U-turn termination.
kernel = HMCKernel(Trajectory{MultinomialTS}(integrator, GeneralisedNoUTurn()))
# Adapt only the step size (target acceptance 0.8); the metric stays fixed.
adaptor = StepSizeAdaptor(0.8, integrator)
samples, stats = sample(
    hamiltonian,
    kernel,
    init_params[1],
    ndraws + nadapts,
    adaptor,
    nadapts;
    drop_warmup=true,
    progress=false,
)
([[1.4614993606522217, 2.012270580210989, -1.5095604254403407], [1.4226717907757558, 2.014188194297197, -1.4728163742931615], [1.3514335159370776, 2.0241244010309667, -1.4450760180114748], [1.4801806502469723, 2.007388453304671, -1.517567279995825], [1.4442343951934835, 2.0115921047785497, -1.6430740403608055], [1.4381596058678068, 2.008122840729696, -1.427840463962348], [1.489502159395784, 2.0094728503879935, -1.4705783824193928], [1.528000644916985, 2.001203896300999, -1.5854490771334822], [1.4942477479095198, 2.0042834760033093, -1.4257591789799497], [1.4077185737722135, 2.018004012210266, -1.6847116304161316]  …  [1.4464396507473365, 2.0061106431295217, -1.5414485640135875], [1.489180939548136, 2.009942791499547, -1.5774488708852508], [1.4639238201793194, 2.009818942023887, -1.5556958581456228], [1.4435281874090267, 2.0032226748944972, -1.5432907439862344], [1.5766747493559754, 1.9982767358211602, -1.5754310665963995], [1.3824506363258355, 2.017572781731849, -1.4403807734437073], [1.5360734638778126, 1.999568157240875, -1.5332884983175525], [1.5226866413732816, 2.0021726436781253, -1.6086908100764863], [1.4774215933919772, 2.0041074932609164, -1.452223387214546], [1.4776044798860755, 2.003829211490795, -1.4116466151630334]], NamedTuple[(n_steps = 7, is_accept = true, acceptance_rate = 0.9837927572156603, log_density = 7.408170741162024, hamiltonian_energy = -5.169695118653516, hamiltonian_energy_error = -0.004365467951970459, max_hamiltonian_energy_error = 0.05226232127218822, tree_depth = 3, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 7, is_accept = true, acceptance_rate = 0.9388560020904174, log_density = 6.941311985180742, hamiltonian_energy = -6.119464556617942, hamiltonian_energy_error = -0.03924988457823542, max_hamiltonian_energy_error = 0.13359332677437585, tree_depth = 3, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = 
false), (n_steps = 3, is_accept = true, acceptance_rate = 0.8524351252641228, log_density = 3.5988375461907154, hamiltonian_energy = -2.9893423714346903, hamiltonian_energy_error = 0.20068849923353804, max_hamiltonian_energy_error = 0.20068849923353804, tree_depth = 2, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 7, is_accept = true, acceptance_rate = 0.9120198679367945, log_density = 7.955521231040067, hamiltonian_energy = -2.352560383143027, hamiltonian_energy_error = -0.2747722102018195, max_hamiltonian_energy_error = 0.35791133895921634, tree_depth = 2, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 7, is_accept = true, acceptance_rate = 0.9879442915646696, log_density = 6.947929722737464, hamiltonian_energy = -6.744853581141774, hamiltonian_energy_error = 0.013510397780402528, max_hamiltonian_energy_error = 0.0410923338146203, tree_depth = 3, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 31, is_accept = true, acceptance_rate = 0.7995174365311264, log_density = 6.116923198461538, hamiltonian_energy = -4.139675246294436, hamiltonian_energy_error = 0.20225619002269735, max_hamiltonian_energy_error = 0.3803237557048651, tree_depth = 4, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 7, is_accept = true, acceptance_rate = 0.9262431773146256, log_density = 6.571269320259917, hamiltonian_energy = -5.236802325930176, hamiltonian_energy_error = 0.1037823766256114, max_hamiltonian_energy_error = 0.18564449169186847, tree_depth = 3, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 15, is_accept = true, acceptance_rate = 0.986800353245087, log_density = 6.9173622971580775, hamiltonian_energy = 
-6.180141129071025, hamiltonian_energy_error = -0.08872460982336161, max_hamiltonian_energy_error = -0.26805751551856716, tree_depth = 3, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 7, is_accept = true, acceptance_rate = 0.9999379580760949, log_density = 6.62724160745422, hamiltonian_energy = -5.446376947796333, hamiltonian_energy_error = -0.07674294061347986, max_hamiltonian_energy_error = -0.1479064354061146, tree_depth = 3, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 7, is_accept = true, acceptance_rate = 0.971217299617754, log_density = 4.361965837717279, hamiltonian_energy = -3.2136637119987053, hamiltonian_energy_error = 0.08283193190907356, max_hamiltonian_energy_error = -0.10571280872680244, tree_depth = 3, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false)  …  (n_steps = 7, is_accept = true, acceptance_rate = 0.9479232145611768, log_density = 7.287945448291346, hamiltonian_energy = -6.857129347762827, hamiltonian_energy_error = 0.08396065085420013, max_hamiltonian_energy_error = -0.11561636059292635, tree_depth = 2, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 3, is_accept = true, acceptance_rate = 0.9308571870317569, log_density = 6.7144711274964735, hamiltonian_energy = -6.124795083273038, hamiltonian_energy_error = 0.08207528119111718, max_hamiltonian_energy_error = -0.1951284893688099, tree_depth = 2, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 3, is_accept = true, acceptance_rate = 0.9804366882818236, log_density = 8.024819283043094, hamiltonian_energy = -6.424222381381306, hamiltonian_energy_error = -0.2775215648473299, max_hamiltonian_energy_error = -0.2775215648473299, tree_depth 
= 2, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 7, is_accept = true, acceptance_rate = 0.6926522686099129, log_density = 5.769675738152529, hamiltonian_energy = -5.045493061702855, hamiltonian_energy_error = 0.5260474078934383, max_hamiltonian_energy_error = 0.5260474078934383, tree_depth = 2, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 3, is_accept = true, acceptance_rate = 0.8324650417682236, log_density = 3.0053225526348086, hamiltonian_energy = -1.7256326635229742, hamiltonian_energy_error = 0.35043642104073114, max_hamiltonian_energy_error = -0.5546393473931874, tree_depth = 2, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 7, is_accept = true, acceptance_rate = 0.9807389769967357, log_density = 4.912325434859289, hamiltonian_energy = -1.573725296938897, hamiltonian_energy_error = -0.5889128974155522, max_hamiltonian_energy_error = -1.043893351923411, tree_depth = 3, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 15, is_accept = true, acceptance_rate = 0.9913949975452216, log_density = 6.944761888146639, hamiltonian_energy = -3.4088103530715603, hamiltonian_energy_error = -0.10185841486519198, max_hamiltonian_energy_error = -0.2485511589678593, tree_depth = 3, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 15, is_accept = true, acceptance_rate = 0.9980115650563364, log_density = 6.764964190326862, hamiltonian_energy = -6.3335696892218145, hamiltonian_energy_error = 0.01027200220550295, max_hamiltonian_energy_error = -0.194223973456495, tree_depth = 4, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 23, 
is_accept = true, acceptance_rate = 0.9040965577961488, log_density = 7.191845645981066, hamiltonian_energy = -5.055764521205811, hamiltonian_energy_error = -0.07093194881149767, max_hamiltonian_energy_error = 0.31943744629084403, tree_depth = 4, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false), (n_steps = 3, is_accept = true, acceptance_rate = 0.9779352546989539, log_density = 6.298606375462178, hamiltonian_energy = -6.2799715497259605, hamiltonian_energy_error = 0.02610711907905028, max_hamiltonian_energy_error = 0.02610711907905028, tree_depth = 2, numerical_error = false, step_size = 0.4158938850542094, nom_step_size = 0.4158938850542094, is_adapt = false)])

Now we pack the samples into an MCMCChains.Chains. For this we use the utility Pathfinder.flattened_varnames_list.

# Map each unconstrained draw back to the constrained parameter space and
# stack the row vectors into a (draws × parameters) matrix.
samples_transformed = reduce(vcat, fun.transform.(samples)')
# Flattened variable names (α, β, σ) in the same order as the draw columns.
varnames = Pathfinder.flattened_varnames_list(model)
chns = MCMCChains.Chains(samples_transformed, varnames)
Chains MCMC chain (1000×3×1 Array{Float64, 3}):

Iterations        = 1:1:1000
Number of chains  = 1
Samples per chain = 1000
parameters        = α, β, σ

Summary Statistics
  parameters      mean       std      mcse   ess_bulk   ess_tail      rhat   e ⋯
      Symbol   Float64   Float64   Float64    Float64    Float64   Float64     ⋯

           α    1.4752    0.0435    0.0015   825.4263   787.6426    1.0004     ⋯
           β    2.0061    0.0075    0.0003   596.2646   593.3069    1.0012     ⋯
           σ    0.2152    0.0154    0.0007   541.8789   552.6359    1.0020     ⋯
                                                                1 column omitted

Quantiles
  parameters      2.5%     25.0%     50.0%     75.0%     97.5% 
      Symbol   Float64   Float64   Float64   Float64   Float64 

           α    1.3873    1.4447    1.4756    1.5050    1.5570
           β    1.9911    2.0012    2.0065    2.0113    2.0202
           σ    0.1861    0.2046    0.2145    0.2243    0.2482

See Initializing HMC with Pathfinder for further examples.