From 0709095d0217a792ef4cff48c87a1172ea58660b Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Wed, 14 Feb 2024 11:40:14 -0600 Subject: [PATCH] Run JuliaFormatter.format() Using JuliaFormatter v1.0.47 --- docs/make.jl | 30 ++++++++--------- docs/pages.jl | 2 +- docs/src/examples/pendulum.md | 10 +++--- docs/src/methods.md | 16 ++++----- src/abc_inference.jl | 29 ++++++++-------- src/dynamichmc_inference.jl | 39 +++++++++++----------- src/stan_inference.jl | 62 +++++++++++++++++------------------ src/turing_inference.jl | 36 ++++++++++---------- test/abc.jl | 14 ++++---- test/dynamicHMC.jl | 28 ++++++++-------- test/runtests.jl | 16 ++++++--- test/stan.jl | 18 +++++----- test/turing.jl | 34 ++++++++++--------- 13 files changed, 173 insertions(+), 161 deletions(-) diff --git a/docs/make.jl b/docs/make.jl index e0220b5f..c9683f26 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -8,21 +8,19 @@ ENV["GKSwstype"] = "100" include("pages.jl") makedocs(sitename = "DiffEqBayes.jl", - authors = "Chris Rackauckas, Vaibhav Kumar Dixit et al.", - clean = true, - doctest = false, - modules = [DiffEqBayes], - strict = [ - :doctest, - :linkcheck, - :parse_error, - :example_block, - # Other available options are - # :autodocs_block, :cross_references, :docs_block, :eval_block, :example_block, :footnote, :meta_block, :missing_docs, :setup_block - ], - format = Documenter.HTML(assets = ["assets/favicon.ico"], - canonical = "https://docs.sciml.ai/DiffEqBayes/stable/"), - pages = pages) + authors = "Chris Rackauckas, Vaibhav Kumar Dixit et al.", + clean = true, + doctest = false, + modules = [DiffEqBayes], + strict = [ + :doctest, + :linkcheck, + :parse_error, + :example_block # Other available options are # :autodocs_block, :cross_references, :docs_block, :eval_block, :example_block, :footnote, :meta_block, :missing_docs, :setup_block + ], + format = Documenter.HTML(assets = ["assets/favicon.ico"], + canonical = "https://docs.sciml.ai/DiffEqBayes/stable/"), + pages = pages) deploydocs(repo = "github.com/SciML/DiffEqBayes.jl.git"; - push_preview = true) + push_preview = true) diff --git a/docs/pages.jl b/docs/pages.jl index 67264b2a..76bc7e12 100644 --- a/docs/pages.jl +++ b/docs/pages.jl @@ -1,4 +1,4 @@ pages = ["index.md", "Methods" => "methods.md", - "Examples" => ["examples.md", "examples/pendulum.md"], + "Examples" => ["examples.md", "examples/pendulum.md"] ] diff --git a/docs/src/examples/pendulum.md b/docs/src/examples/pendulum.md index eadacaa5..c2231765 100644 --- a/docs/src/examples/pendulum.md +++ b/docs/src/examples/pendulum.md @@ -76,7 +76,7 @@ length of the pendulum L is probably around 3.0: ```@example pendulum priors = [ truncated(Normal(0.1, 1.0), lower = 0.0), - truncated(Normal(3.0, 1.0), lower = 0.0), + truncated(Normal(3.0, 1.0), lower = 0.0) ] ``` @@ -84,7 +84,7 @@ Finally, let's run the estimation routine from DiffEqBayes.jl with the Turing.jl ```@example pendulum bayesian_result = turing_inference(prob1, Tsit5(), t, data, priors; num_samples = 10_000, - syms = [:omega, :L]) + syms = [:omega, :L]) ``` Notice that while our guesses had the wrong means, the learned parameters converged @@ -120,15 +120,15 @@ to get better understanding of the performance. 
```@example pendulum @btime bayesian_result = turing_inference(prob1, Tsit5(), t, data, priors; - syms = [:omega, :L], num_samples = 10_000) + syms = [:omega, :L], num_samples = 10_000) ``` ```@example pendulum @btime bayesian_result = stan_inference(prob1, t, data, priors; num_samples = 10_000, - print_summary = false) + print_summary = false) ``` ```@example pendulum @btime bayesian_result = dynamichmc_inference(prob1, Tsit5(), t, data, priors; - num_samples = 10_000) + num_samples = 10_000) ``` diff --git a/docs/src/methods.md b/docs/src/methods.md index 1eb303cb..28a029f7 100644 --- a/docs/src/methods.md +++ b/docs/src/methods.md @@ -12,9 +12,9 @@ using DiffEqBayes ```julia stan_inference(prob::ODEProblem, t, data, priors = nothing; alg = :rk45, - num_samples = 1000, num_warmups = 1000, reltol = 1e-3, - abstol = 1e-6, maxiter = Int(1e5), likelihood = Normal, - vars = (StanODEData(), InverseGamma(2, 3))) + num_samples = 1000, num_warmups = 1000, reltol = 1e-3, + abstol = 1e-6, maxiter = Int(1e5), likelihood = Normal, + vars = (StanODEData(), InverseGamma(2, 3))) ``` `stan_inference` uses [Stan.jl](https://stanjulia.github.io/CmdStan.jl/latest/INTRO/) @@ -37,8 +37,8 @@ parameter list. ```julia function turing_inference(prob::DiffEqBase.DEProblem, alg, t, data, priors; - likelihood_dist_priors, likelihood, num_samples = 1000, - sampler = Turing.NUTS(num_samples, 0.65), parallel_type = MCMCSerial(), n_chains = 1, syms, kwargs...) + likelihood_dist_priors, likelihood, num_samples = 1000, + sampler = Turing.NUTS(num_samples, 0.65), parallel_type = MCMCSerial(), n_chains = 1, syms, kwargs...) end ``` @@ -55,7 +55,7 @@ type. `num_samples` is the number of samples per MCMC chain. Sampling from multi ```julia dynamichmc_inference(prob::DEProblem, alg, t, data, priors, transformations; - σ = 0.01, ϵ = 0.001, initial = Float64[]) + σ = 0.01, ϵ = 0.001, initial = Float64[]) ``` `dynamichmc_inference` uses [DynamicHMC.jl](https://github.com/tpapp/DynamicHMC.jl) to @@ -71,8 +71,8 @@ parameter values to specific domains. `initial` values for the parameters can be ```julia abc_inference(prob::DEProblem, alg, t, data, priors; ϵ = 0.001, - distancefunction = euclidean, ABCalgorithm = ABCSMC, progress = false, - num_samples = 500, maxiterations = 10^5, kwargs...) + distancefunction = euclidean, ABCalgorithm = ABCSMC, progress = false, + num_samples = 500, maxiterations = 10^5, kwargs...) ``` `abc_inference` uses [ApproxBayes.jl](https://github.com/marcjwilliams1/ApproxBayes.jl), which uses Approximate Bayesian Computation (ABC) to diff --git a/src/abc_inference.jl b/src/abc_inference.jl index bb875540..5b5f47b8 100644 --- a/src/abc_inference.jl +++ b/src/abc_inference.jl @@ -1,5 +1,5 @@ function createabcfunction(prob, t, distancefunction, alg; save_idxs = nothing, - sample_u0 = false, kwargs...) + sample_u0 = false, kwargs...) function simfunc(params, constants, data) local u0 if sample_u0 @@ -14,7 +14,7 @@ function createabcfunction(prob, t, distancefunction, alg; save_idxs = nothing, u0 = prob.u0 end sol = solve(prob, alg, u0 = u0, p = params, saveat = t, save_idxs = save_idxs, - kwargs...) + kwargs...) 
if size(sol, 2) < length(t) return Inf, nothing else @@ -25,18 +25,19 @@ function createabcfunction(prob, t, distancefunction, alg; save_idxs = nothing, end function abc_inference(prob::DiffEqBase.DEProblem, alg, t, data, priors; ϵ = 0.001, - distancefunction = euclidean, ABCalgorithm = ABCSMC, - progress = false, - num_samples = 500, maxiterations = 10^5, save_idxs = nothing, - sample_u0 = false, parallel = false, kwargs...) - abcsetup = ABCalgorithm(createabcfunction(prob, t, distancefunction, alg; - save_idxs = save_idxs, sample_u0 = sample_u0, - kwargs...), - length(priors), - ϵ, - ApproxBayes.Prior(priors); - nparticles = num_samples, - maxiterations = maxiterations) + distancefunction = euclidean, ABCalgorithm = ABCSMC, + progress = false, + num_samples = 500, maxiterations = 10^5, save_idxs = nothing, + sample_u0 = false, parallel = false, kwargs...) + abcsetup = ABCalgorithm( + createabcfunction(prob, t, distancefunction, alg; + save_idxs = save_idxs, sample_u0 = sample_u0, + kwargs...), + length(priors), + ϵ, + ApproxBayes.Prior(priors); + nparticles = num_samples, + maxiterations = maxiterations) abcresult = runabc(abcsetup, data, progress = progress, parallel = parallel) return abcresult diff --git a/src/dynamichmc_inference.jl b/src/dynamichmc_inference.jl index 500ac14e..63849d0c 100644 --- a/src/dynamichmc_inference.jl +++ b/src/dynamichmc_inference.jl @@ -44,12 +44,13 @@ function (P::DynamicHMCPosterior)(θ) end end _saveat = t === nothing ? Float64[] : t - sol = solve(problem, algorithm; u0 = u0, p = p, saveat = _saveat, save_idxs = save_idxs, - solve_kwargs...) + sol = solve( + problem, algorithm; u0 = u0, p = p, saveat = _saveat, save_idxs = save_idxs, + solve_kwargs...) failure = size(sol, 2) < length(_saveat) failure && return T(0) * sum(σ) + T(-Inf) log_likelihood = sum(sum(map(logpdf, Normal.(0.0, σ), sol[:, i] .- data[:, i])) - for (i, t) in enumerate(t)) + for (i, t) in enumerate(t)) log_prior_parameters = sum(map(logpdf, parameter_priors, parameters)) log_prior_σ = sum(map(logpdf, σ_priors, σ)) log_likelihood + log_prior_parameters + log_prior_σ @@ -91,24 +92,24 @@ posterior values (transformed from `ℝⁿ`). - `mcmc_kwargs` are passed on as keyword arguments to `DynamicHMC.mcmc_with_warmup` """ function dynamichmc_inference(problem::DiffEqBase.DEProblem, algorithm, t, data, - parameter_priors, - parameter_transformations = as(Vector, asℝ₊, - length(parameter_priors)); - σ_priors = fill(Normal(0, 5), size(data, 1)), - sample_u0 = false, rng = Random.GLOBAL_RNG, - num_samples = 1000, AD_gradient_kind = Val(:ForwardDiff), - save_idxs = nothing, solve_kwargs = (), - mcmc_kwargs = (initialization = (q = zeros(length(parameter_priors) + - (save_idxs === - nothing ? - length(data[:, 1]) : - length(save_idxs))),),)) + parameter_priors, + parameter_transformations = as(Vector, asℝ₊, + length(parameter_priors)); + σ_priors = fill(Normal(0, 5), size(data, 1)), + sample_u0 = false, rng = Random.GLOBAL_RNG, + num_samples = 1000, AD_gradient_kind = Val(:ForwardDiff), + save_idxs = nothing, solve_kwargs = (), + mcmc_kwargs = (initialization = (q = zeros(length(parameter_priors) + + (save_idxs === + nothing ? 
+ length(data[:, 1]) : + length(save_idxs))),),)) P = DynamicHMCPosterior(; algorithm = algorithm, problem = problem, t = t, data = data, - parameter_priors = parameter_priors, σ_priors = σ_priors, - solve_kwargs = solve_kwargs, sample_u0 = sample_u0, - save_idxs = save_idxs) + parameter_priors = parameter_priors, σ_priors = σ_priors, + solve_kwargs = solve_kwargs, sample_u0 = sample_u0, + save_idxs = save_idxs) trans = as((parameters = parameter_transformations, - σ = as(Vector, asℝ₊, length(σ_priors)))) + σ = as(Vector, asℝ₊, length(σ_priors)))) ℓ = TransformedLogDensity(trans, P) ∇ℓ = LogDensityProblemsAD.ADgradient(AD_gradient_kind, ℓ) results = mcmc_with_warmup(rng, ∇ℓ, num_samples; mcmc_kwargs...) diff --git a/src/stan_inference.jl b/src/stan_inference.jl index fcf80327..0cec0bcc 100644 --- a/src/stan_inference.jl +++ b/src/stan_inference.jl @@ -20,7 +20,7 @@ function generate_priors(n, priors) else for i in 1:n priors_string = string(priors_string, "theta_$i ~ ", stan_string(priors[i]), - ";") + ";") end end priors_string @@ -39,7 +39,7 @@ function generate_theta(n, priors) end if lower_bound != "" && upper_bound != "" theta = string(theta, "real", "<$lower_bound", ",", "$upper_bound>", - " theta_$i", ";") + " theta_$i", ";") elseif lower_bound != "" theta = string(theta, "real", "<$lower_bound", ">", " theta_$i", ";") elseif upper_bound != "" @@ -52,23 +52,23 @@ function generate_theta(n, priors) end function stan_inference(prob::DiffEqBase.DEProblem, - # Positional arguments - t, data, priors = nothing, stanmodel = nothing; - # DiffEqBayes keyword arguments - likelihood = Normal, vars = (StanODEData(), InverseGamma(3, 3)), - sample_u0 = false, save_idxs = nothing, diffeq_string = nothing, - # Stan differential equation function keyword arguments - alg = :rk45, reltol = 1e-3, abstol = 1e-6, maxiter = Int(1e5), - # stan_sample keyword arguments - num_samples = 1000, num_warmups = 1000, - num_cpp_chains = 1, num_chains = 1, num_threads = 1, - delta = 0.8, - # read_samples arguments - output_format = :mcmcchains, - # read_summary arguments - print_summary = true, - # pass in existing tmpdir - tmpdir = mktempdir()) + # Positional arguments + t, data, priors = nothing, stanmodel = nothing; + # DiffEqBayes keyword arguments + likelihood = Normal, vars = (StanODEData(), InverseGamma(3, 3)), + sample_u0 = false, save_idxs = nothing, diffeq_string = nothing, + # Stan differential equation function keyword arguments + alg = :rk45, reltol = 1e-3, abstol = 1e-6, maxiter = Int(1e5), + # stan_sample keyword arguments + num_samples = 1000, num_warmups = 1000, + num_cpp_chains = 1, num_chains = 1, num_threads = 1, + delta = 0.8, + # read_samples arguments + output_format = :mcmcchains, + # read_summary arguments + print_summary = true, + # pass in existing tmpdir + tmpdir = mktempdir()) save_idxs !== nothing && length(save_idxs) == 1 ? 
save_idxs = save_idxs[1] : save_idxs = save_idxs length_of_y = length(prob.u0) @@ -113,7 +113,7 @@ function stan_inference(prob::DiffEqBase.DEProblem, hyper_params = string(hyper_params, "sigma$(i-1) ~ $dist;") tuple_hyper_params = string(tuple_hyper_params, "sigma$(i-1)", ",") setup_params = string(setup_params, - "row_vector[$(length(save_idxs))] sigma$(i-1);") + "row_vector[$(length(save_idxs))] sigma$(i-1);") end end @@ -157,12 +157,12 @@ function stan_inference(prob::DiffEqBase.DEProblem, if isnothing(diffeq_string) diffeq_string = ModelingToolkit.build_function(ModelingToolkit.equations(sys), - ModelingToolkit.states(sys), - ModelingToolkit.parameters(sys), - ModelingToolkit.get_iv(sys); - expression = Val{true}, - fname = :sho, - target = ModelingToolkit.StanTarget()) + ModelingToolkit.states(sys), + ModelingToolkit.parameters(sys), + ModelingToolkit.get_iv(sys); + expression = Val{true}, + fname = :sho, + target = ModelingToolkit.StanTarget()) end parameter_estimation_model = " @@ -192,16 +192,16 @@ function stan_inference(prob::DiffEqBase.DEProblem, " stanmodel = SampleModel("parameter_estimation_model", - parameter_estimation_model, - tmpdir) + parameter_estimation_model, + tmpdir) end data = Dict("u0" => prob.u0, "T" => length(t), - "internal_var___u" => view(data, :, 1:length(t)), - "t0" => prob.tspan[1], "ts" => t) + "internal_var___u" => view(data, :, 1:length(t)), + "t0" => prob.tspan[1], "ts" => t) @time rc = stan_sample(stanmodel; data, num_threads, num_cpp_chains, - num_samples, num_warmups, num_chains, delta) + num_samples, num_warmups, num_chains, delta) if success(rc) return StanResult(stanmodel, rc, read_samples(stanmodel, output_format)) diff --git a/src/turing_inference.jl b/src/turing_inference.jl index dab77121..a7aa8a93 100644 --- a/src/turing_inference.jl +++ b/src/turing_inference.jl @@ -1,21 +1,21 @@ function turing_inference(prob::DiffEqBase.DEProblem, - alg, - t, - data, - priors; - likelihood_dist_priors = [InverseGamma(2, 3)], - likelihood = (u, p, t, σ) -> MvNormal(u, - Diagonal((σ[1])^2 * - ones(length(u)))), - num_samples = 1000, - sampler = Turing.NUTS(0.65), - parallel_type = MCMCSerial(), - n_chains = 1, - syms = [Turing.@varname(theta[i]) for i in 1:length(priors)], - sample_u0 = false, - save_idxs = nothing, - progress = false, - kwargs...) + alg, + t, + data, + priors; + likelihood_dist_priors = [InverseGamma(2, 3)], + likelihood = (u, p, t, σ) -> MvNormal(u, + Diagonal((σ[1])^2 * + ones(length(u)))), + num_samples = 1000, + sampler = Turing.NUTS(0.65), + parallel_type = MCMCSerial(), + n_chains = 1, + syms = [Turing.@varname(theta[i]) for i in 1:length(priors)], + sample_u0 = false, + save_idxs = nothing, + progress = false, + kwargs...) N = length(priors) Turing.@model function infer(x, ::Type{T} = Float64) where {T <: Real} theta = Vector{T}(undef, length(priors)) @@ -37,7 +37,7 @@ function turing_inference(prob::DiffEqBase.DEProblem, end _saveat = t === nothing ? Float64[] : t sol = solve(prob, alg; u0 = u0, p = p, saveat = _saveat, progress = progress, - save_idxs = save_idxs, kwargs...) + save_idxs = save_idxs, kwargs...) 
failure = size(sol, 2) < length(_saveat) if failure diff --git a/test/abc.jl b/test/abc.jl index 962b867b..d89882b5 100644 --- a/test/abc.jl +++ b/test/abc.jl @@ -17,13 +17,13 @@ data = convert(Array, randomized) priors = [Normal(1.5, 0.01)] bayesian_result = abc_inference(prob1, Tsit5(), t, data, priors; - num_samples = 500, ϵ = 0.001) + num_samples = 500, ϵ = 0.001) @test mean(bayesian_result.parameters, weights(bayesian_result.weights))≈1.5 atol=0.1 priors = [Normal(1.0, 0.01), Normal(1.0, 0.01), Normal(1.5, 0.01)] bayesian_result = abc_inference(prob1, Tsit5(), t, data, priors; - num_samples = 500, ϵ = 0.001, sample_u0 = true) + num_samples = 500, ϵ = 0.001, sample_u0 = true) meanvals = mean(bayesian_result.parameters, weights(bayesian_result.weights), 1) @test meanvals[1]≈1.0 atol=0.1 @@ -35,14 +35,14 @@ randomized = VectorOfArray([(sol(t[i]) + 0.01randn(1)) for i in 1:length(t)]) data = convert(Array, randomized) priors = [Normal(1.5, 0.01)] bayesian_result = abc_inference(prob1, Tsit5(), t, data, priors; - num_samples = 500, ϵ = 0.001, save_idxs = [1]) + num_samples = 500, ϵ = 0.001, save_idxs = [1]) @test mean(bayesian_result.parameters, weights(bayesian_result.weights))≈1.5 atol=0.1 priors = [Normal(1.0, 0.01), Normal(1.5, 0.01)] bayesian_result = abc_inference(prob1, Tsit5(), t, data, priors; - num_samples = 500, ϵ = 0.001, sample_u0 = true, - save_idxs = [1]) + num_samples = 500, ϵ = 0.001, sample_u0 = true, + save_idxs = [1]) meanvals = mean(bayesian_result.parameters, weights(bayesian_result.weights), 1) @test meanvals[1]≈1.0 atol=0.1 @@ -63,8 +63,8 @@ distfn = function (d1, d2) end priors = [Normal(1.5, 0.01)] bayesian_result = abc_inference(prob1, Tsit5(), t, data, priors; - num_samples = 500, ϵ = 0.001, - distancefunction = distfn) + num_samples = 500, ϵ = 0.001, + distancefunction = distfn) @test mean(bayesian_result.parameters, weights(bayesian_result.weights))≈1.5 atol=0.1 diff --git a/test/dynamicHMC.jl b/test/dynamicHMC.jl index 530b2d9a..8f1c93b3 100644 --- a/test/dynamicHMC.jl +++ b/test/dynamicHMC.jl @@ -21,14 +21,14 @@ randomized = VectorOfArray([(sol(t[i]) + σ * randn(2)) for i in 1:length(t)]) data = convert(Array, randomized) mcmc_kwargs = (initialization = (q = zeros(1 + 2),), reporter = reporter) bayesian_result = dynamichmc_inference(prob1, Tsit5(), t, data, (Normal(1.5, 1),), - as(Vector, asℝ₊, 1), mcmc_kwargs = mcmc_kwargs) + as(Vector, asℝ₊, 1), mcmc_kwargs = mcmc_kwargs) @test mean(p.parameters[1] for p in bayesian_result.posterior)≈p[1] atol=0.1 priors = [Normal(1.0, 0.01), Normal(1.0, 0.01), Normal(1.5, 0.01)] mcmc_kwargs = (initialization = (q = zeros(3 + 2),), reporter = reporter) bayesian_result = dynamichmc_inference(prob1, Tsit5(), t, data, priors, - as(Vector, asℝ₊, 3), mcmc_kwargs = mcmc_kwargs, - sample_u0 = true) + as(Vector, asℝ₊, 3), mcmc_kwargs = mcmc_kwargs, + sample_u0 = true) @test mean(p.parameters[1] for p in bayesian_result.posterior)≈1.0 atol=0.1 @test mean(p.parameters[2] for p in bayesian_result.posterior)≈1.0 atol=0.1 @@ -40,15 +40,15 @@ data = convert(Array, randomized) mcmc_kwargs = (initialization = (q = zeros(1 + 1),), reporter = reporter) bayesian_result = dynamichmc_inference(prob1, Tsit5(), t, data, (Normal(1.5, 1),), - as(Vector, asℝ₊, 1), mcmc_kwargs = mcmc_kwargs, - save_idxs = [1]) + as(Vector, asℝ₊, 1), mcmc_kwargs = mcmc_kwargs, + save_idxs = [1]) @test mean(p.parameters[1] for p in bayesian_result.posterior)≈p[1] atol=0.1 priors = [Normal(1.0, 0.001), Normal(1.5, 0.001)] mcmc_kwargs = (initialization = (q = zeros(2 + 
1),), reporter = reporter) bayesian_result = dynamichmc_inference(prob1, Tsit5(), t, data, priors, - as(Vector, asℝ₊, 2), mcmc_kwargs = mcmc_kwargs, - save_idxs = [1], sample_u0 = true) + as(Vector, asℝ₊, 2), mcmc_kwargs = mcmc_kwargs, + save_idxs = [1], sample_u0 = true) @test mean(p.parameters[1] for p in bayesian_result.posterior)≈1.0 atol=0.1 @test mean(p.parameters[2] for p in bayesian_result.posterior)≈1.5 atol=0.1 @@ -67,8 +67,8 @@ likelihood = function (sol) return l end @test_broken bayesian_result = dynamichmc_inference(prob1, Tsit5(), likelihood, - [truncated(Normal(1.5, 1), 0, 2)], - as((a = asℝ₊,))) + [truncated(Normal(1.5, 1), 0, 2)], + as((a = asℝ₊,))) @test_broken mean(bayesian_result[1][1])≈1.5 atol=1e-1 f1 = @ode_def LotkaVolterraTest4 begin @@ -84,11 +84,11 @@ t = collect(range(1, stop = 10, length = 10)) randomized = VectorOfArray([(sol(t[i]) + 0.01randn(2)) for i in 1:length(t)]) data = convert(Array, randomized) priors = (a = truncated(Normal(1.5, 0.01), 0, 2), - b = truncated(Normal(1.0, 0.01), 0, 1.5), - c = truncated(Normal(3.0, 0.01), 0, 4), - d = truncated(Normal(1.0, 0.01), 0, 2)) + b = truncated(Normal(1.0, 0.01), 0, 1.5), + c = truncated(Normal(3.0, 0.01), 0, 4), + d = truncated(Normal(1.0, 0.01), 0, 2)) mcmc_kwargs = (initialization = (q = zeros(4 + 2), ϵ = 1.0), reporter = reporter, - warmup_stages = default_warmup_stages(; stepsize_search = nothing)) + warmup_stages = default_warmup_stages(; stepsize_search = nothing)) bayesian_result = dynamichmc_inference(prob1, Tsit5(), t, data, priors, - as(Vector, asℝ₊, 4), mcmc_kwargs = mcmc_kwargs) + as(Vector, asℝ₊, 4), mcmc_kwargs = mcmc_kwargs) @test norm(mean([p.parameters for p in bayesian_result.posterior]) .- p, Inf) ≤ 0.1 diff --git a/test/runtests.jl b/test/runtests.jl index a8121aec..e09e7a7a 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -4,12 +4,20 @@ const LONGER_TESTS = false const GROUP = get(ENV, "GROUP", "All") if GROUP == "All" || GROUP == "Core" - @time @safetestset "DynamicHMC" begin include("dynamicHMC.jl") end - @time @safetestset "Turing" begin include("turing.jl") end + @time @safetestset "DynamicHMC" begin + include("dynamicHMC.jl") + end + @time @safetestset "Turing" begin + include("turing.jl") + end # @time @safetestset "ABC" begin include("abc.jl") end end if GROUP == "Stan" || GROUP == "All" - @time @safetestset "Stan_String" begin include("stan_string.jl") end - @time @safetestset "Stan" begin include("stan.jl") end + @time @safetestset "Stan_String" begin + include("stan_string.jl") + end + @time @safetestset "Stan" begin + include("stan.jl") + end end diff --git a/test/stan.jl b/test/stan.jl index 41b2ede0..124fb16a 100644 --- a/test/stan.jl +++ b/test/stan.jl @@ -17,23 +17,23 @@ data = convert(Array, randomized) priors = [truncated(Normal(1.5, 0.1), 1.0, 1.8)] bayesian_result = stan_inference(prob1, t, data, priors; num_samples = 300, - num_warmups = 500, likelihood = Normal) + num_warmups = 500, likelihood = Normal) @test mean(get(bayesian_result.chains, :theta_1)[1])≈1.5 atol=3e-1 # Test norecompile bayesian_result2 = stan_inference(prob1, t, data, priors, bayesian_result.model; - num_samples = 300, num_warmups = 500, likelihood = Normal) + num_samples = 300, num_warmups = 500, likelihood = Normal) @test mean(get(bayesian_result2.chains, :theta_1)[1])≈1.5 atol=3e-1 priors = [ truncated(Normal(1.0, 0.01), 0.5, 2.0), truncated(Normal(1.0, 0.01), 0.5, 2.0), - truncated(Normal(1.5, 0.01), 1.0, 2.0), + truncated(Normal(1.5, 0.01), 1.0, 2.0) ] bayesian_result = 
stan_inference(prob1, t, data, priors; num_samples = 300, - num_warmups = 500, likelihood = Normal, sample_u0 = true) + num_warmups = 500, likelihood = Normal, sample_u0 = true) @test mean(get(bayesian_result.chains, :theta_1)[1])≈1.0 atol=3e-1 @test mean(get(bayesian_result.chains, :theta_2)[1])≈1.0 atol=3e-1 @@ -44,14 +44,14 @@ randomized = VectorOfArray([(sol(t[i]) + 0.01 * randn(1)) for i in 1:length(t)]) data = convert(Array, randomized) priors = [truncated(Normal(1.5, 0.1), 0.5, 2)] bayesian_result = stan_inference(prob1, t, data, priors; num_samples = 300, - num_warmups = 500, likelihood = Normal, save_idxs = [1]) + num_warmups = 500, likelihood = Normal, save_idxs = [1]) @test mean(get(bayesian_result.chains, :theta_1)[1])≈1.5 atol=3e-1 priors = [truncated(Normal(1.0, 0.01), 0.5, 2), truncated(Normal(1.5, 0.01), 0.5, 2)] bayesian_result = stan_inference(prob1, t, data, priors; num_samples = 300, - num_warmups = 500, likelihood = Normal, save_idxs = [1], - sample_u0 = true) + num_warmups = 500, likelihood = Normal, save_idxs = [1], + sample_u0 = true) @test mean(get(bayesian_result.chains, :theta_1)[1])≈1.0 atol=3e-1 @test mean(get(bayesian_result.chains, :theta_2)[1])≈1.5 atol=3e-1 @@ -73,8 +73,8 @@ priors = [truncated(Normal(1.5, 0.01), 0.5, 2), truncated(Normal(1.0, 0.01), 0.5 truncated(Normal(3.0, 0.01), 0.5, 4), truncated(Normal(1.0, 0.01), 0.5, 2)] bayesian_result = stan_inference(prob1, t, data, priors; num_samples = 100, - num_warmups = 500, - vars = (DiffEqBayes.StanODEData(), InverseGamma(4, 1))) + num_warmups = 500, + vars = (DiffEqBayes.StanODEData(), InverseGamma(4, 1))) @test mean(get(bayesian_result.chains, :theta_1)[1])≈1.5 atol=1e-1 @test mean(get(bayesian_result.chains, :theta_2)[1])≈1.0 atol=1e-1 @test mean(get(bayesian_result.chains, :theta_3)[1])≈3.0 atol=1e-1 diff --git a/test/turing.jl b/test/turing.jl index 9cba1028..a83cb4c1 100644 --- a/test/turing.jl +++ b/test/turing.jl @@ -16,28 +16,32 @@ randomized = VectorOfArray([(sol(t[i]) + 0.01randn(2)) for i in 1:length(t)]) data = convert(Array, randomized) priors = [Normal(1.5, 0.01)] -bayesian_result = turing_inference(prob1, Tsit5(), t, data, priors; num_samples = 500, syms = [:a]) +bayesian_result = turing_inference( + prob1, Tsit5(), t, data, priors; num_samples = 500, syms = [:a]) @show bayesian_result @test mean(get(bayesian_result, :a)[1])≈1.5 atol=3e-1 -bayesian_result = turing_inference(prob1, Rosenbrock23(autodiff = false), t, data, priors; num_samples = 500, syms = [:a]) +bayesian_result = turing_inference( + prob1, Rosenbrock23(autodiff = false), t, data, priors; num_samples = 500, syms = [:a]) bayesian_result = turing_inference(prob1, Rosenbrock23(), t, data, priors; - num_samples = 500, - syms = [:a]) + num_samples = 500, + syms = [:a]) # --- test Multithreaded sampling println("Multithreaded case") -result_threaded = turing_inference(prob1, Tsit5(), t, data, priors; num_samples = 500, syms = [:a], parallel_type=MCMCThreads(), n_chains=2) +result_threaded = turing_inference(prob1, Tsit5(), t, data, priors; num_samples = 500, + syms = [:a], parallel_type = MCMCThreads(), n_chains = 2) @test length(result_threaded.value.axes[3]) == 2 @test mean(get(result_threaded, :a)[1])≈1.5 atol=3e-1 # --- priors = [Normal(1.0, 0.01), Normal(1.0, 0.01), Normal(1.5, 0.01)] -bayesian_result = turing_inference(prob1, Tsit5(), t, data, priors; num_samples = 500, sample_u0 = true, syms = [:u1, :u2, :a]) +bayesian_result = turing_inference(prob1, Tsit5(), t, data, priors; num_samples = 500, + sample_u0 = true, syms = 
[:u1, :u2, :a]) @test mean(get(bayesian_result, :a)[1])≈1.5 atol=3e-1 @test mean(get(bayesian_result, :u1)[1])≈1.0 atol=3e-1 @@ -48,14 +52,14 @@ randomized = VectorOfArray([(sol(t[i]) + 0.01 * randn(1)) for i in 1:length(t)]) data = convert(Array, randomized) priors = [Normal(1.5, 0.01)] bayesian_result = turing_inference(prob1, Tsit5(), t, data, priors; num_samples = 500, - syms = [:a], save_idxs = [1]) + syms = [:a], save_idxs = [1]) @test mean(get(bayesian_result, :a)[1])≈1.5 atol=3e-1 priors = [Normal(1.0, 0.01), Normal(1.5, 0.01)] bayesian_result = turing_inference(prob1, Tsit5(), t, data, priors; num_samples = 500, - sample_u0 = true, - syms = [:u1, :a], save_idxs = [1]) + sample_u0 = true, + syms = [:u1, :a], save_idxs = [1]) @test mean(get(bayesian_result, :a)[1])≈1.5 atol=3e-1 @test mean(get(bayesian_result, :u1)[1])≈1.0 atol=3e-1 @@ -77,7 +81,7 @@ priors = [truncated(Normal(1.5, 0.01), 0, 2), truncated(Normal(1.0, 0.01), 0, 1. truncated(Normal(3.0, 0.01), 0, 4), truncated(Normal(1.0, 0.01), 0, 2)] bayesian_result = turing_inference(prob2, Tsit5(), t, data, priors; num_samples = 500, - syms = [:a, :b, :c, :d]) + syms = [:a, :b, :c, :d]) @show bayesian_result @@ -103,10 +107,10 @@ s_sol = solve(s_prob, DynamicSS(Tsit5()), abstol = 1e-4, reltol = 1e-3) data = [1.05, 0.23] priors = [truncated(Normal(2.0, 0.2), 0, 3)] bayesian_result = turing_inference(s_prob, DynamicSS(Tsit5()), - nothing, data, priors; - num_samples = 500, - maxiters = 1e6, - syms = [:α], - abstol = 1e-4, reltol = 1e-3,) + nothing, data, priors; + num_samples = 500, + maxiters = 1e6, + syms = [:α], + abstol = 1e-4, reltol = 1e-3) @test mean(get(bayesian_result, :α)[1])≈2.0 atol=3e-1
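
For reference, the subject line above records the exact tool and version used
to generate this patch ("JuliaFormatter.format()", JuliaFormatter v1.0.47), so
the run can be reproduced. A minimal sketch, assuming it is executed from the
repository root and that the release named in the subject is pinned (a
different JuliaFormatter release may produce a different diff):

    # Pin the formatter release named in the commit subject.
    using Pkg
    Pkg.add(name = "JuliaFormatter", version = "1.0.47")

    # Rewrite every .jl file under the current directory in place.
    using JuliaFormatter
    format(".")

format returns a Bool indicating whether the files were already formatted,
which is convenient for a CI-style check before committing a run like this one.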