diff --git a/HISTORY.md b/HISTORY.md index 1e0bdae8e..130b4facb 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,3 +1,9 @@ +# 0.41.0 + +Removed the `varinfo` keyword argument from `DynamicPPL.TestUtils.AD.run_ad` and replaced the `varinfo` field in `ADResult` with `ldf::LogDensityFunction`. + +Removed `getargnames`, `getmissings`, and `Base.nameof(::Model)` from the public API (export and documentation) as they are considered internal implementation details. + # 0.40.17 Implemented missing methods for `Base.copy` on internal structs. diff --git a/Project.toml b/Project.toml index 86503e8d4..63200edc3 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "DynamicPPL" uuid = "366bfd00-2699-11ea-058f-f148b4cae6d8" -version = "0.40.17" +version = "0.41.0" [deps] ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" diff --git a/benchmarks/Project.toml b/benchmarks/Project.toml index dd37468e6..6adb27efa 100644 --- a/benchmarks/Project.toml +++ b/benchmarks/Project.toml @@ -24,7 +24,7 @@ DynamicPPL = {path = "../"} ADTypes = "1.14.0" Chairmarks = "1.3.1" Distributions = "0.25.117" -DynamicPPL = "0.40" +DynamicPPL = "0.41" Enzyme = "0.13" ForwardDiff = "1" JSON = "1.3.0" diff --git a/docs/Project.toml b/docs/Project.toml index 658a85a23..1de8a07be 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -32,7 +32,7 @@ Distributions = "0.25" Documenter = "1" DocumenterInterLinks = "1" DocumenterMermaid = "0.1, 0.2" -DynamicPPL = "0.40" +DynamicPPL = "0.41" FillArrays = "0.13, 1" ForwardDiff = "0.10, 1" LogDensityProblems = "2" diff --git a/docs/src/api.md b/docs/src/api.md index f6e66b011..289febe0d 100644 --- a/docs/src/api.md +++ b/docs/src/api.md @@ -28,14 +28,6 @@ Model Model() ``` -Basic properties of a model can be accessed with [`getargnames`](@ref), [`getmissings`](@ref), and [`nameof`](@ref). 
- -```@docs -nameof(::Model) -getargnames -getmissings -``` - The context of a model can be set using [`contextualize`](@ref): ```@docs diff --git a/src/DynamicPPL.jl b/src/DynamicPPL.jl index 7d26e073a..379437303 100644 --- a/src/DynamicPPL.jl +++ b/src/DynamicPPL.jl @@ -116,8 +116,6 @@ export AbstractVarInfo, OrderedDict, # Model Model, - getmissings, - getargnames, setthreadsafe, requires_threadsafe, extract_priors, diff --git a/src/test_utils/ad.jl b/src/test_utils/ad.jl index 8c9f96491..434540cd5 100644 --- a/src/test_utils/ad.jl +++ b/src/test_utils/ad.jl @@ -8,16 +8,9 @@ using DynamicPPL: DynamicPPL, Model, LogDensityFunction, - VarInfo, - AbstractVarInfo, AbstractTransformStrategy, LinkAll, getlogjoint_internal, - to_vector_params, - get_vector_values, - unflatten!!, - OnlyAccsVarInfo, - VectorValueAccumulator, InitFromPrior using LinearAlgebra: norm @@ -151,8 +144,9 @@ struct ADResult{Tparams<:AbstractFloat,Tresult<:AbstractFloat,Ttol<:AbstractFloa model::Model "The function used to extract the log density from the model" getlogdensity::Function - "The VarInfo that was used" - varinfo::Union{Nothing,AbstractVarInfo} + + "The LogDensityFunction that was used" + ldf::LogDensityFunction "The values at which the model was evaluated" params::Vector{Tparams} "The AD backend that was tested" @@ -201,7 +195,6 @@ end rtol::AbstractFloat=sqrt(eps()), getlogdensity::Function=getlogjoint_internal, rng::Random.AbstractRNG=Random.default_rng(), - varinfo::Union{Nothing,AbstractVarInfo}=nothing, transform_strategy::AbstractTransformStrategy=LinkAll(), params::Union{Nothing,Vector{<:AbstractFloat}}=nothing, verbose=true, @@ -235,15 +228,12 @@ Everything else is optional, and can be categorised into several groups: You can control whether this transformation happens or not by passing the `transform_strategy` keyword argument. The default is `LinkAll()`, which means that all - parameters will be transformed to unconstrained space. 
However, if you want to evaluate - in the original space, you can use `UnlinkAll()`; you can also specify mixed linking - strategies if desired (see [the DynamicPPL documentation](@ref transform-strategies) for - more information. + parameters will be transformed to unconstrained space. This is the most relevant setting + for testing AD. - Instead of passing the `transform_strategy`, you can also directly pass the `varinfo` - keyword argument, which expects a VarInfo object that has been generated with the desired - transformation. If both `varinfo` and `transform_strategy` are passed, then `varinfo` - takes precedence, and `transform_strategy` is ignored. + However, if you want to evaluate in the original space, you can use `UnlinkAll()`; you + can also specify mixed linking strategies if desired (see [the DynamicPPL + documentation](@ref transform-strategies) for more information). 1. _How to specify the parameters to be used for evaluation._ @@ -252,12 +242,6 @@ Everything else is optional, and can be categorised into several groups: prior of the model. If you want to seed the parameter generation, you can pass the `rng` keyword argument, which will then be used to generate the parameters. - Note that these only reflect the parameters used for _evaluating_ the gradient. If you - also want to control the parameters used for _preparing_ the gradient, then you need to - manually set these parameters inside the `varinfo` keyword argument, for example using - `vi = DynamicPPL.unflatten!!(vi, prep_params)`. You could then evaluate the gradient at a - different set of parameters using the `params` keyword argument. - 1. 
_Which type of logp is being calculated._ By default, `run_ad` evaluates the 'internal log joint density' of the model, @@ -334,7 +318,6 @@ function run_ad( rtol::AbstractFloat=sqrt(eps()), getlogdensity::Function=getlogjoint_internal, rng::AbstractRNG=default_rng(), - varinfo::Union{Nothing,AbstractVarInfo}=nothing, transform_strategy::AbstractTransformStrategy=LinkAll(), params::Union{Nothing,Vector{<:AbstractFloat}}=nothing, verbose=true, @@ -347,19 +330,11 @@ function run_ad( verbose && @info "Running AD on $(model.f) with $(adtype)\n" # Generate initial parameters - vvals = if isnothing(varinfo) - accs = OnlyAccsVarInfo(VectorValueAccumulator()) - _, accs = DynamicPPL.init!!(rng, model, accs, InitFromPrior(), transform_strategy) - get_vector_values(accs) - elseif varinfo isa VarInfo - varinfo.values - elseif varinfo isa OnlyAccsVarInfo - get_vector_values(varinfo) - end - ldf = LogDensityFunction(model, getlogdensity, vvals; adtype=adtype) + ldf = LogDensityFunction(model, getlogdensity, transform_strategy; adtype=adtype) if isnothing(params) - params = to_vector_params(vvals, ldf) + params = rand(rng, ldf, InitFromPrior()) end + params = [p for p in params] # Concretise verbose && println(" params : $(params)") @@ -380,7 +355,7 @@ function run_ad( grad_true = test.grad elseif test isa WithBackend ldf_reference = LogDensityFunction( - model, getlogdensity, vvals; adtype=test.adtype + model, getlogdensity, transform_strategy; adtype=test.adtype ) value_true, grad_true = logdensity_and_gradient(ldf_reference, params) # collect(): https://github.com/JuliaDiff/DifferentiationInterface.jl/issues/754 @@ -417,7 +392,7 @@ function run_ad( return ADResult( model, getlogdensity, - varinfo, + ldf, params, adtype, atol,