From 548896f011802ce1e55988eb5951268aa2ed84ab Mon Sep 17 00:00:00 2001 From: Penelope Yong Date: Sat, 21 Mar 2026 02:47:29 +0000 Subject: [PATCH 01/11] Fix propagation of non-Float64 floats --- HISTORY.md | 10 +++++++ docs/src/api.md | 6 ---- src/DynamicPPL.jl | 1 - src/accumulators/default.jl | 36 +++++++++++++++++----- src/distribution_wrappers.jl | 6 ++-- src/transformed_values.jl | 8 ++--- src/utils.jl | 43 --------------------------- test/accumulators.jl | 18 +++++++++++ test/floattypes/LocalPreferences.toml | 2 ++ test/logdensityfunction.jl | 31 +++++++++++++++++++ 10 files changed, 97 insertions(+), 64 deletions(-) create mode 100644 test/floattypes/LocalPreferences.toml diff --git a/HISTORY.md b/HISTORY.md index 17c328988..7a57237c2 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,3 +1,13 @@ +# 0.40.15 + +DynamicPPL now allows you to set the type that log-probabilities are initialised with, using the `set_logprob_type!` function. +This records a compile-time preference so requires restarting Julia to take effect. + +This allows model log-probability accumulation to work with different numerical precisions. +For example, if your model is defined using distributions that are parameterised by `Float32` only (and avoid promoting them to `Float64` elsewhere in the model), and you call `DynamicPPL.set_logprob_type!(Float32)`, the resulting log-probabilities will also be `Float32`. + +Previously, DynamicPPL would automatically choose a `Float64` log-probability, causing any lower-precision model to be promoted. + # 0.40.14 Fixed `check_model()` erroneously failing for models such as `x[1:2] .~ univariate_dist`. diff --git a/docs/src/api.md b/docs/src/api.md index 77066b89c..e82279c39 100644 --- a/docs/src/api.md +++ b/docs/src/api.md @@ -193,12 +193,6 @@ DynamicPPL.prefix ## Utilities -`typed_identity` is the same as `identity`, but with an overload for `with_logabsdet_jacobian` that ensures that it never errors. 
- -```@docs -typed_identity -``` - It is possible to manually increase (or decrease) the accumulated log likelihood or prior from within a model function. ```@docs diff --git a/src/DynamicPPL.jl b/src/DynamicPPL.jl index 619e8d4c2..5afe4746e 100644 --- a/src/DynamicPPL.jl +++ b/src/DynamicPPL.jl @@ -114,7 +114,6 @@ export AbstractVarInfo, @model, # Utilities OrderedDict, - typed_identity, # Model Model, getmissings, diff --git a/src/accumulators/default.jl b/src/accumulators/default.jl index 0cf5dd7a7..518c912f3 100644 --- a/src/accumulators/default.jl +++ b/src/accumulators/default.jl @@ -17,16 +17,37 @@ types like LogPriorAccumulator, LogJacobianAccumulator, and LogLikelihoodAccumul """ abstract type LogProbAccumulator{T<:Real} <: AbstractAccumulator end -# The first of the below methods sets AccType{T}() = AccType(zero(T)) for any -# AccType <: LogProbAccumulator{T}. The second one sets LogProbType as the default eltype T -# when calling AccType(). +""" + NoLogProb <: Real + +Singleton type that represents the absence of a log probability value. This is used as the +default type parameter for `LogProbAccumulator` when no log probability value is needed, to +avoid defining a concrete type such as `Float64` that would cause unwanted type promotion +when accumulating log probabilities of other types (e.g., `Float32`). + +Adding anything to `NoLogProb()` returns the other thing. In other words, `NoLogProb` is a +true additive identity which additionally preserves types. 
+""" +struct NoLogProb <: Real end +Base.zero(::Type{NoLogProb}) = NoLogProb() +Base.convert(::Type{T}, ::NoLogProb) where {T<:Number} = zero(T) +Base.promote_rule(::Type{NoLogProb}, ::Type{T}) where {T<:Number} = T +Base.iszero(::NoLogProb) = true +Base.hash(::NoLogProb, h::UInt) = hash(0.0, h) +Base.:(+)(::NoLogProb, ::NoLogProb) = NoLogProb() +(::Type{T})(::NoLogProb) where {T<:Real} = zero(T) + """ LogProbAccumulator{T}() -Create a new `LogProbAccumulator` accumulator with the log prior initialized to zero. +Create a new `LogProbAccumulator` accumulator with the log prior initialized to `zero(T)`. + + LogProbAccumulator() + +Create a new `LogProbAccumulator{NoLogProb}` accumulator. """ (::Type{AccType})() where {T<:Real,AccType<:LogProbAccumulator{T}} = AccType(zero(T)) -(::Type{AccType})() where {AccType<:LogProbAccumulator} = AccType{LogProbType}() +(::Type{AccType})() where {AccType<:LogProbAccumulator} = AccType{NoLogProb}(NoLogProb()) Base.copy(acc::LogProbAccumulator) = acc @@ -38,7 +59,7 @@ end # equality of hashes. Both of the below implementations are also different from the default # implementation for structs. 
function Base.:(==)(acc1::LogProbAccumulator, acc2::LogProbAccumulator) - return accumulator_name(acc1) === accumulator_name(acc2) && logp(acc1) == logp(acc2) + return (accumulator_name(acc1) === accumulator_name(acc2)) & (logp(acc1) == logp(acc2)) end function Base.isequal(acc1::LogProbAccumulator, acc2::LogProbAccumulator) @@ -59,6 +80,7 @@ function combine(acc::LogProbAccumulator, acc2::LogProbAccumulator) end acclogp(acc::LogProbAccumulator, val) = basetypeof(acc)(logp(acc) + val) +acclogp(acc::LogProbAccumulator{NoLogProb}, val) = basetypeof(acc)(val) function Base.convert( ::Type{AccType}, acc::LogProbAccumulator @@ -175,7 +197,7 @@ function accumulate_observe!!(acc::LogLikelihoodAccumulator, right, left, vn, te return acclogp(acc, Distributions.loglikelihood(right, left)) end -function default_accumulators(::Type{FloatT}=LogProbType) where {FloatT} +function default_accumulators(::Type{FloatT}=NoLogProb) where {FloatT} return AccumulatorTuple( LogPriorAccumulator{FloatT}(), LogJacobianAccumulator{FloatT}(), diff --git a/src/distribution_wrappers.jl b/src/distribution_wrappers.jl index 33dd451ff..bd940feac 100644 --- a/src/distribution_wrappers.jl +++ b/src/distribution_wrappers.jl @@ -54,13 +54,13 @@ function Distributions.rand!( return Distributions.rand!(rng, d.dist, x) end function Distributions.logpdf(::NoDist{<:Univariate}, x::Real) - return zero(LogProbType) + return NoLogProb() end function Distributions.logpdf(::NoDist{<:Multivariate}, x::AbstractVector{<:Real}) - return zero(LogProbType) + return NoLogProb() end function Distributions.logpdf(::NoDist{<:Matrixvariate}, x::AbstractMatrix{<:Real}) - return zero(LogProbType) + return NoLogProb() end for f in ( diff --git a/src/transformed_values.jl b/src/transformed_values.jl index 1befbf435..0166ad2e5 100644 --- a/src/transformed_values.jl +++ b/src/transformed_values.jl @@ -179,7 +179,7 @@ struct UntransformedValue{V} <: AbstractTransformedValue end Base.:(==)(tv1::UntransformedValue, 
tv2::UntransformedValue) = tv1.val == tv2.val Base.isequal(tv1::UntransformedValue, tv2::UntransformedValue) = isequal(tv1.val, tv2.val) -get_transform(::UntransformedValue) = typed_identity +get_transform(::UntransformedValue) = Bijectors.VectorBijectors.TypedIdentity() get_internal_value(tv::UntransformedValue) = tv.val set_internal_value(::UntransformedValue, new_val) = UntransformedValue(new_val) @@ -362,7 +362,7 @@ function apply_transform_strategy( # vectorisation transform. However, sometimes that's not needed (e.g. when # evaluating with an OnlyAccsVarInfo). So we just return an UntransformedValue. If a # downstream function requires a VectorValue, it's on them to generate it. - (raw_value, UntransformedValue(raw_value), zero(LogProbType)) + (raw_value, UntransformedValue(raw_value), NoLogProb()) else error("unknown target transform $target") end @@ -383,7 +383,7 @@ function apply_transform_strategy( (raw_value, linked_tv, logjac) elseif target isa Unlink # No need to transform further - (raw_value, tv, zero(LogProbType)) + (raw_value, tv, NoLogProb()) else error("unknown target transform $target") end @@ -406,7 +406,7 @@ function apply_transform_strategy( (raw_value, linked_tv, logjac) elseif target isa Unlink # No need to transform further - (raw_value, tv, zero(LogProbType)) + (raw_value, tv, NoLogProb()) else error("unknown target transform $target") end diff --git a/src/utils.jl b/src/utils.jl index f89071349..a96203516 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -2,49 +2,6 @@ # defined in other files. function subset end -""" -The type for all log probability variables. - -This is Float64 on 64-bit systems and Float32 on 32-bit systems. -""" -const LogProbType = float(Real) - -""" - typed_identity(x) - -Identity function, but with an overload for `with_logabsdet_jacobian` to ensure -that it returns a sensible zero logjac. 
- -The problem with plain old `identity` is that the default definition of -`with_logabsdet_jacobian` for `identity` returns `zero(eltype(x))`: -https://github.com/JuliaMath/ChangesOfVariables.jl/blob/d6a8115fc9b9419decbdb48e2c56ec9675b4c6a4/src/with_ladj.jl#L154 - -This is fine for most samples `x`, but if `eltype(x)` doesn't return a sensible type (e.g. -if it's `Any`), then using `identity` will error with `zero(Any)`. This can happen with, -for example, `ProductNamedTupleDistribution`: - -```julia -julia> using Distributions; d = product_distribution((a = Normal(), b = LKJCholesky(3, 0.5))); - -julia> eltype(rand(d)) -Any -``` - -The same problem precludes us from eventually broadening the scope of DynamicPPL.jl to -support distributions with non-numeric samples. - -Furthermore, in principle, the type of the log-probability should be separate from the type -of the sample. Thus, instead of using `zero(LogProbType)`, we should use the eltype of the -LogJacobianAccumulator. There's no easy way to thread that through here, but if a way to do -this is discovered, then `typed_identity` is what will allow us to obtain that custom -behaviour. 
-""" -function typed_identity end -@inline typed_identity(x) = x -@inline Bijectors.with_logabsdet_jacobian(::typeof(typed_identity), x) = - (x, zero(LogProbType)) -@inline Bijectors.inverse(::typeof(typed_identity)) = typed_identity - """ @addlogprob!(ex) diff --git a/test/accumulators.jl b/test/accumulators.jl index b88d76e96..c027acf69 100644 --- a/test/accumulators.jl +++ b/test/accumulators.jl @@ -11,6 +11,7 @@ using DynamicPPL: AccumulatorTuple, LogLikelihoodAccumulator, LogPriorAccumulator, + NoLogProb, accumulate_assume!!, accumulate_observe!!, combine, @@ -27,6 +28,8 @@ using DynamicPPL: @testset "accumulators" begin @testset "individual accumulator types" begin @testset "constructors" begin + # Note that NoLogProb() == 0.0 because `NoLogProb()` gets promoted to Float64, + # which in turn calls `convert(Float64, NoLogProb())`, which is `zero(Float64)`. @test LogPriorAccumulator(0.0) == LogPriorAccumulator() == LogPriorAccumulator{Float64}() == @@ -37,6 +40,14 @@ using DynamicPPL: LogLikelihoodAccumulator{Float64}() == LogLikelihoodAccumulator{Float64}(0.0) == DynamicPPL.reset(LogLikelihoodAccumulator(1.0)) + # However, `NoLogProb() !== 0.0`. 
+ @test LogPriorAccumulator(NoLogProb()) !== LogPriorAccumulator(0.0) + @test LogPriorAccumulator() === LogPriorAccumulator(NoLogProb()) + end + + @testset "reset" begin + @test DynamicPPL.reset(LogPriorAccumulator(1.0)) === LogPriorAccumulator(0.0) + @test DynamicPPL.reset(LogPriorAccumulator()) === LogPriorAccumulator() end @testset "addition and incrementation" begin @@ -48,6 +59,13 @@ using DynamicPPL: LogLikelihoodAccumulator(2.0) end + @testset "addition to NoLogProb" begin + for val in (1.0, 1.0f0, BigFloat(1.0), 1, UInt(1), Float16(1.0)) + @test acclogp(LogPriorAccumulator(NoLogProb()), val) == + LogPriorAccumulator(val) + end + end + @testset "split and combine" begin for acc in [ LogPriorAccumulator(1.0), diff --git a/test/floattypes/LocalPreferences.toml b/test/floattypes/LocalPreferences.toml new file mode 100644 index 000000000..07bee33ee --- /dev/null +++ b/test/floattypes/LocalPreferences.toml @@ -0,0 +1,2 @@ +[DynamicPPL] +floattype = "min" diff --git a/test/logdensityfunction.jl b/test/logdensityfunction.jl index 904c2d69d..f089de665 100644 --- a/test/logdensityfunction.jl +++ b/test/logdensityfunction.jl @@ -96,6 +96,7 @@ end ldf = DynamicPPL.LogDensityFunction(model) xs = [1.0] + @test LogDensityProblems.logdensity(ldf, xs) ≈ logpdf(Normal(), xs[1]) + N * logpdf(Normal(xs[1]), 0.0) end @@ -422,6 +423,36 @@ end end end +@testset "LogDensityFunction: non-default float types" begin + for T in (Float16, Float32, BigFloat) + @model function with_floattype(y) + x ~ Normal(T(0.0), T(1.0)) + return y ~ Normal(x, T(1.0)) + end + model = with_floattype(T(0.5)) + + for adtype in [ + AutoForwardDiff(), + AutoReverseDiff(; compile=false), + AutoReverseDiff(; compile=true), + AutoMooncake(; config=nothing), + ] + if T == BigFloat && adtype isa AutoMooncake + # Mooncake doesn't seem to support BigFloat + continue + end + ldf = DynamicPPL.LogDensityFunction( + model, getlogjoint_internal, LinkAll(); adtype=adtype + ) + x = rand(ldf) + @test eltype(x) == T + 
@test LogDensityProblems.logdensity(ldf, x) isa T + @test LogDensityProblems.logdensity_and_gradient(ldf, x)[1] isa T + @test eltype(LogDensityProblems.logdensity_and_gradient(ldf, x)[2]) == T + end + end +end + @testset "AD with LogDensityFunction" begin # Used as the ground truth that others are compared against. ref_adtype = AutoForwardDiff() From 90bc136d36bbf2c8bf220c84adadf53d79b0d656 Mon Sep 17 00:00:00 2001 From: Penelope Yong Date: Mon, 23 Mar 2026 20:59:23 +0000 Subject: [PATCH 02/11] Reinstate LogProbType, make it a compile-time preference --- .gitignore | 1 + Project.toml | 2 + docs/src/api.md | 8 +++ src/DynamicPPL.jl | 1 + src/accumulators/default.jl | 26 ++------- src/distribution_wrappers.jl | 6 +-- src/transformed_values.jl | 6 +-- src/utils.jl | 101 +++++++++++++++++++++++++++++++++++ test/accumulators.jl | 21 +++++--- 9 files changed, 137 insertions(+), 35 deletions(-) diff --git a/.gitignore b/.gitignore index d5a87f1eb..10d8a5f69 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ Manifest.toml **.~undo-tree~ benchmarks/*.json +LocalPreferences.toml diff --git a/Project.toml b/Project.toml index af9ae267f..ac9ad87df 100644 --- a/Project.toml +++ b/Project.toml @@ -22,6 +22,7 @@ LogDensityProblems = "6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c" MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" +Preferences = "21216c6a-2e73-6563-6e65-726566657250" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" @@ -70,6 +71,7 @@ MarginalLogDensities = "0.4.3" Mooncake = "0.4.147, 0.5" OrderedCollections = "1" PrecompileTools = "1.2.1" +Preferences = "1.5.2" Printf = "1.10" Random = "1.6" ReverseDiff = "1" diff --git a/docs/src/api.md b/docs/src/api.md index e82279c39..1754cc6e0 100644 --- a/docs/src/api.md +++ b/docs/src/api.md @@ -390,6 +390,14 @@ 
DynamicPPL.split DynamicPPL.combine ``` +The float type used for accumulation of log-probabilities is defined by a compile-time preference: + +```@docs +DynamicPPL.LogProbType +DynamicPPL.set_logprob_type! +DynamicPPL.NoLogProb +``` + ```@docs VNTAccumulator DoNotAccumulate diff --git a/src/DynamicPPL.jl b/src/DynamicPPL.jl index 5afe4746e..4756f8ae7 100644 --- a/src/DynamicPPL.jl +++ b/src/DynamicPPL.jl @@ -200,6 +200,7 @@ export AbstractVarInfo, # Convenience macros @addlogprob!, check_model, + set_logprob_type!, # Deprecated. generated_quantities diff --git a/src/accumulators/default.jl b/src/accumulators/default.jl index 518c912f3..b531ae54e 100644 --- a/src/accumulators/default.jl +++ b/src/accumulators/default.jl @@ -17,26 +17,6 @@ types like LogPriorAccumulator, LogJacobianAccumulator, and LogLikelihoodAccumul """ abstract type LogProbAccumulator{T<:Real} <: AbstractAccumulator end -""" - NoLogProb <: Real - -Singleton type that represents the absence of a log probability value. This is used as the -default type parameter for `LogProbAccumulator` when no log probability value is needed, to -avoid defining a concrete type such as `Float64` that would cause unwanted type promotion -when accumulating log probabilities of other types (e.g., `Float32`). - -Adding anything to `NoLogProb()` returns the other thing. In other words, `NoLogProb` is a -true additive identity which additionally preserves types. 
-""" -struct NoLogProb <: Real end -Base.zero(::Type{NoLogProb}) = NoLogProb() -Base.convert(::Type{T}, ::NoLogProb) where {T<:Number} = zero(T) -Base.promote_rule(::Type{NoLogProb}, ::Type{T}) where {T<:Number} = T -Base.iszero(::NoLogProb) = true -Base.hash(::NoLogProb, h::UInt) = hash(0.0, h) -Base.:(+)(::NoLogProb, ::NoLogProb) = NoLogProb() -(::Type{T})(::NoLogProb) where {T<:Real} = zero(T) - """ LogProbAccumulator{T}() @@ -44,10 +24,10 @@ Create a new `LogProbAccumulator` accumulator with the log prior initialized to LogProbAccumulator() -Create a new `LogProbAccumulator{NoLogProb}` accumulator. +Create a new `LogProbAccumulator{DynamicPPL.LogProbType}` accumulator. """ (::Type{AccType})() where {T<:Real,AccType<:LogProbAccumulator{T}} = AccType(zero(T)) -(::Type{AccType})() where {AccType<:LogProbAccumulator} = AccType{NoLogProb}(NoLogProb()) +(::Type{AccType})() where {AccType<:LogProbAccumulator} = AccType{LogProbType}() Base.copy(acc::LogProbAccumulator) = acc @@ -197,7 +177,7 @@ function accumulate_observe!!(acc::LogLikelihoodAccumulator, right, left, vn, te return acclogp(acc, Distributions.loglikelihood(right, left)) end -function default_accumulators(::Type{FloatT}=NoLogProb) where {FloatT} +function default_accumulators(::Type{FloatT}=LogProbType) where {FloatT} return AccumulatorTuple( LogPriorAccumulator{FloatT}(), LogJacobianAccumulator{FloatT}(), diff --git a/src/distribution_wrappers.jl b/src/distribution_wrappers.jl index bd940feac..33dd451ff 100644 --- a/src/distribution_wrappers.jl +++ b/src/distribution_wrappers.jl @@ -54,13 +54,13 @@ function Distributions.rand!( return Distributions.rand!(rng, d.dist, x) end function Distributions.logpdf(::NoDist{<:Univariate}, x::Real) - return NoLogProb() + return zero(LogProbType) end function Distributions.logpdf(::NoDist{<:Multivariate}, x::AbstractVector{<:Real}) - return NoLogProb() + return zero(LogProbType) end function Distributions.logpdf(::NoDist{<:Matrixvariate}, x::AbstractMatrix{<:Real}) - 
return NoLogProb() + return zero(LogProbType) end for f in ( diff --git a/src/transformed_values.jl b/src/transformed_values.jl index 0166ad2e5..10d474de1 100644 --- a/src/transformed_values.jl +++ b/src/transformed_values.jl @@ -362,7 +362,7 @@ function apply_transform_strategy( # vectorisation transform. However, sometimes that's not needed (e.g. when # evaluating with an OnlyAccsVarInfo). So we just return an UntransformedValue. If a # downstream function requires a VectorValue, it's on them to generate it. - (raw_value, UntransformedValue(raw_value), NoLogProb()) + (raw_value, UntransformedValue(raw_value), zero(LogProbType)) else error("unknown target transform $target") end @@ -383,7 +383,7 @@ function apply_transform_strategy( (raw_value, linked_tv, logjac) elseif target isa Unlink # No need to transform further - (raw_value, tv, NoLogProb()) + (raw_value, tv, zero(LogProbType)) else error("unknown target transform $target") end @@ -406,7 +406,7 @@ function apply_transform_strategy( (raw_value, linked_tv, logjac) elseif target isa Unlink # No need to transform further - (raw_value, tv, NoLogProb()) + (raw_value, tv, zero(LogProbType)) else error("unknown target transform $target") end diff --git a/src/utils.jl b/src/utils.jl index a96203516..c5124d85d 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -2,6 +2,107 @@ # defined in other files. function subset end +using Preferences: @load_preference, @set_preferences! + +""" + DynamicPPL.NoLogProb <: Real + +Singleton type that represents the absence of a log probability value. This is used as the +default type parameter for `LogProbAccumulator` when no log probability value is needed, to +avoid defining a concrete type such as `Float64` that would cause unwanted type promotion +when accumulating log probabilities of other types (e.g., `Float32`). + +Adding anything to `NoLogProb()` returns the other thing. In other words, `NoLogProb` is a +true additive identity which additionally preserves types. 
+""" +struct NoLogProb <: Real end +Base.zero(::Type{NoLogProb}) = NoLogProb() +Base.convert(::Type{T}, ::NoLogProb) where {T<:Number} = zero(T) +Base.promote_rule(::Type{NoLogProb}, ::Type{T}) where {T<:Number} = T +Base.iszero(::NoLogProb) = true +Base.hash(::NoLogProb, h::UInt) = hash(0.0, h) +Base.:(+)(::NoLogProb, ::NoLogProb) = NoLogProb() +(::Type{T})(::NoLogProb) where {T<:Real} = zero(T) + +const FLOAT_TYPE_PREF_KEY = "floattype" + +""" + DynamicPPL.LogProbType + +The default type used for log-probabilities in DynamicPPL.jl. This is a compile-time constant +that can be set via [`set_logprob_type!`](@ref), which under the hood uses Preferences.jl. + +Note that this does not prevent computations within the model from promoting the +log-probability to a different type. In essence, `LogProbType` specifies the *lowest* +possible type that log-probabilities can be, and DynamicPPL promises to not insert any extra +operations that would cause this to be promoted to a higher type. However, DynamicPPL cannot +guard against user code inside models. + +For example, in: + +```julia +@model f() = x ~ Normal(0.0, 1.0) +``` + +the log-probability of the model will always be promoted to `Float64`, regardless of the +value of `LogProbType`, because the logpdf of `Normal(0.0, 1.0)` is a `Float64`. On the +other hand, in: + +```julia +@model f() = x ~ Normal(0.0f0, 1.0f0) +``` + +the log-probability of the model will be `Float32` if `LogProbType` is `Float32` or lower. +""" +const LogProbType = let + logp_pref = @load_preference(FLOAT_TYPE_PREF_KEY, "f64") + if logp_pref == "f64" + Float64 + elseif logp_pref == "f32" + Float32 + elseif logp_pref == "f16" + Float16 + elseif logp_pref == "min" + NoLogProb + else + error("Unsupported log probability preference: $logp_pref") + end +end + +""" + set_logprob_type!(::Type{T}) where {T} + +Set the log probability type for DynamicPPL.jl, [`DynamicPPL.LogProbType`](@ref), to `T`. 
+Permitted values are `Float64`, `Float32`, `Float16`, and `NoLogProb`. The default in +DynamicPPL is `Float64`. + +`NoLogProb` is a special type that is the "lowest" possible float type. This means that the +log probability will be promoted to whatever type the model dictates. This is a totally +unintrusive option, which can be useful if you do not know in advance what log probability +type you are targeting, or want to troubleshoot a model to see what type the log probability +is being promoted to. However, this can also cause type stability issues and performance +degradations, so we generally recommend setting a specific log probability type if you know +what type you want to target. + +This function uses Preferences.jl to set a compile-time constant, so you will need to +restart your Julia session for the change to take effect. +""" +function set_logprob_type!(::Type{T}) where {T} + new_pref = if T == Float64 + "f64" + elseif T == Float32 + "f32" + elseif T == Float16 + "f16" + elseif T == NoLogProb + "min" + else + throw(ArgumentError("Unsupported log probability type: $T")) + end + @set_preferences!(FLOAT_TYPE_PREF_KEY => new_pref) + @info "DynamicPPL's log probability type has been set to $T.\nPlease note you will need to restart your Julia session for this change to take effect." +end + """ @addlogprob!(ex) diff --git a/test/accumulators.jl b/test/accumulators.jl index c027acf69..6ecde3e84 100644 --- a/test/accumulators.jl +++ b/test/accumulators.jl @@ -28,8 +28,6 @@ using DynamicPPL: @testset "accumulators" begin @testset "individual accumulator types" begin @testset "constructors" begin - # Note that NoLogProb() == 0.0 because `NoLogProb()` gets promoted to Float64, - # which in turn calls `convert(Float64, NoLogProb())`, which is `zero(Float64)`. 
@test LogPriorAccumulator(0.0) == LogPriorAccumulator() == LogPriorAccumulator{Float64}() == @@ -40,14 +38,25 @@ using DynamicPPL: LogLikelihoodAccumulator{Float64}() == LogLikelihoodAccumulator{Float64}(0.0) == DynamicPPL.reset(LogLikelihoodAccumulator(1.0)) - # However, `NoLogProb() !== 0.0`. - @test LogPriorAccumulator(NoLogProb()) !== LogPriorAccumulator(0.0) - @test LogPriorAccumulator() === LogPriorAccumulator(NoLogProb()) end - @testset "reset" begin + @testset "float types" begin + # f64 @test DynamicPPL.reset(LogPriorAccumulator(1.0)) === LogPriorAccumulator(0.0) @test DynamicPPL.reset(LogPriorAccumulator()) === LogPriorAccumulator() + + # f32 + @test LogPriorAccumulator{Float32}() == LogPriorAccumulator(0.0f0) + @test DynamicPPL.reset(LogPriorAccumulator(1.0f0)) === + LogPriorAccumulator(0.0f0) + @test DynamicPPL.reset(LogPriorAccumulator{Float32}()) === + LogPriorAccumulator{Float32}() + + # nologprob + @test LogPriorAccumulator(NoLogProb()) !== LogPriorAccumulator(0.0) + @test LogPriorAccumulator(NoLogProb()) === LogPriorAccumulator(NoLogProb()) + @test DynamicPPL.reset(LogPriorAccumulator(NoLogProb())) === + LogPriorAccumulator(NoLogProb()) end @testset "addition and incrementation" begin From 460b62cc85cc571d39b2d23c7fa55db240d4b3f0 Mon Sep 17 00:00:00 2001 From: Penelope Yong Date: Mon, 23 Mar 2026 21:24:24 +0000 Subject: [PATCH 03/11] Add tests --- .github/workflows/FloatTypes.yml | 42 +++++++++++++++++ test/floattypes/Project.toml | 10 ++++ test/floattypes/main.jl | 79 ++++++++++++++++++++++++++++++++ 3 files changed, 131 insertions(+) create mode 100644 .github/workflows/FloatTypes.yml create mode 100644 test/floattypes/Project.toml create mode 100644 test/floattypes/main.jl diff --git a/.github/workflows/FloatTypes.yml b/.github/workflows/FloatTypes.yml new file mode 100644 index 000000000..e716902e0 --- /dev/null +++ b/.github/workflows/FloatTypes.yml @@ -0,0 +1,42 @@ +name: Float type promotion + +on: + push: + branches: + - main + 
pull_request: + +# needed to allow julia-actions/cache to delete old caches that it has created +permissions: + actions: write + contents: read + +# Cancel existing tests on the same PR if a new commit is added to a pull request +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} + +jobs: + floattypes: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - uses: julia-actions/setup-julia@v2 + with: + version: "1" + + - uses: julia-actions/cache@v2 + + - name: Run float type tests + working-directory: test/floattypes + run: | + julia --project=. --color=yes -e 'using Pkg; Pkg.instantiate()' + julia --project=. --color=yes main.jl setup f64 + julia --project=. --color=yes main.jl run f64 + julia --project=. --color=yes main.jl setup f32 + julia --project=. --color=yes main.jl run f32 + julia --project=. --color=yes main.jl setup f16 + julia --project=. --color=yes main.jl run f16 + julia --project=. --color=yes main.jl setup min + julia --project=. --color=yes main.jl run min diff --git a/test/floattypes/Project.toml b/test/floattypes/Project.toml new file mode 100644 index 000000000..02a770fe7 --- /dev/null +++ b/test/floattypes/Project.toml @@ -0,0 +1,10 @@ +[deps] +ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" +Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" +DynamicPPL = "366bfd00-2699-11ea-058f-f148b4cae6d8" +ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" +LogDensityProblems = "6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[sources] +DynamicPPL = {path = "../../"} diff --git a/test/floattypes/main.jl b/test/floattypes/main.jl new file mode 100644 index 000000000..235bf5b52 --- /dev/null +++ b/test/floattypes/main.jl @@ -0,0 +1,79 @@ +# Script to use for testing promotion of log-prob types. Since this relies on compile-time +# preferences, it's hard to run this within the usual CI setup. 
+# +# Usage: +# julia --project=. main.jl setup f32 # Sets the preference +# julia --project=. main.jl run f32 # Checks that the preference is respected +# +# and this should be looped over for `f64`, `f32`, `f16`, and `min`. + +using DynamicPPL, LogDensityProblems, ForwardDiff, Distributions, ADTypes, Test + +function floattypestr_to_type(floattypestr) + if floattypestr == "f64" + return Float64 + elseif floattypestr == "f32" + return Float32 + elseif floattypestr == "f16" + return Float16 + elseif floattypestr == "min" + return DynamicPPL.NoLogProb + else + error("Invalid float type: $floattypestr") + end +end + +function setup(floattypestr) + T = floattypestr_to_type(floattypestr) + return DynamicPPL.set_logprob_type!(T) +end + +function test_with_type(::Type{T}) where {T} + @testset "Testing with type $T" begin + @model f() = x ~ Normal(T(0), T(1)) + model = f() + vnt = rand(model) + @test vnt[@varname(x)] isa T + lj = (@inferred logjoint(f(), (; x=T(0.0)))) + @test lj isa T + ldf = LogDensityFunction( + f(), getlogjoint_internal, LinkAll(); adtype=AutoForwardDiff() + ) + @test rand(ldf) isa AbstractVector{T} + lp = (@inferred LogDensityProblems.logdensity(ldf, [T(0)])) + @test lp isa T + @test lp ≈ logpdf(Normal(T(0), T(1)), T(0)) + lp_and_grad = (@inferred LogDensityProblems.logdensity_and_gradient(ldf, [T(0)])) + @test first(lp_and_grad) isa T + @test last(lp_and_grad) isa AbstractVector{T} + end +end + +function run(floattypestr) + T = floattypestr_to_type(floattypestr) + if T == DynamicPPL.NoLogProb + @test DynamicPPL.LogProbType === DynamicPPL.NoLogProb + # all higher types should cause promotion to those types + test_with_type(Float16) + test_with_type(Float32) + test_with_type(Float64) + else + @test DynamicPPL.LogProbType === T + test_with_type(T) + end +end + +if length(ARGS) != 2 || + !(ARGS[1] in ["setup", "run"]) || + !(ARGS[2] in ["f64", "f32", "f16", "min"]) + println("Usage: julia --project=. 
main.jl ") + exit(1) +end + +mode = ARGS[1] +floattypestr = ARGS[2] +if mode == "setup" + setup(floattypestr) +elseif mode == "run" + run(floattypestr) +end From a4cc40d12355de311318fb9406b37570a9465ba8 Mon Sep 17 00:00:00 2001 From: Penelope Yong Date: Mon, 23 Mar 2026 21:48:35 +0000 Subject: [PATCH 04/11] remove the old tests from pre-compile time --- test/logdensityfunction.jl | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/test/logdensityfunction.jl b/test/logdensityfunction.jl index f089de665..7ccf4efcb 100644 --- a/test/logdensityfunction.jl +++ b/test/logdensityfunction.jl @@ -423,36 +423,6 @@ end end end -@testset "LogDensityFunction: non-default float types" begin - for T in (Float16, Float32, BigFloat) - @model function with_floattype(y) - x ~ Normal(T(0.0), T(1.0)) - return y ~ Normal(x, T(1.0)) - end - model = with_floattype(T(0.5)) - - for adtype in [ - AutoForwardDiff(), - AutoReverseDiff(; compile=false), - AutoReverseDiff(; compile=true), - AutoMooncake(; config=nothing), - ] - if T == BigFloat && adtype isa AutoMooncake - # Mooncake doesn't seem to support BigFloat - continue - end - ldf = DynamicPPL.LogDensityFunction( - model, getlogjoint_internal, LinkAll(); adtype=adtype - ) - x = rand(ldf) - @test eltype(x) == T - @test LogDensityProblems.logdensity(ldf, x) isa T - @test LogDensityProblems.logdensity_and_gradient(ldf, x)[1] isa T - @test eltype(LogDensityProblems.logdensity_and_gradient(ldf, x)[2]) == T - end - end -end - @testset "AD with LogDensityFunction" begin # Used as the ground truth that others are compared against. 
ref_adtype = AutoForwardDiff() From e60732f36c4bd7a920b0d88ee99a154f996395dd Mon Sep 17 00:00:00 2001 From: Penelope Yong Date: Mon, 23 Mar 2026 22:16:03 +0000 Subject: [PATCH 05/11] get rid of offending ambiguities --- src/utils.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/utils.jl b/src/utils.jl index c5124d85d..fe1a558dd 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -22,7 +22,6 @@ Base.promote_rule(::Type{NoLogProb}, ::Type{T}) where {T<:Number} = T Base.iszero(::NoLogProb) = true Base.hash(::NoLogProb, h::UInt) = hash(0.0, h) Base.:(+)(::NoLogProb, ::NoLogProb) = NoLogProb() -(::Type{T})(::NoLogProb) where {T<:Real} = zero(T) const FLOAT_TYPE_PREF_KEY = "floattype" From 12fc78485b71d60f931c25049a7e7dc8b3ad01d9 Mon Sep 17 00:00:00 2001 From: Penelope Yong Date: Mon, 23 Mar 2026 22:26:51 +0000 Subject: [PATCH 06/11] export get_input_vector_type as well --- HISTORY.md | 4 +++- docs/src/api.md | 1 + src/DynamicPPL.jl | 1 + src/logdensityfunction.jl | 12 +++++++++++- 4 files changed, 16 insertions(+), 2 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 7a57237c2..e2325d7e9 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,6 +1,6 @@ # 0.40.15 -DynamicPPL now allows you to set the type that log-probabilities are initialised with, using the `set_logprob_type!` function. +DynamicPPL now allows you to set the type that log-probabilities are initialised with, using the `DynamicPPL.set_logprob_type!` function. This records a compile-time preference so requires restarting Julia to take effect. This allows model log-probability accumulation to work with different numerical precisions. @@ -8,6 +8,8 @@ For example, if your model is defined using distributions that are parameterised Previously, DynamicPPL would automatically choose a `Float64` log-probability, causing any lower-precision model to be promoted. 
+The function `DynamicPPL.get_input_vector_type(::LogDensityFunction)` is now exported, in order to help with querying the type that log-probabilities are initialised with. + # 0.40.14 Fixed `check_model()` erroneously failing for models such as `x[1:2] .~ univariate_dist`. diff --git a/docs/src/api.md b/docs/src/api.md index 1754cc6e0..f6e66b011 100644 --- a/docs/src/api.md +++ b/docs/src/api.md @@ -72,6 +72,7 @@ The [LogDensityProblems.jl](https://github.com/tpapp/LogDensityProblems.jl) inte ```@docs LogDensityFunction +get_input_vector_type ``` Internally, this is accomplished using [`init!!`](@ref) on: diff --git a/src/DynamicPPL.jl b/src/DynamicPPL.jl index 4756f8ae7..7c1b05fe0 100644 --- a/src/DynamicPPL.jl +++ b/src/DynamicPPL.jl @@ -128,6 +128,7 @@ export AbstractVarInfo, LogDensityFunction, OnlyAccsVarInfo, to_vector_params, + get_input_vector_type, # Leaf contexts AbstractContext, contextualize, diff --git a/src/logdensityfunction.jl b/src/logdensityfunction.jl index 5ff30f80f..41c8769f4 100644 --- a/src/logdensityfunction.jl +++ b/src/logdensityfunction.jl @@ -309,7 +309,17 @@ function _default_vnt(model::Model, transform_strategy::AbstractTransformStrateg return getacc(oavi, Val(VECTORVAL_ACCNAME)).values end -function _get_input_vector_type(::LogDensityFunction{M,A,L,G,R,P,X}) where {M,A,L,G,R,P,X} +""" + DynamicPPL.get_input_vector_type(::LogDensityFunction) + +Get the type of the vector `x` that should be passed to `LogDensityProblems.logdensity(ldf, +x)`. + +Note that if you pass a vector of a different type, it will be converted to the correct +type. However, this allows you to determine upfront what kind of vector should be passed in. +It is also useful for determining e.g. whether Float32 or Float64 parameters are expected. 
+""" +function get_input_vector_type(::LogDensityFunction{M,A,L,G,R,P,X}) where {M,A,L,G,R,P,X} return X end From 99e9b14bd689d02501182bc4f6799a367b2dd8cf Mon Sep 17 00:00:00 2001 From: Penelope Yong Date: Mon, 23 Mar 2026 22:30:46 +0000 Subject: [PATCH 07/11] bump patch --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index ac9ad87df..c794203dd 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "DynamicPPL" uuid = "366bfd00-2699-11ea-058f-f148b4cae6d8" -version = "0.40.14" +version = "0.40.15" [deps] ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" From 93010f86bcbb2219b0cf164042ee12da90359c7f Mon Sep 17 00:00:00 2001 From: Penelope Yong Date: Mon, 23 Mar 2026 22:32:25 +0000 Subject: [PATCH 08/11] fix typos --- src/accumulators/vector_params.jl | 2 +- src/logdensityfunction.jl | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/accumulators/vector_params.jl b/src/accumulators/vector_params.jl index 0f7b9dd7d..2ab48816e 100644 --- a/src/accumulators/vector_params.jl +++ b/src/accumulators/vector_params.jl @@ -17,7 +17,7 @@ initialisation strategy and collect the vectorised parameters corresponding to t strategy. """ function VectorParamAccumulator(ldf::LogDensityFunction) - et = eltype(_get_input_vector_type(ldf)) + et = eltype(get_input_vector_type(ldf)) dim = ldf._dim vals = Vector{et}(undef, dim) set_indices = falses(dim) diff --git a/src/logdensityfunction.jl b/src/logdensityfunction.jl index 41c8769f4..b1ae82f2b 100644 --- a/src/logdensityfunction.jl +++ b/src/logdensityfunction.jl @@ -425,7 +425,7 @@ function LogDensityProblems.logdensity_and_gradient( ) # `params` has to be converted to the same vector type that was used for AD preparation, # otherwise the preparation will not be valid. 
- params = convert(_get_input_vector_type(ldf), params) + params = convert(get_input_vector_type(ldf), params) return if _use_closure(ldf.adtype) DI.value_and_gradient( LogDensityAt( @@ -596,7 +596,7 @@ that. """ function to_vector_params(vector_values::VarNamedTuple, ldf::LogDensityFunction) return to_vector_params_inner( - vector_values, ldf._varname_ranges, eltype(_get_input_vector_type(ldf)), ldf._dim + vector_values, ldf._varname_ranges, eltype(get_input_vector_type(ldf)), ldf._dim ) end From 5fd6cd6fb45dd79582aa5737d052f9c8c5425de7 Mon Sep 17 00:00:00 2001 From: Penelope Yong Date: Tue, 24 Mar 2026 00:23:26 +0000 Subject: [PATCH 09/11] Avoid breaking change in typed_identity --- HISTORY.md | 2 ++ src/DynamicPPL.jl | 3 ++- src/deprecated.jl | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/HISTORY.md b/HISTORY.md index e2325d7e9..5b4c37b30 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -10,6 +10,8 @@ Previously, DynamicPPL would automatically choose a `Float64` log-probability, c The function `DynamicPPL.get_input_vector_type(::LogDensityFunction)` is now exported, in order to help with querying the type that log-probabilities are initialised with. +`DynamicPPL.typed_identity` is deprecated; please use `Bijectors.VectorBijectors.TypedIdentity()` instead (it does the same thing). + # 0.40.14 Fixed `check_model()` erroneously failing for models such as `x[1:2] .~ univariate_dist`. diff --git a/src/DynamicPPL.jl b/src/DynamicPPL.jl index 7c1b05fe0..7d26e073a 100644 --- a/src/DynamicPPL.jl +++ b/src/DynamicPPL.jl @@ -203,7 +203,8 @@ export AbstractVarInfo, check_model, set_logprob_type!, # Deprecated. 
- generated_quantities + generated_quantities, + typed_identity # Reexport using Distributions: loglikelihood diff --git a/src/deprecated.jl b/src/deprecated.jl index 0bcaae9b7..bc978faf1 100644 --- a/src/deprecated.jl +++ b/src/deprecated.jl @@ -1 +1,3 @@ @deprecate generated_quantities(model, params) returned(model, params) + +typed_identity = Bijectors.VectorBijectors.TypedIdentity() From 9f2975d2c43d2f7281af8625b605c57e137cb689 Mon Sep 17 00:00:00 2001 From: Penelope Yong Date: Tue, 24 Mar 2026 00:27:19 +0000 Subject: [PATCH 10/11] fix some more things --- src/accumulators/default.jl | 2 +- src/utils.jl | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/accumulators/default.jl b/src/accumulators/default.jl index b531ae54e..589823bf2 100644 --- a/src/accumulators/default.jl +++ b/src/accumulators/default.jl @@ -39,7 +39,7 @@ end # equality of hashes. Both of the below implementations are also different from the default # implementation for structs. function Base.:(==)(acc1::LogProbAccumulator, acc2::LogProbAccumulator) - return (accumulator_name(acc1) === accumulator_name(acc2)) & (logp(acc1) == logp(acc2)) + return accumulator_name(acc1) === accumulator_name(acc2) && logp(acc1) == logp(acc2) end function Base.isequal(acc1::LogProbAccumulator, acc2::LogProbAccumulator) diff --git a/src/utils.jl b/src/utils.jl index fe1a558dd..d218a9be9 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -22,6 +22,7 @@ Base.promote_rule(::Type{NoLogProb}, ::Type{T}) where {T<:Number} = T Base.iszero(::NoLogProb) = true Base.hash(::NoLogProb, h::UInt) = hash(0.0, h) Base.:(+)(::NoLogProb, ::NoLogProb) = NoLogProb() +Base.:(-)(::NoLogProb, ::NoLogProb) = NoLogProb() const FLOAT_TYPE_PREF_KEY = "floattype" From 613dc1db23effd4c5e958f8dd6b527a4543b670e Mon Sep 17 00:00:00 2001 From: Penelope Yong Date: Tue, 24 Mar 2026 00:28:47 +0000 Subject: [PATCH 11/11] remove localpreferences in CI --- test/floattypes/LocalPreferences.toml | 2 -- 1 file changed, 2 deletions(-) 
delete mode 100644 test/floattypes/LocalPreferences.toml diff --git a/test/floattypes/LocalPreferences.toml b/test/floattypes/LocalPreferences.toml deleted file mode 100644 index 07bee33ee..000000000 --- a/test/floattypes/LocalPreferences.toml +++ /dev/null @@ -1,2 +0,0 @@ -[DynamicPPL] -floattype = "min"