 49   49    - [`getlogprior`](@ref): calculate the log prior in the model space, ignoring
 50   50      any effects of linking
 51   51    - [`getloglikelihood`](@ref): calculate the log likelihood (this is unaffected
 52        -   by linking, since transforms are only applied to random variables) 
      52   +   by linking, since transforms are only applied to random variables)
 53   53  
 54   54  !!! note
 55   55      By default, `LogDensityFunction` uses `getlogjoint_internal`, i.e., the
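For orientation, the accessors listed above can be paired with `LogDensityFunction` by passing them explicitly instead of the default `getlogjoint_internal`. The sketch below is a minimal, hypothetical illustration: the constructor arguments (`model`, `getlogdensity`, `varinfo`) are inferred from the hunks that follow, and the exact signatures of the accessors and the constructor may differ between DynamicPPL versions.

```julia
using DynamicPPL, Distributions

@model function demo(y)
    x ~ Normal()      # random variable (affected by linking)
    y ~ Normal(x)     # observation (contributes to the likelihood)
end

model = demo(0.5)
vi = VarInfo(model)

# Accessors from the list above (signatures assumed):
getlogprior(vi)        # log prior in model space, ignoring linking
getloglikelihood(vi)   # log likelihood, unaffected by linking

# Assumed constructor form: evaluate only the prior density rather than
# the default joint density (`getlogjoint_internal`).
ldf_prior = LogDensityFunction(model, getlogprior, vi)
```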
@@ -146,7 +146,7 @@ struct LogDensityFunction{
 146  146              is_supported(adtype) ||
 147  147                  @warn "The AD backend $adtype is not officially supported by DynamicPPL. Gradient calculations may still work, but compatibility is not guaranteed."
 148  148              # Get a set of dummy params to use for prep
 149       -            x = map(identity, varinfo[:])
      149  +            x = [val for val in varinfo[:]]
 150  150              if use_closure(adtype)
 151  151                  prep = DI.prepare_gradient(
 152  152                      LogDensityAt(model, getlogdensity, varinfo), adtype, x
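The change in this hunk swaps `map(identity, ...)` for an equivalent comprehension. Both forms exist to concretise the element type of the parameter vector before gradient preparation; the standalone snippet below (plain Julia, independent of DynamicPPL) shows that they behave the same way on an abstractly typed input.

```julia
xs = Real[1.0, 2.0, 3.0]   # abstract element type

# Both forms walk the actual values and return a Vector{Float64},
# so downstream AD preparation sees a concrete element type.
a = map(identity, xs)
b = [val for val in xs]

eltype(a) == eltype(b) == Float64   # true
```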
@@ -282,7 +282,7 @@ function LogDensityProblems.logdensity_and_gradient(
 282  282  ) where {M,F,V,AD<:ADTypes.AbstractADType}
 283  283      f.prep === nothing &&
 284  284          error("Gradient preparation not available; this should not happen")
 285       -    x = map(identity, x)  # Concretise type
      285  +    x = [val for val in x]  # Concretise type
 286  286      # Make branching statically inferrable, i.e. type-stable (even if the two
 287  287      # branches happen to return different types)
 288  288      return if use_closure(f.adtype)
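Since `LogDensityFunction` implements the LogDensityProblems.jl interface, the method above is what runs when a user asks for a gradient. A hedged usage sketch follows; the constructor form, the `adtype` keyword, and the availability of `getlogjoint_internal` as an exported name are assumptions based on the surrounding diff, not a guaranteed public API.

```julia
using DynamicPPL, Distributions, LogDensityProblems, ForwardDiff
using ADTypes: AutoForwardDiff

@model function demo(y)
    x ~ Normal()
    y ~ Normal(x)
end

model = demo(0.5)

# Assumed constructor: supplying `adtype` triggers the gradient
# preparation shown in the earlier hunk.
ldf = LogDensityFunction(model, getlogjoint_internal, VarInfo(model); adtype=AutoForwardDiff())

# One parameter (x), so the input vector has length 1.
lp, grad = LogDensityProblems.logdensity_and_gradient(ldf, [0.1])
```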