diff --git a/src/univariate/continuous/exponential.jl b/src/univariate/continuous/exponential.jl
index 573a469b0..96737c94a 100644
--- a/src/univariate/continuous/exponential.jl
+++ b/src/univariate/continuous/exponential.jl
@@ -107,6 +107,11 @@ cf(d::Exponential, t::Real) = 1/(1 - t * im * scale(d))
 #### Sampling
 
 rand(rng::AbstractRNG, d::Exponential{T}) where {T} = xval(d, randexp(rng, float(T)))
+function rand!(rng::AbstractRNG, d::Exponential, A::AbstractArray{<:Real})
+    randexp!(rng, A)
+    map!(Base.Fix1(xval, d), A, A)
+    return A
+end
 
 
 #### Fit model
diff --git a/src/univariate/continuous/logitnormal.jl b/src/univariate/continuous/logitnormal.jl
index 7fd5fb911..cf0832d00 100644
--- a/src/univariate/continuous/logitnormal.jl
+++ b/src/univariate/continuous/logitnormal.jl
@@ -157,7 +157,14 @@ end
 
 #### Sampling
 
-rand(rng::AbstractRNG, d::LogitNormal) = logistic(randn(rng) * d.σ + d.μ)
+xval(d::LogitNormal, z::Real) = logistic(muladd(d.σ, z, d.μ))
+
+rand(rng::AbstractRNG, d::LogitNormal) = xval(d, randn(rng))
+function rand!(rng::AbstractRNG, d::LogitNormal, A::AbstractArray{<:Real})
+    randn!(rng, A)
+    map!(Base.Fix1(xval, d), A, A)
+    return A
+end
 
 
 ## Fitting
diff --git a/src/univariate/continuous/lognormal.jl b/src/univariate/continuous/lognormal.jl
index 666b14248..d9ada052f 100644
--- a/src/univariate/continuous/lognormal.jl
+++ b/src/univariate/continuous/lognormal.jl
@@ -156,7 +156,14 @@ end
 
 #### Sampling
 
-rand(rng::AbstractRNG, d::LogNormal) = exp(randn(rng) * d.σ + d.μ)
+xval(d::LogNormal, z::Real) = exp(muladd(d.σ, z, d.μ))
+
+rand(rng::AbstractRNG, d::LogNormal) = xval(d, randn(rng))
+function rand!(rng::AbstractRNG, d::LogNormal, A::AbstractArray{<:Real})
+    randn!(rng, A)
+    map!(Base.Fix1(xval, d), A, A)
+    return A
+end
 
 
 ## Fitting
diff --git a/src/univariate/continuous/normal.jl b/src/univariate/continuous/normal.jl
index 423152267..ef2b77bb5 100644
--- a/src/univariate/continuous/normal.jl
+++ b/src/univariate/continuous/normal.jl
@@ -114,9 +114,14 @@ Base.:*(c::Real, d::Normal) = Normal(c * d.μ, abs(c) * d.σ)
 
 #### Sampling
 
-rand(rng::AbstractRNG, d::Normal{T}) where {T} = d.μ + d.σ * randn(rng, float(T))
+xval(d::Normal, z::Real) = muladd(d.σ, z, d.μ)
 
-rand!(rng::AbstractRNG, d::Normal, A::AbstractArray{<:Real}) = A .= muladd.(d.σ, randn!(rng, A), d.μ)
+rand(rng::AbstractRNG, d::Normal{T}) where {T} = xval(d, randn(rng, float(T)))
+function rand!(rng::AbstractRNG, d::Normal, A::AbstractArray{<:Real})
+    randn!(rng, A)
+    map!(Base.Fix1(xval, d), A, A)
+    return A
+end
 
 
 #### Fitting
diff --git a/src/univariate/continuous/normalcanon.jl b/src/univariate/continuous/normalcanon.jl
index de29c4b19..89d45a4fa 100644
--- a/src/univariate/continuous/normalcanon.jl
+++ b/src/univariate/continuous/normalcanon.jl
@@ -87,7 +87,13 @@ invlogccdf(d::NormalCanon, lp::Real) = xval(d, norminvlogccdf(lp))
 
 #### Sampling
 
-rand(rng::AbstractRNG, cf::NormalCanon) = cf.μ + randn(rng) / sqrt(cf.λ)
+rand(rng::AbstractRNG, cf::NormalCanon) = xval(cf, randn(rng))
+
+function rand!(rng::AbstractRNG, cf::NormalCanon, A::AbstractArray{<:Real})
+    randn!(rng, A)
+    map!(Base.Fix1(xval, cf), A, A)
+    return A
+end
 
 
 #### Affine transformations
diff --git a/src/univariate/continuous/pareto.jl b/src/univariate/continuous/pareto.jl
index 10bb246c5..249fa29eb 100644
--- a/src/univariate/continuous/pareto.jl
+++ b/src/univariate/continuous/pareto.jl
@@ -110,7 +110,14 @@ quantile(d::Pareto, p::Real) = cquantile(d, 1 - p)
 
 #### Sampling
 
-rand(rng::AbstractRNG, d::Pareto) = d.θ * exp(randexp(rng) / d.α)
+xval(d::Pareto, z::Real) = d.θ * exp(z / d.α)
+
+rand(rng::AbstractRNG, d::Pareto) = xval(d, randexp(rng))
+function rand!(rng::AbstractRNG, d::Pareto, A::AbstractArray{<:Real})
+    randexp!(rng, A)
+    map!(Base.Fix1(xval, d), A, A)
+    return A
+end
 
 
 ## Fitting
diff --git a/src/univariate/continuous/pgeneralizedgaussian.jl b/src/univariate/continuous/pgeneralizedgaussian.jl
index 9d41d9d46..24eb9a969 100644
--- a/src/univariate/continuous/pgeneralizedgaussian.jl
+++ b/src/univariate/continuous/pgeneralizedgaussian.jl
@@ -141,7 +141,7 @@ function rand(rng::AbstractRNG, d::PGeneralizedGaussian)
     inv_p = inv(d.p)
     g = Gamma(inv_p, 1)
     z = d.α * rand(rng, g)^inv_p
-    if rand(rng) < 0.5
+    if rand(rng, Bool)
         return d.μ - z
     else
         return d.μ + z
diff --git a/test/fit.jl b/test/fit.jl
index 4483dd1ec..143f0b6ee 100644
--- a/test/fit.jl
+++ b/test/fit.jl
@@ -369,7 +369,7 @@ end
     for func in funcs, dist in (Laplace, Laplace{Float64})
         d = fit(dist, func[2](dist(5.0, 3.0), N + 1))
         @test isa(d, dist)
-        @test isapprox(location(d), 5.0, atol=0.02)
+        @test isapprox(location(d), 5.0, atol=0.03)
         @test isapprox(scale(d) , 3.0, atol=0.03)
     end
 end
diff --git a/test/multivariate/mvlognormal.jl b/test/multivariate/mvlognormal.jl
index 7ef76bef2..af887f40d 100644
--- a/test/multivariate/mvlognormal.jl
+++ b/test/multivariate/mvlognormal.jl
@@ -105,8 +105,8 @@ end
     @test entropy(l1) ≈ entropy(l2)
     @test logpdf(l1,5.0) ≈ logpdf(l2,[5.0])
     @test pdf(l1,5.0) ≈ pdf(l2,[5.0])
-    @test (Random.seed!(78393) ; [rand(l1)]) == (Random.seed!(78393) ; rand(l2))
-    @test [rand(MersenneTwister(78393), l1)] == rand(MersenneTwister(78393), l2)
+    @test (Random.seed!(78393) ; [rand(l1)]) ≈ (Random.seed!(78393) ; rand(l2))
+    @test [rand(MersenneTwister(78393), l1)] ≈ rand(MersenneTwister(78393), l2)
 end
 
 ###### General Testing
diff --git a/test/runtests.jl b/test/runtests.jl
index c7d73f454..cd3afe17f 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -22,6 +22,7 @@ const tests = [
     "truncated/discrete_uniform",
     "censored",
     "univariate/continuous/normal",
+    "univariate/continuous/normalcanon",
     "univariate/continuous/laplace",
     "univariate/continuous/cauchy",
     "univariate/continuous/uniform",
@@ -83,6 +84,7 @@ const tests = [
     "univariate/continuous/noncentralchisq",
     "univariate/continuous/weibull",
     "pdfnorm",
+    "univariate/continuous/pareto",
     "univariate/continuous/rician",
     "functionals",
     "density_interface",
@@ -143,9 +145,7 @@ const tests = [
     # "univariate/continuous/levy",
     # "univariate/continuous/noncentralbeta",
    # "univariate/continuous/noncentralf",
-    # "univariate/continuous/normalcanon",
     # "univariate/continuous/normalinversegaussian",
-    # "univariate/continuous/pareto",
     # "univariate/continuous/rayleigh",
     # "univariate/continuous/studentizedrange",
     # "univariate/continuous/symtriangular",
diff --git a/test/testutils.jl b/test/testutils.jl
index 2859856a7..8acd37853 100644
--- a/test/testutils.jl
+++ b/test/testutils.jl
@@ -28,7 +28,7 @@ end
 # testing the implementation of a discrete univariate distribution
 #
 function test_distr(distr::DiscreteUnivariateDistribution, n::Int;
-                    testquan::Bool=true)
+                    testquan::Bool=true, rng::AbstractRNG = Random.default_rng())
 
     test_range(distr)
     vs = get_evalsamples(distr, 0.00001)
@@ -40,7 +40,7 @@ function test_distr(distr::DiscreteUnivariateDistribution, n::Int;
 
     test_stats(distr, vs)
     test_samples(distr, n)
-    test_samples(distr, n, rng=MersenneTwister())
+    test_samples(distr, n; rng=rng)
     test_params(distr)
 end
 
@@ -150,23 +150,43 @@ function test_samples(s::Sampleable{Univariate, Discrete},    # the sampleable
         samples = rand(s, n)
         Random.seed!(1234)
         samples2 = rand(s, n)
+        Random.seed!(1234)
+        samples3 = [rand(s) for _ in 1:n]
+        Random.seed!(1234)
+        samples4 = [rand(s) for _ in 1:n]
     else
-        rng2 = deepcopy(rng)
+        # RNGs have to be copied with `copy`, not `deepcopy`
+        # Ref https://github.com/JuliaLang/julia/issues/42899
+        rng2 = copy(rng)
+        rng3 = copy(rng)
+        rng4 = copy(rng)
         samples = rand(rng, s, n)
         samples2 = rand(rng2, s, n)
+        samples3 = [rand(rng3, s) for _ in 1:n]
+        samples4 = [rand(rng4, s) for _ in 1:n]
     end
     @test length(samples) == n
     @test samples2 == samples
+    @test samples3 == samples4
 
     # scan samples and get counts
     cnts = zeros(Int, m)
+    cnts_sc = zeros(Int, m)
     for i = 1:n
         @inbounds si = samples[i]
         if rmin <= si <= rmax
             cnts[si - rmin + 1] += 1
         else
             vmin <= si <= vmax ||
-                error("Sample value out of valid range.")
+                throw(DomainError(si, "sample generated by `rand(s, n)` is out of valid range [$vmin, $vmax]."))
+        end
+
+        @inbounds si_sc = samples3[i]
+        if rmin <= si_sc <= rmax
+            cnts_sc[si_sc - rmin + 1] += 1
+        else
+            vmin <= si_sc <= vmax ||
+                throw(DomainError(si_sc, "sample generated by `[rand(s) for _ in 1:n]` is out of valid range [$vmin, $vmax]."))
         end
     end
@@ -174,7 +194,11 @@ function test_samples(s::Sampleable{Univariate, Discrete},    # the sampleable
     for i = 1:m
         verbose && println("v = $(rmin+i-1) ==> ($(clb[i]), $(cub[i])): $(cnts[i])")
         clb[i] <= cnts[i] <= cub[i] ||
-            error("The counts are out of the confidence interval.")
+            error("The counts of samples generated by `rand(s, n)` are out of the confidence interval.")
+
+        verbose && println("v = $(rmin+i-1) ==> ($(clb[i]), $(cub[i])): $(cnts_sc[i])")
+        clb[i] <= cnts_sc[i] <= cub[i] ||
+            error("The counts of samples generated by `[rand(s) for _ in 1:n]` are out of the confidence interval.")
     end
     return samples
 end
@@ -250,13 +274,24 @@ function test_samples(s::Sampleable{Univariate, Continuous},    # the sampleable
         samples = rand(s, n)
         Random.seed!(1234)
         samples2 = rand(s, n)
+        Random.seed!(1234)
+        samples3 = [rand(s) for _ in 1:n]
+        Random.seed!(1234)
+        samples4 = [rand(s) for _ in 1:n]
     else
-        rng2 = deepcopy(rng)
+        # RNGs have to be copied with `copy`, not `deepcopy`
+        # Ref https://github.com/JuliaLang/julia/issues/42899
+        rng2 = copy(rng)
+        rng3 = copy(rng)
+        rng4 = copy(rng)
         samples = rand(rng, s, n)
         samples2 = rand(rng2, s, n)
+        samples3 = [rand(rng3, s) for _ in 1:n]
+        samples4 = [rand(rng4, s) for _ in 1:n]
     end
     @test length(samples) == n
     @test samples2 == samples
+    @test samples3 == samples4
 
     if isa(distr, StudentizedRange)
         samples[isnan.(samples)] .= 0.0 # Underlying implementation in Rmath can't handle very low values.
@@ -266,20 +301,29 @@ function test_samples(s::Sampleable{Univariate, Continuous},    # the sampleable
     for i = 1:n
         @inbounds si = samples[i]
         vmin <= si <= vmax ||
-            error("Sample value out of valid range.")
+            throw(DomainError(si, "sample generated by `rand(s, n)` is out of valid range [$vmin, $vmax]."))
+        @inbounds si_sc = samples3[i]
+        vmin <= si_sc <= vmax ||
+            throw(DomainError(si_sc, "sample generated by `[rand(s) for _ in 1:n]` is out of valid range [$vmin, $vmax]."))
     end
 
     # get counts
     cnts = fit(Histogram, samples, edges; closed=:right).weights
     @assert length(cnts) == nbins
 
+    cnts_sc = fit(Histogram, samples3, edges; closed=:right).weights
+    @assert length(cnts_sc) == nbins
+
     # check the counts
     for i = 1:nbins
         if verbose
             @printf("[%.4f, %.4f) ==> (%d, %d): %d\n", edges[i], edges[i+1], clb[i], cub[i], cnts[i])
+            @printf("[%.4f, %.4f) ==> (%d, %d): %d\n", edges[i], edges[i+1], clb[i], cub[i], cnts_sc[i])
         end
         clb[i] <= cnts[i] <= cub[i] ||
-            error("The counts are out of the confidence interval.")
+            error("The counts of samples generated by `rand(s, n)` are out of the confidence interval.")
+        clb[i] <= cnts_sc[i] <= cub[i] ||
+            error("The counts of samples generated by `[rand(s) for _ in 1:n]` are out of the confidence interval.")
     end
     return samples
 end
@@ -583,6 +627,7 @@ end
 allow_test_stats(d::UnivariateDistribution) = true
 allow_test_stats(d::NoncentralBeta) = false
 allow_test_stats(::StudentizedRange) = false
+allow_test_stats(::LogitNormal) = false # `mean` is not defined since it has no analytical solution
 
 function test_stats(d::ContinuousUnivariateDistribution, xs::AbstractVector{Float64})
     # using Monte Carlo methods
diff --git a/test/univariate/continuous/exponential.jl b/test/univariate/continuous/exponential.jl
index 191528fd7..9c5f29aa9 100644
--- a/test/univariate/continuous/exponential.jl
+++ b/test/univariate/continuous/exponential.jl
@@ -1,4 +1,3 @@
-
 @testset "Exponential" begin
     test_cgf(Exponential(1), (0.9, -1, -100f0, -1e6))
     test_cgf(Exponential(0.91), (0.9, -1, -100f0, -1e6))
@@ -8,3 +7,18 @@
         @test @inferred(rand(Exponential(T(1)))) isa T
     end
 end
+
+test_cgf(Exponential(1), (0.9, -1, -100f0, -1e6))
+test_cgf(Exponential(0.91), (0.9, -1, -100f0, -1e6))
+test_cgf(Exponential(10), (0.08, -1, -100f0, -1e6))
+
+# Sampling Tests
+@testset "Exponential sampling tests" begin
+    for d in [
+        Exponential(1),
+        Exponential(0.91),
+        Exponential(10)
+    ]
+        test_distr(d, 10^6)
+    end
+end
diff --git a/test/univariate/continuous/logitnormal.jl b/test/univariate/continuous/logitnormal.jl
index 454376606..b8d72290e 100644
--- a/test/univariate/continuous/logitnormal.jl
+++ b/test/univariate/continuous/logitnormal.jl
@@ -64,3 +64,12 @@ end
     @test convert(LogitNormal{Float32}, d) === d
     @test typeof(convert(LogitNormal{Float64}, d)) == typeof(LogitNormal(2,1))
 end
+
+@testset "Logitnormal Sampling Tests" begin
+    for d in [
+        LogitNormal(-2, 3),
+        LogitNormal(0, 0.2)
+    ]
+        test_distr(d, 10^6)
+    end
+end
diff --git a/test/univariate/continuous/lognormal.jl b/test/univariate/continuous/lognormal.jl
index 4a27f2e8a..1cc44b433 100644
--- a/test/univariate/continuous/lognormal.jl
+++ b/test/univariate/continuous/lognormal.jl
@@ -314,3 +314,17 @@ end
     @test @inferred(gradlogpdf(LogNormal(0.0, 1.0), BigFloat(-1))) == big(0.0)
     @test isnan_type(BigFloat, @inferred(gradlogpdf(LogNormal(0.0, 1.0), BigFloat(NaN))))
 end
+
+@testset "LogNormal Sampling Tests" begin
+    for d in [
+        LogNormal()
+        LogNormal(1.0)
+        LogNormal(0.0, 2.0)
+        LogNormal(1.0, 2.0)
+        LogNormal(3.0, 0.5)
+        LogNormal(3.0, 1.0)
+        LogNormal(3.0, 2.0)
+    ]
+        test_distr(d, 10^6)
+    end
+end
diff --git a/test/univariate/continuous/normalcanon.jl b/test/univariate/continuous/normalcanon.jl
new file mode 100644
index 000000000..9ae52e15f
--- /dev/null
+++ b/test/univariate/continuous/normalcanon.jl
@@ -0,0 +1,10 @@
+# Sampling Tests
+@testset "NormalCanon sampling tests" begin
+    for d in [
+        NormalCanon()
+        NormalCanon(-1.0, 2.5)
+        NormalCanon(2.0, 0.8)
+    ]
+        test_distr(d, 10^6)
+    end
+end
diff --git a/test/univariate/continuous/pareto.jl b/test/univariate/continuous/pareto.jl
new file mode 100644
index 000000000..195e34a96
--- /dev/null
+++ b/test/univariate/continuous/pareto.jl
@@ -0,0 +1,10 @@
+@testset "Pareto Sampling Tests" begin
+    for d in [
+        Pareto()
+        Pareto(2.0)
+        Pareto(2.0, 1.5)
+        Pareto(3.0, 2.0)
+    ]
+        test_distr(d, 10^6)
+    end
+end
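
Not part of the patch, just an illustrative sketch of the public API that the new methods specialize: each `rand!` above fills a preallocated array with standard draws (`randn!` or `randexp!`) and then applies the distribution's `xval` transform in place via `map!(Base.Fix1(xval, d), A, A)`, so array filling reuses the same transformation as scalar `rand`. The variable names below (`rng`, `d`, `A`) are arbitrary, and the final check is only a loose statistical sanity test.

    using Distributions, Random

    rng = MersenneTwister(1234)
    d = LogNormal(3.0, 0.5)
    A = Vector{Float64}(undef, 10_000)
    rand!(rng, d, A)                          # randn! fills A, then the in-place exp(muladd(σ, z, μ)) transform
    isapprox(mean(A), mean(d); rtol = 0.05)   # sample mean should land near the analytical mean with high probability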