From 7b3dd063c88119eee88afa12c34cb4ee8a982ccf Mon Sep 17 00:00:00 2001 From: dwijenchawra Date: Tue, 18 May 2021 13:08:12 -0700 Subject: [PATCH 01/14] fixed majority of issues, still 1 bug remaining --- src/CPDs/categorical_cpd.jl | 6 ++-- src/CPDs/conditional_linear_gaussian_cpd.jl | 8 ++--- src/DiscreteBayesNet/io.jl | 4 +-- src/DiscreteBayesNet/tables.jl | 34 ++++++++++----------- src/gibbs.jl | 2 +- src/sampling.jl | 6 ++-- test/test_cpds.jl | 18 +++++------ test/test_factors.jl | 2 +- test/test_tables.jl | 26 ++++++++-------- 9 files changed, 53 insertions(+), 53 deletions(-) diff --git a/src/CPDs/categorical_cpd.jl b/src/CPDs/categorical_cpd.jl index 7b53eec..c46d5a5 100644 --- a/src/CPDs/categorical_cpd.jl +++ b/src/CPDs/categorical_cpd.jl @@ -37,7 +37,7 @@ function (cpd::CategoricalCPD)(a::Assignment=Assignment()) if isempty(cpd.parents) return first(cpd.distributions) else - sub = [a[p] for p in cpd.parents] + sub = [a[potential] for potential in cpd.parents] shape = ntuple(i -> cpd.parental_ncategories[i], length(cpd.parental_ncategories)) ind = LinearIndices(shape)[sub...] @@ -84,7 +84,7 @@ function Distributions.fit(::Type{CategoricalCPD{D}}, # calc parent_instantiation_counts nparents = length(parents) - parental_ncategories = map!(p->infer_number_of_instantiations(data[!,p]), Array{Int}(undef, length(parents)), parents) + parental_ncategories = map!(potential->infer_number_of_instantiations(data[!,potential]), Array{Int}(undef, length(parents)), parents) dims = [1:parental_ncategories[i] for i in 1:nparents] distributions = Array{D}(undef, prod(parental_ncategories)) for (q, parent_instantiation) in enumerate(Iterators.product(dims...)) @@ -119,7 +119,7 @@ function Distributions.fit(::Type{DiscreteCPD}, data::DataFrame, target::NodeName, parents::NodeNames; - parental_ncategories::Vector{Int} = map!(p->infer_number_of_instantiations(data[!,p]), Array{Int}(undef, length(parents)), parents), + parental_ncategories::Vector{Int} = map!(potential->infer_number_of_instantiations(data[!,potential]), Array{Int}(undef, length(parents)), parents), target_ncategories::Int = infer_number_of_instantiations(data[!,target]), ) diff --git a/src/CPDs/conditional_linear_gaussian_cpd.jl b/src/CPDs/conditional_linear_gaussian_cpd.jl index c8af622..0d323b5 100644 --- a/src/CPDs/conditional_linear_gaussian_cpd.jl +++ b/src/CPDs/conditional_linear_gaussian_cpd.jl @@ -70,8 +70,8 @@ function Distributions.fit(::Type{ConditionalLinearGaussianCPD}, # --------------------- # identify discrete and continuous parents - parents_disc = filter(p->eltype(data[!,p]) <: Int, parents) - parents_cont = filter(p->eltype(data[!,p]) <: AbstractFloat, parents) + parents_disc = filter(potential->eltype(data[!,potential]) <: Int, parents) + parents_cont = filter(potential->eltype(data[!,potential]) <: AbstractFloat, parents) # --------------------- # pull discrete dataset @@ -85,8 +85,8 @@ function Distributions.fit(::Type{ConditionalLinearGaussianCPD}, parental_ncategories = Array{Int}(undef, nparents_disc) dims = Array{UnitRange{Int64}}(undef, nparents_disc) - for (i,p) in enumerate(parents_disc) - parental_ncategories[i] = infer_number_of_instantiations(data[!,p]) + for (i,potential) in enumerate(parents_disc) + parental_ncategories[i] = infer_number_of_instantiations(data[!,potential]) dims[i] = 1:parental_ncategories[i] end diff --git a/src/DiscreteBayesNet/io.jl b/src/DiscreteBayesNet/io.jl index 512062e..6c32b2f 100644 --- a/src/DiscreteBayesNet/io.jl +++ b/src/DiscreteBayesNet/io.jl @@ -157,8 +157,8 @@ function 
Base.write(io::IO, mime::MIME"text/plain", bn::DiscreteBayesNet) for name in arr_names cpd = get(bn, name) for D in cpd.distributions - for p in probs(D)[1:end-1] - str = @sprintf("%.16g", p) + for potential in probs(D)[1:end-1] + str = @sprintf("%.16g", potential) print(io, space ? " " : "" , str) space = true end diff --git a/src/DiscreteBayesNet/tables.jl b/src/DiscreteBayesNet/tables.jl index 8ffb1b2..cfe6a23 100644 --- a/src/DiscreteBayesNet/tables.jl +++ b/src/DiscreteBayesNet/tables.jl @@ -2,7 +2,7 @@ DataFrames are used to represent factors https://en.wikipedia.org/wiki/Factor_graph - :p is the column containing the probabilities, ::Float64 + :potential is the column containing the probabilities, ::Float64 Each variable has its own column corresponding to its assignments and named with its name These can be obtained using the table() function @@ -34,8 +34,8 @@ function Base.:*(t1::Table, t2::Table) f1 =t1.potential f2 =t2.potential - onnames = setdiff(intersect(propertynames(f1), propertynames(f2)), [:p]) - finalnames = vcat(setdiff(union(propertynames(f1), propertynames(f2)), [:p]), :p) + onnames = setdiff(intersect(propertynames(f1), propertynames(f2)), [:potential]) + finalnames = vcat(setdiff(union(propertynames(f1), propertynames(f2)), [:potential]), :potential) if isempty(onnames) j = join(f1, f2, kind=:cross, makeunique=true) @@ -43,7 +43,7 @@ function Base.:*(t1::Table, t2::Table) j = outerjoin(f1, f2, on=onnames, makeunique=true) end - j[!,:p] = broadcast(*, j[!,:p], j[!,:p_1]) + j[!,:potential] = broadcast(*, j[!,:potential], j[!,:potential_1]) return Table(j[!,finalnames]) end @@ -57,25 +57,25 @@ function sumout(t::Table, v::NodeNameUnion) f = t.potential # vcat works for single values and vectors alike (magic?) - remainingvars = setdiff(propertynames(f), vcat(v, :p)) + remainingvars = setdiff(propertynames(f), vcat(v, :potential)) if isempty(remainingvars) # they want to remove all variables except for prob column # uh ... 'singleton' table? - return Table(DataFrame(p = sum(f[!,:p]))) + return Table(DataFrame(potential = sum(f[!,:potential]))) else # note that this will fail miserably if f is too large (~1E4 maybe?) # nothing I can do; there is a github issue - return Table(combine(df -> DataFrame(p = sum(df[!,:p])), DataFrames.groupby(f, remainingvars))) + return Table(combine(df -> DataFrame(potential = sum(df[!,:potential])), DataFrames.groupby(f, remainingvars))) end end """ Table normalization -Ensures that the `:p` column sums to one +Ensures that the `:potential` column sums to one """ function LinearAlgebra.normalize!(t::Table) - t.potential[!,:p] ./= sum(t.potential[!,:p]) + t.potential[!,:potential] ./= sum(t.potential[!,:potential]) return t end @@ -103,7 +103,7 @@ end """ takes a list of observations of assignments represented as a DataFrame -or a set of data samples (without :p), +or a set of data samples (without :potential), takes the unique assignments, and estimates the associated probability of each assignment based on its frequency of occurrence. @@ -111,15 +111,15 @@ based on its frequency of occurrence. 
function Distributions.fit(::Type{Table}, f::DataFrame) w = ones(size(f, 1)) t = f - if hasproperty(f, :p) - t = f[:, propertynames(t) .!= :p] - w = f[!,:p] + if hasproperty(f, :potential) + t = f[:, propertynames(t) .!= :potential] + w = f[!,:potential] end # unique samples tu = unique(t) # add column with probabilities of unique samples - tu[!,:p] = Float64[sum(w[Bool[tu[j,:] == t[i,:] for i = 1:size(t,1)]]) for j = 1:size(tu,1)] - tu[!,:p] /= sum(tu[!,:p]) + tu[!,:potential] = Float64[sum(w[Bool[tu[j,:] == t[i,:] for i = 1:size(t,1)]]) for j = 1:size(tu,1)] + tu[!,:potential] /= sum(tu[!,:potential]) return Table(tu) end @@ -133,8 +133,8 @@ end # n = size(f, 1) # p = zeros(n) # w = ones(n) -# if hasproperty(f, :p) -# w = f[!,:p] +# if hasproperty(f, :potential) +# w = f[!,:potential] # end # # dfindex = find([hasproperty(a, n) for n in names(f)]) diff --git a/src/gibbs.jl b/src/gibbs.jl index 54e5e0f..946ae96 100644 --- a/src/gibbs.jl +++ b/src/gibbs.jl @@ -329,7 +329,7 @@ function gibbs_sample(bn::BayesNet, nsamples::Integer, burn_in::Integer; # for burn_in_initial_sample use get_weighted_dataframe, should be consistent with the varibale consistent_with if initial_sample == nothing rand_samples = get_weighted_dataframe(bn, 50, consistent_with) - if reduce(|, isnan.(convert(Array{AbstractFloat}, rand_samples[!,:p]))) + if reduce(|, isnan.(convert(Array{AbstractFloat}, rand_samples[!,:potential]))) error("Gibbs Sampler was unable to find an inital sample with non-zero probability, please supply an inital sample") end burn_in_initial_sample = sample_weighted_dataframe(rand_samples) diff --git a/src/sampling.jl b/src/sampling.jl index 08331b9..5d91f85 100644 --- a/src/sampling.jl +++ b/src/sampling.jl @@ -167,7 +167,7 @@ function get_weighted_dataframe(bn::BayesNet, nsamples::Integer, evidence::Assig end w[i] = weight end - t[:p] = w / sum(w) + t[:potential] = w / sum(w) t = DataFrame(t) @@ -180,7 +180,7 @@ get_weighted_dataframe(bn::BayesNet, nsamples::Integer, pair::Pair{NodeName}...) 
Chooses a sample at random from a weighted dataframe """ function sample_weighted_dataframe!(a::Assignment, weighted_dataframe::DataFrame) - p = weighted_dataframe[:, :p] + p = weighted_dataframe[:, :potential] n = length(p) i = 1 c = p[1] @@ -189,7 +189,7 @@ function sample_weighted_dataframe!(a::Assignment, weighted_dataframe::DataFrame c += p[i += 1] end for varname in propertynames(weighted_dataframe) - if varname != :p + if varname != :potential a[varname] = weighted_dataframe[i, varname] end end diff --git a/test/test_cpds.jl b/test/test_cpds.jl index c90cc7a..78c361a 100644 --- a/test/test_cpds.jl +++ b/test/test_cpds.jl @@ -56,9 +56,9 @@ let @test !parentless(cpd) @test nparams(cpd) == 6 - @test cpd(:a=>1).p == [0.5,0.5] - @test cpd(:a=>2).p == [1.0,0.0] - @test cpd(:a=>3).p == [0.0,1.0] + @test cpd(:a=>1).potential == [0.5,0.5] + @test cpd(:a=>2).potential == [1.0,0.0] + @test cpd(:a=>3).potential == [0.0,1.0] cpd = fit(DiscreteCPD, df, :b, [:a], parental_ncategories=[3], target_ncategories=5) @test nparams(cpd) == 15 @@ -73,10 +73,10 @@ let @test nparams(cpd) == 4 @test isa(cpd(Assignment(:a=>1, :b=>1)), disttype(cpd)) - @test cpd(Assignment(:a=>1, :b=>1)).p == 1.0 - @test cpd(Assignment(:a=>1, :b=>2)).p == 0.0 - @test cpd(Assignment(:a=>2, :b=>1)).p == 0.5 - @test cpd(Assignment(:a=>2, :b=>2)).p == 0.5 + @test cpd(Assignment(:a=>1, :b=>1)).potential == 1.0 + @test cpd(Assignment(:a=>1, :b=>2)).potential == 0.0 + @test cpd(Assignment(:a=>2, :b=>1)).potential == 0.5 + @test cpd(Assignment(:a=>2, :b=>2)).potential == 0.5 end end @@ -193,8 +193,8 @@ end let a = StaticCPD(:a, Bernoulli(0.5)) b = StaticCPD(:b, Bernoulli(0.6)) - p = [:a,:b] - c = ParentFunctionalCPD{Bernoulli}(:c, p, (seq,par)->begin + potential = [:a,:b] + c = ParentFunctionalCPD{Bernoulli}(:c, potential, (seq,par)->begin Bernoulli(mean(seq[k] for k in par)) end ) diff --git a/test/test_factors.jl b/test/test_factors.jl index 33f5702..e2343da 100644 --- a/test/test_factors.jl +++ b/test/test_factors.jl @@ -46,7 +46,7 @@ name = :N5 ϕ = Factor(bn, name) df = innerjoin(DataFrame(ϕ), table(bn, name).potential, on=names(ϕ)) -diff = abs.(df[!,:p] - df[!,:potential]) +diff = abs.(df[!,:potential] - df[!,:potential]) @test all(diff .< 1E-13) end diff --git a/test/test_tables.jl b/test/test_tables.jl index 98787d7..27485df 100644 --- a/test/test_tables.jl +++ b/test/test_tables.jl @@ -1,45 +1,45 @@ f1 = DataFrame( A = [false, true, false, true], B = [false, false, true, true], - p = [0.75, 0.60, 0.25, 0.40] + potential = [0.75, 0.60, 0.25, 0.40] ) |> Table f2 = DataFrame( A = [false, true], - p = [0.9, 0.1] + potential = [0.9, 0.1] ) |> Table let # factor multiplication f12 = f1 * f2 @test size(f12) == (4,3) - @test elementwise_isapprox(partialsort(f12, Assignment(:A=>false, :B=>false))[:p], [0.75*0.9]) - @test elementwise_isapprox(partialsort(f12, Assignment(:A=>true, :B=>false))[:p], [0.60*0.1]) - @test elementwise_isapprox(partialsort(f12, Assignment(:A=>false, :B=>true))[:p], [0.25*0.9]) - @test elementwise_isapprox(partialsort(f12, Assignment(:A=>true, :B=>true))[:p], [0.40*0.1]) + @test elementwise_isapprox(partialsort(f12, Assignment(:A=>false, :B=>false))[:potential], [0.75*0.9]) + @test elementwise_isapprox(partialsort(f12, Assignment(:A=>true, :B=>false))[:potential], [0.60*0.1]) + @test elementwise_isapprox(partialsort(f12, Assignment(:A=>false, :B=>true))[:potential], [0.25*0.9]) + @test elementwise_isapprox(partialsort(f12, Assignment(:A=>true, :B=>true))[:potential], [0.40*0.1]) end let # factor marginalization 
f1_sans_B = sumout(f1, :B) @test size(f1_sans_B) == (2,2) - @test elementwise_isapprox(partialsort(f1_sans_B, Assignment(:A=>false))[:p], [0.75 + 0.25]) - @test elementwise_isapprox(partialsort(f1_sans_B, Assignment(:A=>true))[:p], [0.60 + 0.40]) + @test elementwise_isapprox(partialsort(f1_sans_B, Assignment(:A=>false))[:potential], [0.75 + 0.25]) + @test elementwise_isapprox(partialsort(f1_sans_B, Assignment(:A=>true))[:potential], [0.60 + 0.40]) f1_sans_A = sumout(f1, :A) @test size(f1_sans_A) == (2,2) - @test elementwise_isapprox(partialsort(f1_sans_A, Assignment(:B=>false))[:p], [0.75 + 0.60]) - @test elementwise_isapprox(partialsort(f1_sans_A, Assignment(:B=>true))[:p], [0.25 + 0.40]) + @test elementwise_isapprox(partialsort(f1_sans_A, Assignment(:B=>false))[:potential], [0.75 + 0.60]) + @test elementwise_isapprox(partialsort(f1_sans_A, Assignment(:B=>true))[:potential], [0.25 + 0.40]) end let # factor normalization f3 = BayesNets.normalize!(Table(DataFrame( A = [false, true], - p = [1.0, 3.0] + potential = [1.0, 3.0] ))) - @test elementwise_isapprox(f3[:p], [0.25, 0.75]) + @test elementwise_isapprox(f3[:potential], [0.25, 0.75]) end let @@ -47,7 +47,7 @@ let table = fit(Table, DataFrame( A = [false, false, true, true, true] )) - @test elementwise_isapprox(table[:p], [2/5, 3/5]) + @test elementwise_isapprox(table[:potential], [2/5, 3/5]) # TODO: properly test this # estimate_convergence(df, Assignment(:A=>true)) From 3021635c8b0d0a0208a125e3c8f9a1139dccbef6 Mon Sep 17 00:00:00 2001 From: dwijenchawra Date: Thu, 17 Jun 2021 10:10:58 -0700 Subject: [PATCH 02/14] fixed some issues still have p to change --- docs/Project.toml | 8 -------- docs/make.jl | 3 ++- test/test_cpds.jl | 14 +++++++------- test/test_docs.jl | 4 +--- 4 files changed, 10 insertions(+), 19 deletions(-) delete mode 100644 docs/Project.toml diff --git a/docs/Project.toml b/docs/Project.toml deleted file mode 100644 index 15aa336..0000000 --- a/docs/Project.toml +++ /dev/null @@ -1,8 +0,0 @@ -[deps] -Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" - -[compat] -Documenter = "0.26" - -[extras] -TikzGraphs = "b4f28e30-c73f-5eaf-a395-8a9db949a742" \ No newline at end of file diff --git a/docs/make.jl b/docs/make.jl index 59a3c10..552d1ab 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -26,4 +26,5 @@ makedocs( deploydocs( repo = "github.com/sisl/BayesNets.jl.git", -) \ No newline at end of file +) +return true \ No newline at end of file diff --git a/test/test_cpds.jl b/test/test_cpds.jl index 78c361a..4996b5d 100644 --- a/test/test_cpds.jl +++ b/test/test_cpds.jl @@ -56,9 +56,9 @@ let @test !parentless(cpd) @test nparams(cpd) == 6 - @test cpd(:a=>1).potential == [0.5,0.5] - @test cpd(:a=>2).potential == [1.0,0.0] - @test cpd(:a=>3).potential == [0.0,1.0] + @test cpd(:a=>1).p == [0.5,0.5] + @test cpd(:a=>2).p == [1.0,0.0] + @test cpd(:a=>3).p == [0.0,1.0] cpd = fit(DiscreteCPD, df, :b, [:a], parental_ncategories=[3], target_ncategories=5) @test nparams(cpd) == 15 @@ -73,10 +73,10 @@ let @test nparams(cpd) == 4 @test isa(cpd(Assignment(:a=>1, :b=>1)), disttype(cpd)) - @test cpd(Assignment(:a=>1, :b=>1)).potential == 1.0 - @test cpd(Assignment(:a=>1, :b=>2)).potential == 0.0 - @test cpd(Assignment(:a=>2, :b=>1)).potential == 0.5 - @test cpd(Assignment(:a=>2, :b=>2)).potential == 0.5 + @test cpd(Assignment(:a=>1, :b=>1)).p == 1.0 + @test cpd(Assignment(:a=>1, :b=>2)).p == 0.0 + @test cpd(Assignment(:a=>2, :b=>1)).p == 0.5 + @test cpd(Assignment(:a=>2, :b=>2)).p == 0.5 end end diff --git a/test/test_docs.jl 
b/test/test_docs.jl index 368bb80..cec3b10 100644 --- a/test/test_docs.jl +++ b/test/test_docs.jl @@ -1,5 +1,3 @@ -using NBInclude - let - @nbinclude(joinpath(dirname(@__DIR__), "docs", "BayesNets.ipynb")) + @test include("../docs/make.jl") end From e01996e3031b215db463ea820ce9dc8293aeda1e Mon Sep 17 00:00:00 2001 From: Dwijen Chawra Date: Sat, 26 Jun 2021 20:03:15 -0700 Subject: [PATCH 03/14] fixed issue #67 and #51 --- test/runtests.jl | 6 +++--- test/test_discrete_bayes_nets.jl | 7 +++---- test/test_factors.jl | 8 +++++--- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index 566df3b..5045bd5 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -72,7 +72,7 @@ end @testset "gen bn" begin include(joinpath(testdir, "test_genbn.jl")) end - @testset "docs" begin - include(joinpath(testdir, "test_docs.jl")) - end + # @testset "docs" begin + # include(joinpath(testdir, "test_docs.jl")) + # end end diff --git a/test/test_discrete_bayes_nets.jl b/test/test_discrete_bayes_nets.jl index 8ad5ce2..73afb7f 100644 --- a/test/test_discrete_bayes_nets.jl +++ b/test/test_discrete_bayes_nets.jl @@ -10,15 +10,15 @@ let push!(bn, DiscreteCPD(:b, [:a], [2], [Categorical([0.5,0.5]),Categorical([0.2,0.8])])) T = table(bn, :a) - @test T == DataFrame(a=[1,2], p=[0.4,0.6]) + @test T == DataFrame(a=[1,2], potential=[0.4,0.6]) T = table(bn, :b) - @test T == DataFrame(a=[1,2,1,2], b=[1,1,2,2], p=[0.5,0.2,0.5,0.8]) + @test T == DataFrame(a=[1,2,1,2], b=[1,1,2,2], potential=[0.5,0.2,0.5,0.8]) data = DataFrame(a=[1,1,1,1,2,2,2,2], b=[1,2,1,2,1,1,1,2]) T = count(bn, :a, data) - @test T == DataFrame(a=[1,2], count=[4,4]) + @test T == DataFrame(a=[1,2], count=[4,4]) T = count(bn, :b, data) @test nrow(T) == 4 @@ -131,4 +131,3 @@ let @test isapprox(pdf(get(bn, :a), :a=>1, :b=>1, :c=>2), 0.5, atol=1e-4) @test isapprox(pdf(get(bn, :a), :a=>1, :b=>2, :c=>2), 0.7, atol=1e-4) end - diff --git a/test/test_factors.jl b/test/test_factors.jl index e2343da..e0d72b3 100644 --- a/test/test_factors.jl +++ b/test/test_factors.jl @@ -45,8 +45,8 @@ bn = rand_discrete_bn(10, 4) name = :N5 ϕ = Factor(bn, name) -df = innerjoin(DataFrame(ϕ), table(bn, name).potential, on=names(ϕ)) -diff = abs.(df[!,:potential] - df[!,:potential]) +df = innerjoin(DataFrame(ϕ), table(bn, name).potential, on=names(ϕ), makeunique=true) +diff = abs.(df[!,:potential] - df[!,:potential_1]) @test all(diff .< 1E-13) end @@ -64,6 +64,9 @@ end ############################################################################### # normalize + +using LinearAlgebra + let ϕ = Factor([:a, :b], Float64[1 2; 3 4]) ϕ2 = LinearAlgebra.normalize(ϕ, p=1) @@ -203,4 +206,3 @@ let @test_throws DimensionMismatch ϕ1 * ϕ2 end - From 9763c60687b15b25eed8b5fe63d548dc2602c1c0 Mon Sep 17 00:00:00 2001 From: Dwijen Chawra Date: Sat, 26 Jun 2021 20:03:30 -0700 Subject: [PATCH 04/14] fixed issue #67 and #51 --- src/BayesNets.jl | 4 +++- src/DiscreteBayesNet/discrete_bayes_net.jl | 6 +++--- src/Factors/factors_dims.jl | 7 +++---- src/Factors/factors_main.jl | 9 +++++++-- src/Inference/exact.jl | 3 +-- src/ProbabilisticGraphicalModels/assignments.jl | 6 +++--- src/ProbabilisticGraphicalModels/inference.jl | 2 +- src/ProbabilisticGraphicalModels/nodenames.jl | 7 ++++++- src/bayes_nets.jl | 3 --- 9 files changed, 27 insertions(+), 20 deletions(-) diff --git a/src/BayesNets.jl b/src/BayesNets.jl index c7d6134..e7059e8 100644 --- a/src/BayesNets.jl +++ b/src/BayesNets.jl @@ -107,7 +107,9 @@ export adding_edge_preserves_acyclicity, 
bayesian_score_component, bayesian_score_components, - bayesian_score + bayesian_score, + + nodenames include("bayes_nets.jl") diff --git a/src/DiscreteBayesNet/discrete_bayes_net.jl b/src/DiscreteBayesNet/discrete_bayes_net.jl index b3e32a2..478b6fd 100644 --- a/src/DiscreteBayesNet/discrete_bayes_net.jl +++ b/src/DiscreteBayesNet/discrete_bayes_net.jl @@ -71,15 +71,15 @@ function table(bn::DiscreteBayesNet, name::NodeName) d[!,name] = 1:ncategories(cpd(assignment)) end - p = ones(size(d,1)) # the probability column + potential = ones(size(d,1)) # the probability column for i in 1:size(d,1) assignment = Assignment() for j in 1:length(varnames) assignment[varnames[j]] = d[i,j] end - p[i] = pdf(cpd, assignment) + potential[i] = pdf(cpd, assignment) end - d[!,:p] = p + d[!,:potential] = potential return Table(d) end diff --git a/src/Factors/factors_dims.jl b/src/Factors/factors_dims.jl index 485b017..faab0de 100644 --- a/src/Factors/factors_dims.jl +++ b/src/Factors/factors_dims.jl @@ -22,7 +22,7 @@ Normalize the factor so all instances of dims have (or the entire factors has) p-norm of 1 """ function LinearAlgebra.normalize!(ϕ::Factor, dims::NodeNameUnion; p::Int=1) - dims = unique(convert(NodeNames, dims)) + dims = unique(nodeconvert(NodeNames, dims)) _check_dims_valid(dims, ϕ) inds = indexin(dims, ϕ) @@ -69,7 +69,7 @@ Reduce dimensions `dims` in `ϕ` using function `op`. """ function reducedim(op, ϕ::Factor, dims::NodeNameUnion, v0=nothing) # a (possibly?) more efficient version than reducedim!(deepcopy(ϕ)) - dims = convert(NodeNames, dims) + dims = nodeconvert(NodeNames, dims) _check_dims_valid(dims, ϕ) # needs to be a tuple for squeeze @@ -85,7 +85,7 @@ function reducedim(op, ϕ::Factor, dims::NodeNameUnion, v0=nothing) end function reducedim!(op, ϕ::Factor, dims::NodeNameUnion, v0=nothing) - dims = convert(NodeNames, dims) + dims = nodeconvert(NodeNames, dims) _check_dims_valid(dims, ϕ) # needs to be a tuple for squeeze @@ -270,4 +270,3 @@ end /(ϕ1::Factor, ϕ2::Factor) = join(/, ϕ1, ϕ2) +(ϕ1::Factor, ϕ2::Factor) = join(+, ϕ1, ϕ2) -(ϕ1::Factor, ϕ2::Factor) = join(-, ϕ1, ϕ2) - diff --git a/src/Factors/factors_main.jl b/src/Factors/factors_main.jl index 808b7ea..b5be45a 100644 --- a/src/Factors/factors_main.jl +++ b/src/Factors/factors_main.jl @@ -5,6 +5,12 @@ # THE MOST BASIC ASSUMPTION IS THAT ALL VARIABLES ARE CATEGORICAL AND THEREFORE # Base.OneTo WORTHY. IF THAT IS VIOLATED, NOTHING WILL WORK +nodeconvert(::Type{NodeNames}, names::NodeNameUnion) = names + + +nodeconvert(::Type{NodeNames}, name::NodeName) = [name] + + """ Factor(dims, potential) @@ -16,7 +22,7 @@ mutable struct Factor # In most cases this will be a probability function Factor(dims::NodeNameUnion, potential::Array{Float64}) - dims = convert(NodeNames, dims) + dims = nodeconvert(NodeNames, dims) _ckeck_dims_unique(dims) (length(dims) != ndims(potential)) && @@ -192,4 +198,3 @@ function pattern(ϕ::Factor) hcat([repeat(collect(1:l), inner=i, outer=o) for (l, i, o) in zip(lens, inners, outers)]...) 
end - diff --git a/src/Inference/exact.jl b/src/Inference/exact.jl index 7800023..ad5b627 100644 --- a/src/Inference/exact.jl +++ b/src/Inference/exact.jl @@ -9,7 +9,7 @@ function infer(im::ExactInference, inf::InferenceState{BN}) where {BN<:DiscreteB nodes = names(bn) query = inf.query evidence = inf.evidence - hidden = setdiff(nodes, vcat(query, names(evidence))) + hidden = setdiff(nodes, vcat(query, keys(evidence))) factors = map(n -> Factor(bn, n, evidence), nodes) @@ -31,4 +31,3 @@ function infer(im::ExactInference, inf::InferenceState{BN}) where {BN<:DiscreteB end infer(inf::InferenceState{BN}) where {BN<:DiscreteBayesNet} = infer(ExactInference(), inf) infer(bn::BN, query::NodeNameUnion; evidence::Assignment=Assignment()) where {BN<:DiscreteBayesNet} = infer(ExactInference(), InferenceState(bn, query, evidence)) - diff --git a/src/ProbabilisticGraphicalModels/assignments.jl b/src/ProbabilisticGraphicalModels/assignments.jl index 60243da..dc26c8a 100644 --- a/src/ProbabilisticGraphicalModels/assignments.jl +++ b/src/ProbabilisticGraphicalModels/assignments.jl @@ -1,10 +1,10 @@ const Assignment = Dict{NodeName, Any} """ - names(a::Assignment) + nodenames(a::Assignment) Return a vector of NodeNames (aka symbols) for the assignment """ -Base.names(a::Assignment) = collect(keys(a)) +nodenames(a::Assignment) = collect(keys(a)) """ consistent(a::Assignment, b::Assignment) @@ -19,4 +19,4 @@ function consistent(a::Assignment, b::Assignment) end return true -end \ No newline at end of file +end diff --git a/src/ProbabilisticGraphicalModels/inference.jl b/src/ProbabilisticGraphicalModels/inference.jl index d6bed23..a857b2a 100644 --- a/src/ProbabilisticGraphicalModels/inference.jl +++ b/src/ProbabilisticGraphicalModels/inference.jl @@ -28,7 +28,7 @@ struct InferenceState{PGM<:ProbabilisticGraphicalModel} end end function InferenceState(pgm::PGM, query::NodeNameUnion, evidence::Assignment=Assignment()) where {PGM<:ProbabilisticGraphicalModel} - query = unique(convert(NodeNames, query)) + query = unique(nodeconvert(NodeNames, query)) return InferenceState{PGM}(pgm, query, evidence) end diff --git a/src/ProbabilisticGraphicalModels/nodenames.jl b/src/ProbabilisticGraphicalModels/nodenames.jl index e2a163e..a9e10bb 100644 --- a/src/ProbabilisticGraphicalModels/nodenames.jl +++ b/src/ProbabilisticGraphicalModels/nodenames.jl @@ -1,5 +1,10 @@ const NodeName = Symbol + + const NodeNames = AbstractVector{NodeName} const NodeNameUnion = Union{NodeName, NodeNames} -Base.convert(::Type{NodeNames}, name::NodeName) = [name] \ No newline at end of file +nodeconvert(::Type{NodeNames}, names::NodeNameUnion) = names + + +nodeconvert(::Type{NodeNames}, name::NodeName) = [name] diff --git a/src/bayes_nets.jl b/src/bayes_nets.jl index b5871c2..7b49e93 100644 --- a/src/bayes_nets.jl +++ b/src/bayes_nets.jl @@ -326,6 +326,3 @@ function CPDs.logpdf(bn::BayesNet, assignment::Assignment) end retval end - - - From c2e484747828f415bf12d6e6649c9dfd0837bcdd Mon Sep 17 00:00:00 2001 From: dwijenchawra Date: Sat, 26 Jun 2021 20:10:15 -0700 Subject: [PATCH 05/14] Update main.yml --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 7f5db2f..a43b7ac 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -16,7 +16,7 @@ jobs: with: version: '1.6' - name: Install LuaLatex - run: sudo apt-get install texlive-full && sudo apt-get install texlive-latex-extra && sudo mktexlsr && sudo updmap-sys + run: sudo 
apt-get install texlive-full && sudo apt-get install texlive-latex-extra && sudo mktexlsr && sudo updmap-sys --fix-missing - name: Install dependencies run: julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' - name: Build and deploy From de80a85be112e84c1605d28bb460e6ca8744fddd Mon Sep 17 00:00:00 2001 From: dwijenchawra Date: Sat, 26 Jun 2021 20:24:11 -0700 Subject: [PATCH 06/14] Update main.yml --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a43b7ac..7f2912d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -16,7 +16,7 @@ jobs: with: version: '1.6' - name: Install LuaLatex - run: sudo apt-get install texlive-full && sudo apt-get install texlive-latex-extra && sudo mktexlsr && sudo updmap-sys --fix-missing + run: sudo apt-get update && sudo apt-get install texlive-full && sudo apt-get install texlive-latex-extra && sudo mktexlsr && sudo updmap-sys --fix-missing - name: Install dependencies run: julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' - name: Build and deploy From 586a4c648b9171d843dd9696b96f772df4240e2e Mon Sep 17 00:00:00 2001 From: Dwijen Chawra Date: Thu, 8 Jul 2021 20:50:27 -0700 Subject: [PATCH 07/14] resolved p issues --- src/CPDs/categorical_cpd.jl | 6 +++--- src/CPDs/conditional_linear_gaussian_cpd.jl | 8 ++++---- src/Factors/factors_main.jl | 1 - test/test_cpds.jl | 4 ++-- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/CPDs/categorical_cpd.jl b/src/CPDs/categorical_cpd.jl index c46d5a5..7b53eec 100644 --- a/src/CPDs/categorical_cpd.jl +++ b/src/CPDs/categorical_cpd.jl @@ -37,7 +37,7 @@ function (cpd::CategoricalCPD)(a::Assignment=Assignment()) if isempty(cpd.parents) return first(cpd.distributions) else - sub = [a[potential] for potential in cpd.parents] + sub = [a[p] for p in cpd.parents] shape = ntuple(i -> cpd.parental_ncategories[i], length(cpd.parental_ncategories)) ind = LinearIndices(shape)[sub...] 
@@ -84,7 +84,7 @@ function Distributions.fit(::Type{CategoricalCPD{D}}, # calc parent_instantiation_counts nparents = length(parents) - parental_ncategories = map!(potential->infer_number_of_instantiations(data[!,potential]), Array{Int}(undef, length(parents)), parents) + parental_ncategories = map!(p->infer_number_of_instantiations(data[!,p]), Array{Int}(undef, length(parents)), parents) dims = [1:parental_ncategories[i] for i in 1:nparents] distributions = Array{D}(undef, prod(parental_ncategories)) for (q, parent_instantiation) in enumerate(Iterators.product(dims...)) @@ -119,7 +119,7 @@ function Distributions.fit(::Type{DiscreteCPD}, data::DataFrame, target::NodeName, parents::NodeNames; - parental_ncategories::Vector{Int} = map!(potential->infer_number_of_instantiations(data[!,potential]), Array{Int}(undef, length(parents)), parents), + parental_ncategories::Vector{Int} = map!(p->infer_number_of_instantiations(data[!,p]), Array{Int}(undef, length(parents)), parents), target_ncategories::Int = infer_number_of_instantiations(data[!,target]), ) diff --git a/src/CPDs/conditional_linear_gaussian_cpd.jl b/src/CPDs/conditional_linear_gaussian_cpd.jl index 0d323b5..c8af622 100644 --- a/src/CPDs/conditional_linear_gaussian_cpd.jl +++ b/src/CPDs/conditional_linear_gaussian_cpd.jl @@ -70,8 +70,8 @@ function Distributions.fit(::Type{ConditionalLinearGaussianCPD}, # --------------------- # identify discrete and continuous parents - parents_disc = filter(potential->eltype(data[!,potential]) <: Int, parents) - parents_cont = filter(potential->eltype(data[!,potential]) <: AbstractFloat, parents) + parents_disc = filter(p->eltype(data[!,p]) <: Int, parents) + parents_cont = filter(p->eltype(data[!,p]) <: AbstractFloat, parents) # --------------------- # pull discrete dataset @@ -85,8 +85,8 @@ function Distributions.fit(::Type{ConditionalLinearGaussianCPD}, parental_ncategories = Array{Int}(undef, nparents_disc) dims = Array{UnitRange{Int64}}(undef, nparents_disc) - for (i,potential) in enumerate(parents_disc) - parental_ncategories[i] = infer_number_of_instantiations(data[!,potential]) + for (i,p) in enumerate(parents_disc) + parental_ncategories[i] = infer_number_of_instantiations(data[!,p]) dims[i] = 1:parental_ncategories[i] end diff --git a/src/Factors/factors_main.jl b/src/Factors/factors_main.jl index b5be45a..97bb1be 100644 --- a/src/Factors/factors_main.jl +++ b/src/Factors/factors_main.jl @@ -7,7 +7,6 @@ nodeconvert(::Type{NodeNames}, names::NodeNameUnion) = names - nodeconvert(::Type{NodeNames}, name::NodeName) = [name] diff --git a/test/test_cpds.jl b/test/test_cpds.jl index 4996b5d..c90cc7a 100644 --- a/test/test_cpds.jl +++ b/test/test_cpds.jl @@ -193,8 +193,8 @@ end let a = StaticCPD(:a, Bernoulli(0.5)) b = StaticCPD(:b, Bernoulli(0.6)) - potential = [:a,:b] - c = ParentFunctionalCPD{Bernoulli}(:c, potential, (seq,par)->begin + p = [:a,:b] + c = ParentFunctionalCPD{Bernoulli}(:c, p, (seq,par)->begin Bernoulli(mean(seq[k] for k in par)) end ) From 5fd63662a8e3aae727198c321d092638c46ad728 Mon Sep 17 00:00:00 2001 From: Dwijen Chawra Date: Thu, 8 Jul 2021 21:02:37 -0700 Subject: [PATCH 08/14] uncommented doctest --- test/runtests.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index 5045bd5..566df3b 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -72,7 +72,7 @@ end @testset "gen bn" begin include(joinpath(testdir, "test_genbn.jl")) end - # @testset "docs" begin - # include(joinpath(testdir, "test_docs.jl")) - # end + 
@testset "docs" begin + include(joinpath(testdir, "test_docs.jl")) + end end From 63b725ba6b7f9d19a16aba158dffe83e67f1b7ee Mon Sep 17 00:00:00 2001 From: Dwijen Chawra Date: Thu, 8 Jul 2021 21:11:48 -0700 Subject: [PATCH 09/14] fixed tagbot run times --- .github/workflows/TagBot.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/TagBot.yml b/.github/workflows/TagBot.yml index d77d3a0..623860f 100644 --- a/.github/workflows/TagBot.yml +++ b/.github/workflows/TagBot.yml @@ -1,11 +1,15 @@ name: TagBot on: - schedule: - - cron: 0 * * * * + issue_comment: + types: + - created + workflow_dispatch: jobs: TagBot: + if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot' runs-on: ubuntu-latest steps: - uses: JuliaRegistries/TagBot@v1 with: token: ${{ secrets.GITHUB_TOKEN }} + ssh: ${{ secrets.DOCUMENTER_KEY }} \ No newline at end of file From 834158a7aa4727f79da453e05ccdcc66473c3a34 Mon Sep 17 00:00:00 2001 From: Dwijen Chawra Date: Thu, 8 Jul 2021 21:12:05 -0700 Subject: [PATCH 10/14] moved --fix-missing --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 7f2912d..aedc189 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -16,7 +16,7 @@ jobs: with: version: '1.6' - name: Install LuaLatex - run: sudo apt-get update && sudo apt-get install texlive-full && sudo apt-get install texlive-latex-extra && sudo mktexlsr && sudo updmap-sys --fix-missing + run: sudo apt-get update && sudo apt-get install texlive-full --fix-missing && sudo apt-get install texlive-latex-extra && sudo mktexlsr && sudo updmap-sys - name: Install dependencies run: julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' - name: Build and deploy From 120140e4e0923e49dc8bb462adc243aec7bed014 Mon Sep 17 00:00:00 2001 From: Dwijen Chawra Date: Wed, 18 Aug 2021 09:59:17 -0700 Subject: [PATCH 11/14] added examples for discrete CPDS --- docs/src/usage.md | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/docs/src/usage.md b/docs/src/usage.md index 578f11c..a8d10b9 100644 --- a/docs/src/usage.md +++ b/docs/src/usage.md @@ -187,7 +187,7 @@ rand(bn_gibbs, gsampler, 5) BayesNets.jl supports parameter learning for an entire graph. -```julia +```julia fit(BayesNet, data, (:a=>:b), [StaticCPD{Normal}, LinearGaussianCPD]) ``` @@ -223,7 +223,7 @@ Inference methods for discrete Bayesian networks can be used via the `infer` met bn = DiscreteBayesNet() push!(bn, DiscreteCPD(:a, [0.3,0.7])) push!(bn, DiscreteCPD(:b, [0.2,0.8])) -push!(bn, DiscreteCPD(:c, [:a, :b], [2,2], +push!(bn, DiscreteCPD(:c, [:a, :b], [2,2], [Categorical([0.1,0.9]), Categorical([0.2,0.8]), Categorical([1.0,0.0]), @@ -283,7 +283,7 @@ data[1:3,:] # only display a subset... Here we use the K2 structure learning algorithm which runs in polynomial time but requires that we specify a topological node ordering. ```@example bayesnet -parameters = K2GraphSearch([:Species, :SepalLength, :SepalWidth, :PetalLength, :PetalWidth], +parameters = K2GraphSearch([:Species, :SepalLength, :SepalWidth, :PetalLength, :PetalWidth], ConditionalLinearGaussianCPD, max_n_parents=2) bn = fit(BayesNet, data, parameters) @@ -300,7 +300,7 @@ Changing the ordering will change the structure. 
```julia CLG = ConditionalLinearGaussianCPD -parameters = K2GraphSearch([:Species, :PetalLength, :PetalWidth, :SepalLength, :SepalWidth], +parameters = K2GraphSearch([:Species, :PetalLength, :PetalWidth, :SepalLength, :SepalWidth], [StaticCPD{Categorical}, CLG, CLG, CLG, CLG], max_n_parents=2) fit(BayesNet, data, parameters) @@ -311,7 +311,7 @@ A `ScoringFunction` allows for extracting a scoring metric for a CPD given data. A `GraphSearchStrategy` defines a structure learning algorithm. The K2 algorithm is defined through `K2GraphSearch` and `GreedyHillClimbing` is implemented for discrete Bayesian networks and the Bayesian score: ```@example bayesnet -data = DataFrame(c=[1,1,1,1,2,2,2,2,3,3,3,3], +data = DataFrame(c=[1,1,1,1,2,2,2,2,3,3,3,3], b=[1,1,1,2,2,2,2,1,1,2,1,1], a=[1,1,1,2,1,1,2,1,1,2,1,1]) parameters = GreedyHillClimbing(ScoreComponentCache(data), max_n_parents=3, prior=UniformPrior()) @@ -339,6 +339,14 @@ A whole suite of features are supported for DiscreteBayesNets. Here, we illustra We also detail obtaining a bayesian score for a network structure in the next section. ```julia +bn = DiscreteBayesNet() +push!(bn, DiscreteCPD(:hospital, [:a, :b], [2,2], + [Categorical([0.9,0.1]), + Categorical([0.2,0.8]), + Categorical([0.7,0.3]), + Categorical([0.01,0.99]), + ])) + count(bn, :a, data) # 1 statistics(bn.dag, data) # 2 table(bn, :b) # 3 @@ -363,12 +371,10 @@ TikzPictures.save(SVG("plot10"), plot) # hide The bayesian score for a discrete-valued BayesNet can can be calculated based only on the structure and data (the CPDs do not need to be defined beforehand). This is implemented with a method of ```bayesian_score``` that takes in a directed graph, the names of the nodes and data. ```@example bayesnet -data = DataFrame(c=[1,1,1,1,2,2,2,2,3,3,3,3], +data = DataFrame(c=[1,1,1,1,2,2,2,2,3,3,3,3], b=[1,1,1,2,2,2,2,1,1,2,1,1], a=[1,1,1,2,1,1,2,1,1,2,1,1]) g = DAG(3) add_edge!(g,1,2); add_edge!(g,2,3); add_edge!(g,1,3) bayesian_score(g, [:a,:b,:c], data) ``` - - From 3a6341e0f55d172409d0d4c22fa2d61184b7c7c6 Mon Sep 17 00:00:00 2001 From: Dwijen Chawra Date: Wed, 18 Aug 2021 09:59:27 -0700 Subject: [PATCH 12/14] deleted travis configs --- .travis.yml | 34 ---------------------------------- 1 file changed, 34 deletions(-) delete mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 5eb1272..0000000 --- a/.travis.yml +++ /dev/null @@ -1,34 +0,0 @@ -# Documentation: http://docs.travis-ci.com/user/languages/julia -language: julia -notifications: - email: false -julia: - - 1.0 - - 1.5 - - nightly -os: - - linux - -cache: - directories: - - ~/.julia/artifacts -jobs: - fast_finish: true - allow_failures: - - julia: nightly - include: - - stage: "Documentation" - julia: 1.5 - os: linux - script: - - julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); - Pkg.instantiate()' - - julia --project=docs/ docs/make.jl - after_success: skip -after_success: - - | - julia -e ' - using Pkg - Pkg.add("Coverage") - using Coverage - Coveralls.submit(process_folder())' \ No newline at end of file From 9e301a90a992db1937f3db8ac9b0ce99970d5a76 Mon Sep 17 00:00:00 2001 From: Dwijen Chawra Date: Thu, 19 Aug 2021 08:46:40 -0700 Subject: [PATCH 13/14] testing new docs --- docs/src/usage.md | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/docs/src/usage.md b/docs/src/usage.md index a8d10b9..99726c3 100644 --- a/docs/src/usage.md +++ b/docs/src/usage.md @@ -325,7 +325,7 @@ TikzPictures.save(SVG("plot9"), plot) # 
hide We can specify the number of categories for each variable in case it cannot be correctly inferred: -```julia +```@example bayesnet bn = fit(DiscreteBayesNet, data, parameters, ncategories=[3,3,2]) ``` @@ -338,19 +338,17 @@ A whole suite of features are supported for DiscreteBayesNets. Here, we illustra We also detail obtaining a bayesian score for a network structure in the next section. -```julia -bn = DiscreteBayesNet() -push!(bn, DiscreteCPD(:hospital, [:a, :b], [2,2], - [Categorical([0.9,0.1]), - Categorical([0.2,0.8]), - Categorical([0.7,0.3]), - Categorical([0.01,0.99]), - ])) - -count(bn, :a, data) # 1 -statistics(bn.dag, data) # 2 -table(bn, :b) # 3 -table(bn, :c, :a=>1) # 4 +```@example bayesnet +count(bn, :a, data) +``` +```@example bayesnet +statistics(bn.dag, data) +``` +```@example bayesnet +table(bn, :b) +``` +```@example bayesnet +table(bn, :c, :a=>1) ``` ## Reading from XDSL From 6bd6e88af7e15e17a8138b9c68ed69708752de12 Mon Sep 17 00:00:00 2001 From: Dwijen Chawra Date: Thu, 19 Aug 2021 18:55:03 -0700 Subject: [PATCH 14/14] fixed repo name for testing --- docs/make.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/make.jl b/docs/make.jl index 552d1ab..b9aa845 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -25,6 +25,6 @@ makedocs( ) deploydocs( - repo = "github.com/sisl/BayesNets.jl.git", + repo = "github.com/dwijenchawra/BayesNets.jl.git", ) -return true \ No newline at end of file +return true
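
For reference, a minimal usage sketch of the `:potential` column convention this patch series adopts for factor `Table`s (the probability column was previously named `:p`). This is an illustrative example only, not part of any patch above; the values mirror the cases exercised in `test/test_tables.jl`:

```julia
using DataFrames, BayesNets

# A factor Table wraps a DataFrame whose probability column is named :potential.
f1 = DataFrame(
    A = [false, true, false, true],
    B = [false, false, true, true],
    potential = [0.75, 0.60, 0.25, 0.40]
) |> Table

# Marginalize out B, then renormalize; both operations read and write :potential.
f1_sans_B = sumout(f1, :B)
BayesNets.normalize!(f1_sans_B)
f1_sans_B.potential[!, :potential]   # [0.5, 0.5], i.e. the column sums to one
```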