Skip to content

Commit 23642bc

Browse files
authored
Add Support for Julia v1.0 (#24)
* add julia v0.7, v1.0 and v1.1 to travis * julia v1.0 passing * compat for julia v0.6
1 parent 9c30794 commit 23642bc

File tree

8 files changed

+150
-108
lines changed

8 files changed

+150
-108
lines changed

.travis.yml

Lines changed: 6 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,9 @@ os:
44
- osx
55
julia:
66
- 0.6
7-
- nightly
7+
- 0.7
8+
- 1.0
9+
- 1.1
810
matrix:
911
allow_failures:
1012
- julia: nightly
@@ -14,19 +16,8 @@ cache:
1416
addons:
1517
apt_packages:
1618
- gfortran
17-
1819
before_install:
19-
# don't keep an old version of the code in the cache
20-
- julia -e 'if "GraphicalModelLearning" in keys(Pkg.installed()) Pkg.rm("GraphicalModelLearning"); Pkg.rm("GraphicalModelLearning") end'
21-
- julia -e 'Pkg.update()' #make sure we get the latest version of METADATA
22-
- julia -e 'if !("Coverage" in keys(Pkg.installed())) Pkg.add("Coverage") end'
23-
- julia -e 'if !("Documenter" in keys(Pkg.installed())) Pkg.add("Documenter") end'
24-
25-
script:
26-
- if [[ -a .git/shallow ]]; then git fetch --unshallow; fi
27-
- julia -e 'Pkg.clone(pwd())'
28-
- julia -e 'Pkg.test("GraphicalModelLearning", coverage=true)'
29-
20+
- julia -e '(VERSION >= v"0.7" && using Pkg); Pkg.rm("GraphicalModelLearning"); Pkg.rm("GraphicalModelLearning")'
21+
- julia -e '(VERSION >= v"0.7" && using Pkg); Pkg.update()'
3022
after_success:
31-
- julia -e 'using Coverage; cd(Pkg.dir("GraphicalModelLearning")); LCOV.writefile("lcov.info", process_folder(".")); run(pipeline(`curl -s https://codecov.io/bash`, `bash`))'
32-
- julia -e 'cd(Pkg.dir("GraphicalModelLearning")); include(joinpath("docs", "make.jl"))'
23+
- julia -e '(VERSION >= v"0.7" && using Pkg); Pkg.add("Coverage"); cd(Pkg.dir("GraphicalModelLearning")); using Coverage; Codecov.submit(process_folder())'

REQUIRE

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
11
julia 0.6
22

33
JuMP 0.17 0.19-
4+
MathProgBase
45
Ipopt
56
StatsBase
67

7-
Compat 0.17
8+
Compat 1.0

src/GraphicalModelLearning.jl

Lines changed: 51 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,3 @@
1-
isdefined(Base, :__precompile__) && __precompile__()
2-
31
module GraphicalModelLearning
42

53
export learn, inverse_ising
@@ -11,73 +9,103 @@ using JuMP
119
using MathProgBase # for solver type
1210
using Ipopt
1311

14-
using Compat # used for julia v0.5 abstract types
12+
import Compat.LinearAlgebra
13+
import Compat.LinearAlgebra: diag
14+
import Compat.Statistics: mean
15+
16+
import Compat.Nothing
17+
import Compat.undef
18+
import Compat.@info
19+
20+
if VERSION < v"0.7.0-"
21+
function Base.digits(value; base=0, pad=0)
22+
if base != 0 && pad != 0
23+
return digits(value, base, pad)
24+
elseif base != 0
25+
return digits(value, base)
26+
else
27+
return digits(value)
28+
end
29+
end
30+
31+
function Base.digits!(array, value; base=0)
32+
if base != 0
33+
digits!(array, value, base)
34+
else
35+
digits!(array, value)
36+
end
37+
end
38+
end
1539

1640
include("models.jl")
1741

1842
include("sampling.jl")
1943

20-
@compat abstract type GMLFormulation end
44+
abstract type GMLFormulation end
2145

22-
type multiRISE <: GMLFormulation
46+
mutable struct multiRISE <: GMLFormulation
2347
regularizer::Real
2448
symmetrization::Bool
2549
interaction_order::Integer
2650
end
2751
# default values
2852
multiRISE() = multiRISE(0.4, true, 2)
2953

30-
type RISE <: GMLFormulation
54+
mutable struct RISE <: GMLFormulation
3155
regularizer::Real
3256
symmetrization::Bool
3357
end
3458
# default values
3559
RISE() = RISE(0.4, true)
3660

37-
type RISEA <: GMLFormulation
61+
mutable struct RISEA <: GMLFormulation
3862
regularizer::Real
3963
symmetrization::Bool
4064
end
4165
# default values
4266
RISEA() = RISEA(0.4, true)
4367

44-
type logRISE <: GMLFormulation
68+
mutable struct logRISE <: GMLFormulation
4569
regularizer::Real
4670
symmetrization::Bool
4771
end
4872
# default values
4973
logRISE() = logRISE(0.8, true)
5074

51-
type RPLE <: GMLFormulation
75+
mutable struct RPLE <: GMLFormulation
5276
regularizer::Real
5377
symmetrization::Bool
5478
end
5579
# default values
5680
RPLE() = RPLE(0.2, true)
5781

5882

59-
@compat abstract type GMLMethod end
83+
abstract type GMLMethod end
6084

61-
type NLP <: GMLMethod
85+
mutable struct NLP <: GMLMethod
6286
solver::MathProgBase.AbstractMathProgSolver
6387
end
6488
# default values
6589
NLP() = NLP(IpoptSolver(print_level=0))
6690

6791

6892
# default settings
69-
learn{T <: Real}(samples::Array{T,2}) = learn(samples, RISE(), NLP())
70-
learn{T <: Real, S <: GMLFormulation}(samples::Array{T,2}, formulation::S) = learn(samples, formulation, NLP())
93+
learn(samples::Array{T,2}) where T <: Real = learn(samples, RISE(), NLP())
94+
learn(samples::Array{T,2}, formulation::S) where {T <: Real, S <: GMLFormulation} = learn(samples, formulation, NLP())
7195

96+
if VERSION >= v"0.7.0-"
97+
#TODO add better support for Adjoints
98+
learn(samples::LinearAlgebra.Adjoint, args...) = learn(copy(samples), args...)
99+
end
72100

73-
function data_info{T <: Real}(samples::Array{T,2})
101+
function data_info(samples::Array{T,2}) where T <: Real
74102
(num_conf, num_row) = size(samples)
75103
num_spins = num_row - 1
76104
num_samples = sum(samples[1:num_conf,1])
77105
return num_conf, num_spins, num_samples
78106
end
79107

80-
function learn{T <: Real}(samples::Array{T,2}, formulation::multiRISE, method::NLP)
108+
function learn(samples::Array{T,2}, formulation::multiRISE, method::NLP) where T <: Real
81109
num_conf, num_spins, num_samples = data_info(samples)
82110

83111
lambda = formulation.regularizer*sqrt(log((num_spins^2)/0.05)/num_samples)
@@ -149,12 +177,12 @@ function learn{T <: Real}(samples::Array{T,2}, formulation::multiRISE, method::N
149177
return FactorGraph(inter_order, num_spins, :spin, reconstruction)
150178
end
151179

152-
function learn{T <: Real}(samples::Array{T,2}, formulation::RISE, method::NLP)
180+
function learn(samples::Array{T,2}, formulation::RISE, method::NLP) where T <: Real
153181
num_conf, num_spins, num_samples = data_info(samples)
154182

155183
lambda = formulation.regularizer*sqrt(log((num_spins^2)/0.05)/num_samples)
156184

157-
reconstruction = Array{Float64}(num_spins, num_spins)
185+
reconstruction = Array{Float64}(undef, num_spins, num_spins)
158186

159187
for current_spin = 1:num_spins
160188
nodal_stat = [ samples[k, 1 + current_spin] * (i == current_spin ? 1 : samples[k, 1 + i]) for k=1:num_conf , i=1:num_spins]
@@ -205,12 +233,12 @@ function grad_risea_obj(g, var, stat, weight)
205233
end
206234
end
207235

208-
function learn{T <: Real}(samples::Array{T,2}, formulation::RISEA, method::NLP)
236+
function learn(samples::Array{T,2}, formulation::RISEA, method::NLP) where T <: Real
209237
num_conf, num_spins, num_samples = data_info(samples)
210238

211239
lambda = formulation.regularizer*sqrt(log((num_spins^2)/0.05)/num_samples)
212240

213-
reconstruction = Array{Float64}(num_spins, num_spins)
241+
reconstruction = Array{Float64}(undef, num_spins, num_spins)
214242

215243
for current_spin = 1:num_spins
216244
nodal_stat = [ samples[k, 1 + current_spin] * (i == current_spin ? 1 : samples[k, 1 + i]) for k=1:num_conf , i=1:num_spins]
@@ -259,12 +287,12 @@ function learn{T <: Real}(samples::Array{T,2}, formulation::RISEA, method::NLP)
259287
end
260288

261289

262-
function learn{T <: Real}(samples::Array{T,2}, formulation::logRISE, method::NLP)
290+
function learn(samples::Array{T,2}, formulation::logRISE, method::NLP) where T <: Real
263291
num_conf, num_spins, num_samples = data_info(samples)
264292

265293
lambda = formulation.regularizer*sqrt(log((num_spins^2)/0.05)/num_samples)
266294

267-
reconstruction = Array{Float64}(num_spins, num_spins)
295+
reconstruction = Array{Float64}(undef, num_spins, num_spins)
268296

269297
for current_spin = 1:num_spins
270298
nodal_stat = [ samples[k, 1 + current_spin] * (i == current_spin ? 1 : samples[k, 1 + i]) for k=1:num_conf , i=1:num_spins]
@@ -297,12 +325,12 @@ function learn{T <: Real}(samples::Array{T,2}, formulation::logRISE, method::NLP
297325
end
298326

299327

300-
function learn{T <: Real}(samples::Array{T,2}, formulation::RPLE, method::NLP)
328+
function learn(samples::Array{T,2}, formulation::RPLE, method::NLP) where T <: Real
301329
num_conf, num_spins, num_samples = data_info(samples)
302330

303331
lambda = formulation.regularizer*sqrt(log((num_spins^2)/0.05)/num_samples)
304332

305-
reconstruction = Array{Float64}(num_spins, num_spins)
333+
reconstruction = Array{Float64}(undef, num_spins, num_spins)
306334

307335
for current_spin = 1:num_spins
308336
nodal_stat = [ samples[k, 1 + current_spin] * (i == current_spin ? 1 : samples[k, 1 + i]) for k=1:num_conf , i=1:num_spins]

src/models.jl

Lines changed: 36 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -4,26 +4,28 @@ export FactorGraph, jsondata
44

55
alphabets = [:spin, :boolean, :integer, :integer_pos, :real, :real_pos]
66

7-
type FactorGraph{T <: Real}
7+
8+
mutable struct FactorGraph{T <: Real}
89
order::Int
910
varible_count::Int
1011
alphabet::Symbol
1112
terms::Dict{Tuple,T} # TODO, would be nice to have a stronger tuple type here
12-
variable_names::Nullable{Vector{String}}
13-
FactorGraph(a,b,c,d,e) = check_model_data(a,b,c,d,e) ? new(a,b,c,d,e) : error("generic init problem")
13+
variable_names::Union{Vector{String}, Nothing}
14+
#FactorGraph(a,b,c,d,e) = check_model_data(a,b,c,d,e) ? new(a,b,c,d,e) : error("generic init problem")
1415
end
15-
FactorGraph{T <: Real}(order::Int, varible_count::Int, alphabet::Symbol, terms::Dict{Tuple,T}) = FactorGraph{T}(order, varible_count, alphabet, terms, Nullable{Vector{String}}())
16-
FactorGraph{T <: Real}(matrix::Array{T,2}) = convert(FactorGraph{T}, matrix)
17-
FactorGraph{T <: Real}(dict::Dict{Tuple,T}) = convert(FactorGraph{T}, dict)
16+
17+
FactorGraph(order::Int, varible_count::Int, alphabet::Symbol, terms::Dict{Tuple,T}) where T <: Real = FactorGraph{T}(order, varible_count, alphabet, terms, nothing)
18+
FactorGraph(matrix::Array{T,2}) where T <: Real = convert(FactorGraph{T}, matrix)
19+
FactorGraph(dict::Dict{Tuple,T}) where T <: Real = convert(FactorGraph{T}, dict)
1820
FactorGraph(list::Array{Any,1}) = convert(FactorGraph, list)
1921

2022

21-
function check_model_data{T <: Real}(order::Int, varible_count::Int, alphabet::Symbol, terms::Dict{Tuple,T}, variable_names::Nullable{Vector{String}})
23+
function check_model_data(order::Int, varible_count::Int, alphabet::Symbol, terms::Dict{Tuple,T}, variable_names::Union{Vector{String}, Nothing}) where T <: Real
2224
if !in(alphabet, alphabets)
2325
error("alphabet $(alphabet) is not supported")
2426
return false
2527
end
26-
if !isnull(variable_names) && length(variable_names) != varible_count
28+
if variable_names != nothing && length(variable_names) != varible_count
2729
error("expected $(varible_count) but only given $(length(variable_names))")
2830
return false
2931
end
@@ -54,7 +56,7 @@ end
5456
function Base.show(io::IO, gm::FactorGraph)
5557
println(io, "alphabet: ", gm.alphabet)
5658
println(io, "vars: ", gm.varible_count)
57-
if !isnull(gm.variable_names)
59+
if gm.variable_names != nothing
5860
println(io, "variable names: ")
5961
println(io, " ", get(gm.variable_names))
6062
end
@@ -65,19 +67,26 @@ function Base.show(io::IO, gm::FactorGraph)
6567
end
6668
end
6769

68-
function jsondata{T <: Real}(gm::FactorGraph{T})
70+
function jsondata(gm::FactorGraph{T}) where T <: Real
6971
data = []
7072
for k in sort(collect(keys(gm.terms)), by=(x)->(length(x),x))
7173
push!(data, Dict("term" => k, "weight" => gm.terms[k]))
7274
end
7375
return data
7476
end
7577

76-
Base.start(gm::FactorGraph) = start(gm.terms)
77-
Base.next(gm::FactorGraph, state) = next(gm.terms, state)
78-
Base.done(gm::FactorGraph, state) = done(gm.terms, state)
78+
79+
if VERSION < v"0.7.0-"
80+
Base.start(gm::FactorGraph) = start(gm.terms)
81+
Base.next(gm::FactorGraph, state) = next(gm.terms, state)
82+
Base.done(gm::FactorGraph, state) = done(gm.terms, state)
83+
else
84+
Base.iterate(gm::FactorGraph, kwargs...) = Base.iterate(gm.terms, kwargs...)
85+
end
86+
7987

8088
Base.length(gm::FactorGraph) = length(gm.terms)
89+
#Base.size(gm::FactorGraph, a...) = size(gm.terms, a...)
8190

8291
Base.getindex(gm::FactorGraph, i) = gm.terms[i]
8392
Base.keys(gm::FactorGraph) = keys(gm.terms)
@@ -95,15 +104,15 @@ end
95104

96105
diag_key(gm::FactorGraph, i::Int) = tuple(fill(i, gm.order)...)
97106

98-
#Base.diag{T <: Real}(gm::FactorGraph{T}) = [ get(gm.terms, diag_key(gm, i), zero(T)) for i in 1:gm.varible_count ]
107+
#Base.diag(gm::FactorGraph{T}) where T <: Real = [ get(gm.terms, diag_key(gm, i), zero(T)) for i in 1:gm.varible_count ]
99108

100-
Base.DataFmt.writecsv{T <: Real}(io, gm::FactorGraph{T}, args...; kwargs...) = writecsv(io, convert(Array{T,2}, gm), args...; kwargs...)
109+
#Base.DataFmt.writecsv(io, gm::FactorGraph{T}, args...; kwargs...) where T <: Real = writecsv(io, convert(Array{T,2}, gm), args...; kwargs...)
101110

102-
Base.convert{T <: Real}(::Type{FactorGraph}, m::Array{T,2}) = convert(FactorGraph{T}, m)
103-
function Base.convert{T <: Real}(::Type{FactorGraph{T}}, m::Array{T,2})
111+
Base.convert(::Type{FactorGraph}, m::Array{T,2}) where T <: Real = convert(FactorGraph{T}, m)
112+
function Base.convert(::Type{FactorGraph{T}}, m::Array{T,2}) where T <: Real
104113
@assert size(m,1) == size(m,2) #check matrix is square
105114

106-
info("assuming spin alphabet")
115+
@info "assuming spin alphabet"
107116
alphabet = :spin
108117
varible_count = size(m,1)
109118

@@ -132,7 +141,7 @@ function Base.convert{T <: Real}(::Type{FactorGraph{T}}, m::Array{T,2})
132141
return FactorGraph(2, varible_count, alphabet, terms)
133142
end
134143

135-
function Base.convert{T <: Real}(::Type{Array{T,2}}, gm::FactorGraph{T})
144+
function Base.convert(::Type{Array{T,2}}, gm::FactorGraph{T}) where T <: Real
136145
if gm.order != 2
137146
error("cannot convert a FactorGraph of order $(gm.order) to a matrix")
138147
end
@@ -152,8 +161,8 @@ function Base.convert{T <: Real}(::Type{Array{T,2}}, gm::FactorGraph{T})
152161
end
153162

154163

155-
Base.convert{T <: Real}(::Type{Dict}, m::Array{T,2}) = convert(Dict{Tuple,T}, m)
156-
function Base.convert{T <: Real}(::Type{Dict{Tuple,T}}, m::Array{T,2})
164+
Base.convert(::Type{Dict}, m::Array{T,2}) where T <: Real = convert(Dict{Tuple,T}, m)
165+
function Base.convert(::Type{Dict{Tuple,T}}, m::Array{T,2}) where T <: Real
157166
@assert size(m,1) == size(m,2) #check matrix is square
158167

159168
varible_count = size(m,1)
@@ -181,7 +190,7 @@ end
181190

182191

183192
function Base.convert(::Type{FactorGraph}, list::Array{Any,1})
184-
info("assuming spin alphabet")
193+
@info "assuming spin alphabet"
185194
alphabet = :spin
186195

187196
max_variable = 0
@@ -198,15 +207,15 @@ function Base.convert(::Type{FactorGraph}, list::Array{Any,1})
198207
max_variable = max(max_variable, maximum(term))
199208
end
200209

201-
info("dectected $(max_variable) variables with order $(max_order)")
210+
@info "dectected $(max_variable) variables with order $(max_order)"
202211

203212
return FactorGraph(max_order, max_variable, alphabet, terms)
204213
end
205214

206215

207-
Base.convert{T <: Real}(::Type{FactorGraph}, dict::Dict{Tuple,T}) = convert(FactorGraph{T}, dict)
208-
function Base.convert{T <: Real}(::Type{FactorGraph{T}}, dict::Dict{Tuple,T})
209-
info("assuming spin alphabet")
216+
Base.convert(::Type{FactorGraph}, dict::Dict{Tuple,T}) where T <: Real = convert(FactorGraph{T}, dict)
217+
function Base.convert(::Type{FactorGraph{T}}, dict::Dict{Tuple,T}) where T <: Real
218+
@info "assuming spin alphabet"
210219
alphabet = :spin
211220

212221
max_variable = 0
@@ -217,7 +226,7 @@ function Base.convert{T <: Real}(::Type{FactorGraph{T}}, dict::Dict{Tuple,T})
217226
max_variable = max(max_variable, maximum(term))
218227
end
219228

220-
info("dectected $(max_variable) variables with order $(max_order)")
229+
@info "dectected $(max_variable) variables with order $(max_order)"
221230

222231
return FactorGraph(max_order, max_variable, alphabet, dict)
223232
end

0 commit comments

Comments
 (0)