Skip to content

Commit

Permalink
Merge pull request #84 from ashutosh-b-b/optimalstopping
Browse files Browse the repository at this point in the history
Initialising the Optimal Stopping Time Problem
  • Loading branch information
ChrisRackauckas authored May 26, 2020
2 parents 72545ee + 7bf14c9 commit 55c1fbc
Show file tree
Hide file tree
Showing 8 changed files with 157 additions and 4 deletions.
1 change: 1 addition & 0 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ env:
- GROUP=NNODE
- GROUP=NNPDEHan
- GROUP=NNPDENS
- GROUP=NNSTOPPINGTIME
notifications:
email: false
# uncomment the following lines to override the default test script
Expand Down
3 changes: 2 additions & 1 deletion src/NeuralNetDiffEq.jl
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,9 @@ include("ode_solve.jl")
include("pde_solve.jl")
include("pde_solve_ns.jl")
include("kolmogorov_solve.jl")
include("stopping_solve.jl")


export NNODE, TerminalPDEProblem, NNPDEHan, NNPDENS, KolmogorovPDEProblem, NNKolmogorov
export NNODE, TerminalPDEProblem, NNPDEHan, NNPDENS, KolmogorovPDEProblem, NNKolmogorov, NNStopping

end # module
98 changes: 98 additions & 0 deletions src/stopping_solve.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
"""
    NNStopping(chain; opt = Flux.ADAM(0.1), sdealg = EM(), ensemblealg = EnsembleThreads())

Algorithm for solving optimal stopping time problems with a neural network.
`chain` maps the state at each time-grid point to stopping weights; `opt` is
the Flux optimiser used for training; `sdealg` and `ensemblealg` control how
the sample paths of the underlying SDE are simulated.
"""
struct NNStopping{C,O,S,E} <: NeuralNetDiffEqAlgorithm
chain::C        # neural network producing per-timestep stopping weights
opt::O          # Flux optimiser used in training
sdealg::S       # SDE time-stepping algorithm (e.g. EM())
ensemblealg::E  # ensemble simulation strategy (e.g. EnsembleThreads())
end
NNStopping(chain ; opt=Flux.ADAM(0.1) , sdealg = EM() , ensemblealg = EnsembleThreads()) = NNStopping(chain , opt , sdealg , ensemblealg)

"""
    DiffEqBase.solve(prob::SDEProblem, alg::NeuralNetDiffEqAlgorithm; dt, kwargs...)

Solve an optimal stopping time problem: simulate an ensemble of sample paths
of `prob`, train the network `alg.chain` so that its outputs form stopping
probabilities over the time grid `tspan[1]:dt:tspan[2]`, then return the
Monte-Carlo average payoff `g(ts[ti], X[ti])` at the learned stopping index.

`dt` is a mandatory keyword argument (it has no default).  The payoff
function `g(t, x)` is read back from the problem's extra keyword arguments
(`prob.kwargs.data.g`) — NOTE(review): this relies on how DiffEqBase stores
unrecognised kwargs; confirm it holds across DiffEqBase versions.
"""
function DiffEqBase.solve(
prob::SDEProblem,
alg::NeuralNetDiffEqAlgorithm;
abstol = 1f-6,
verbose = false,
maxiters = 300,
trajectories = 1000,
save_everystep = false,
dt,
kwargs...
)

tspan = prob.tspan
sigma = prob.g          # diffusion term of the SDE
μ = prob.f              # drift term of the SDE
g = prob.kwargs.data.g  # payoff function g(t, x) supplied via SDEProblem kwargs
u0 = prob.u0
ts = tspan[1]:dt:tspan[2]   # discrete time grid
N = size(ts)[1]             # number of grid points
T = tspan[2]

m = alg.chain
opt = alg.opt
sdealg = alg.sdealg
ensemblealg = alg.ensemblealg

# Rebuild the problem without the extra kwargs and simulate the ensemble of
# sample paths once; the same fixed paths are reused for every training step.
prob = SDEProblem(μ,sigma,u0,tspan)
ensembleprob = EnsembleProblem(prob)
sim = solve(ensembleprob, sdealg, ensemblealg, dt=dt,trajectories=trajectories,adaptive=false)
payoff = []
times = []
iter = 0
# for u in sim.u
# `un` memoises Un(1..n) for the path currently being processed; callers reset
# it to [] before switching paths.  NOTE(review): it is captured and
# reassigned by the closures below, so evaluation order matters here.
un = []
# Un(n, X): probability mass the network assigns to stopping at grid index n
# along path X, scaled so the masses over all indices sum to at most 1.
function Un(n , X )
if size(un)[1] >= n
return un[n]
else
if(n == 1)
ans = first(m(X[1])[1])
un = [ans]
return ans
else
# The term `n + 1 - size(ts)[1]` reaches 1 at the final index, forcing all
# remaining probability mass to be spent by the end of the grid.
ans = max(first(m(X[n])[n]) , n + 1 - size(ts)[1])*(1 - sum(Un(i , X ) for i in 1:n-1))
un = vcat( un , ans)
return ans
end
end
end

# Training objective: maximise the expected payoff, written as minimising
# `10000 - reward`.  NOTE(review): 10000 is an arbitrary constant offset and
# does not affect the gradients.
function loss()
reward = 0.00
for u in sim.u
X = u.u
reward = reward + sum(Un(i , X )*g(ts[i] , X[i]) for i in 1 : size(ts)[1])
un = []   # reset the memo before the next path
end
return 10000 - reward
end
dataset = Iterators.repeated(() , maxiters)

# Per-iteration callback: recompute and report the loss, resetting the memo.
cb = function ()
l = loss()
un = []
println("Current loss is: $l")
end
Flux.train!(loss, Flux.params(m), dataset, opt; cb = cb)

# Pick the stopping index `ti` from the FIRST simulated path: stop at the
# first index where the accumulated stopping probability dominates what is
# left.  NOTE(review): the same ti is then applied to every trajectory below.
Usum = 0
ti = 0
Xt = sim.u[1].u
for i in 1:N
un = []
Usum = Usum + Un(i , Xt)
if Usum >= 1 - Un(i , Xt)
ti = i
break
end
end
# Evaluate the payoff at the chosen stopping index on every simulated path.
for u in sim.u
X = u.u
price = g(ts[ti] , X[ti])
payoff = vcat(payoff , price)
times = vcat(times, ti)
iter = iter + 1
# println("SUM : $sump")
# println("TIME : $ti")
end
# Return the Monte-Carlo mean payoff (the estimated price).
sum(payoff)/size(payoff)[1]
end #solve
5 changes: 3 additions & 2 deletions test/NNODE_tests.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
using Test, Flux, NeuralNetDiffEq, Optim
using DiffEqDevTools
using Test, Flux, Optim
println("NNODE Tests")
using DiffEqDevTools , NeuralNetDiffEq
using Random
Random.seed!(100)
# Run a solve on scalars
Expand Down
1 change: 1 addition & 0 deletions test/NNPDEHan_tests.jl
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
using Flux, Zygote, LinearAlgebra, Statistics
println("NNPDEHAN_tests")
using Test, NeuralNetDiffEq

# one-dimensional heat equation
Expand Down
1 change: 1 addition & 0 deletions test/NNPDENS_tests.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
using Flux, Zygote, StochasticDiffEq
using LinearAlgebra, Statistics
println("NNPDENS_tests")
using Test, NeuralNetDiffEq

println("one-dimensional heat equation")
Expand Down
47 changes: 47 additions & 0 deletions test/Stopping_tests.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
using Test, Flux , StochasticDiffEq , LinearAlgebra
println("Optimal Stopping Time Test")
using NeuralNetDiffEq
# Problem setup: 1-D American put under geometric Brownian motion.
d = 1                    # spatial dimension of the SDE state
r = 0.04f0               # risk-free interest rate
beta = 0.2f0             # volatility
T = 1
u0 = fill(80.00 , d , 1) # initial asset price
sdealg = EM()            # Euler–Maruyama time stepping
ensemblealg = EnsembleThreads()
f(du,u,p,t) = (du .= r*u)                    # drift: r * u
sigma(du,u,p,t) = (du .= Diagonal(beta*u))   # diffusion: beta * u on the diagonal
tspan = (0.0 , 1.0)
N = 50                   # number of time-grid points
dt = tspan[2]/49         # step size so tspan[1]:dt:tspan[2] has exactly N points
K = 100.00               # strike price
# Discounted payoff of the American put: exp(-r*t) * (K - max(x))⁺.
# Uses the script-level globals `r` (rate) and `K` (strike).
g(t, x) = exp(-r * t) * max(K - maximum(x), 0)

# The payoff g is smuggled through the SDEProblem's extra kwargs; NNStopping's
# solve reads it back out of prob.kwargs.
prob = SDEProblem(f , sigma , u0 , tspan ; g = g)
opt = Flux.ADAM(0.1)
# Network output has N entries — one stopping weight per time-grid point —
# normalised by softmax.
m = Chain(Dense(d , 5, tanh), Dense(5, 16 , tanh) , Dense(16 , N ), softmax)
sol = solve(prob, NeuralNetDiffEq.NNStopping( m, opt , sdealg , ensemblealg), verbose = true, dt = dt,
abstol=1e-6, maxiters = 15 , trajectories = 150)

##Analytical Binomial Tree approach for American Options
"""
    BinomialTreeAM1D(S0, N, r, beta; T = T, K = K)

Cox-Ross-Rubinstein binomial-tree reference price of a 1-D American put with
spot `S0`, `N` tree levels, rate `r` and volatility `beta`.

Maturity `T` and strike `K` are now keyword arguments (defaulting to the
script-level globals of the same name for backward compatibility), so the
function can be called standalone.
"""
function BinomialTreeAM1D(S0, N, r, beta; T = T, K = K)
    dT = T / N                   # time step per tree level
    u = exp(beta * sqrt(dT))     # up factor
    d = 1 / u                    # down factor
    S_T = [S0 * (u^j) * (d^(N - j)) for j in 0:N]   # terminal asset prices
    a = exp(r * dT)
    p = (a - d) / (u - d)        # risk-neutral up probability
    q = 1.0 - p
    V = [max(K - x, 0) for x in S_T]                # terminal put payoffs
    for i in N-1:-1:0
        # Discounted continuation values (vector length kept at N+1, as in
        # the original formulation; only V[1] is read at the end).
        V[1:end-1] = exp(-r * dT) .* (p * V[2:end] + q * V[1:end-1])
        S_T = S_T * u            # shift asset prices back one level
        V = [max(K - S_T[j], V[j]) for j in 1:size(S_T)[1]]   # early exercise
    end
    return V[1]
end
# Compare the learned price against the binomial-tree reference.
# `err` (not `error`) so we do not shadow `Base.error`.
real_sol = BinomialTreeAM1D(u0[1] , N , r , beta)
err = abs(sol - real_sol)
@test err < 0.5
5 changes: 4 additions & 1 deletion test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -20,5 +20,8 @@ const is_TRAVIS = haskey(ENV,"TRAVIS")
if GROUP == "All" || GROUP == "NNKOLMOGOROV"
@time @safetestset "NNKolmogorov" begin include("NNKolmogorov_tests.jl") end
end

if GROUP == "All" || GROUP == "NNSTOPPINGTIME"
@time @safetestset "NNStopping" begin include("Stopping_tests.jl") end
end

end

0 comments on commit 55c1fbc

Please sign in to comment.