From 15251afb69f5a9771ec5885633f9d2a83c1260f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Tue, 6 Oct 2015 21:24:43 +0200 Subject: [PATCH 001/113] Fixed step integration working in iterator mode --- src/runge_kutta.jl | 153 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 149 insertions(+), 4 deletions(-) diff --git a/src/runge_kutta.jl b/src/runge_kutta.jl index a080602de..8000e2584 100644 --- a/src/runge_kutta.jl +++ b/src/runge_kutta.jl @@ -6,6 +6,9 @@ # Tableaus for explicit Runge-Kutta methods ########################################### +import Base: start, next, done + + immutable TableauRKExplicit{Name, S, T} <: Tableau{Name, S, T} order::(@compat(Tuple{Vararg{Int}})) # the order of the methods a::Matrix{T} @@ -20,20 +23,30 @@ immutable TableauRKExplicit{Name, S, T} <: Tableau{Name, S, T} @assert istril(a) @assert S==length(c)==size(a,1)==size(a,2)==size(b,2) @assert size(b,1)==length(order) - @assert norm(sum(a,2)-c'',Inf)<1e-10 # consistency. + @assert norm(sum(a,2)-c'',Inf)<1e-10 # consistency. 
new(order,a,b,c) end end + + function TableauRKExplicit{T}(name::Symbol, order::(@compat(Tuple{Vararg{Int}})), a::Matrix{T}, b::Matrix{T}, c::Vector{T}) TableauRKExplicit{name,length(c),T}(order, a, b, c) end + + function TableauRKExplicit(name::Symbol, order::(@compat(Tuple{Vararg{Int}})), T::Type, a::Matrix, b::Matrix, c::Vector) TableauRKExplicit{name,length(c),T}(order, convert(Matrix{T},a), convert(Matrix{T},b), convert(Vector{T},c) ) end + + conv_field{T,N}(D,a::Array{T,N}) = convert(Array{D,N}, a) + + +S(tab::TableauRKExplicit) = length(tab.c) + function Base.convert{Tnew<:Real,Name,S,T}(::Type{Tnew}, tab::TableauRKExplicit{Name,S,T}) # Converts the tableau coefficients to the new type Tnew newflds = () @@ -100,7 +113,7 @@ const bt_rk23 = TableauRKExplicit(:bogacki_shampine,(2,3), Rational{Int64}, 2/9 1/3 4/9 0], [7/24 1/4 1/3 1/8 2/9 1/3 4/9 0], - [0, 1//2, 3//4, 1] + [0, 1//2, 3//4, 1] ) # Fehlberg https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta%E2%80%93Fehlberg_method @@ -173,7 +186,7 @@ function oderk_fixed{N,S}(fn, y0::AbstractVector, tspan, # TODO: instead of AbstractVector use a Holy-trait # Needed interface: - # On components: + # On components: # On y0 container: length, deepcopy, similar, setindex! # On time container: getindex, convert. length @@ -233,7 +246,7 @@ function oderk_adapt{N,S}(fn, y0::AbstractVector, tspan, btab_::TableauRKExplici # - note that the type of the components might change! # On y0 container: length, similar, setindex! # On time container: getindex, convert, length - + # For y0 which support indexing. Currently y0<:AbstractVector but # that could be relaxed with a Holy-trait. 
!isadaptive(btab_) && error("Can only use this solver with an adaptive RK Butcher table") @@ -478,3 +491,135 @@ function hermite_interp(tquery,t,dt,y0,y1,f0,f1) hermite_interp!(y,tquery,t,dt,y0,y1,f0,f1) return y end + +#################### +# Iterator methods # +#################### + +type TempArrays + y; ks +end + +type Step + t; y; dy; dt +end + +type State + tmp :: TempArrays + prev_steps :: Vector{Step} + last_tout + first_step +end + +immutable Problem + F + btab + y0 + t0 + dt0 + tspan + points +end + +function newProblem(fn, y0, t0, dt0; tspan = [t0,Inf], method = bt_feuler, points = :all) + return Problem(fn, method, y0, t0, dt0, tspan, points) +end + +function start(problem :: Problem) + S = length(problem.btab.c) + t0 = problem.t0 + y0 = problem.y0 + dy0 = problem.F(t0,y0) + dt0 = problem.dt0 + tmp = TempArrays(problem.y0, Array(typeof(y0), S)) + step0 = Step(t0,y0,dy0,dt0) + # initialize with two identical steps + prev_steps = [step0, deepcopy(step0)] + return State(tmp, prev_steps, t0, true) +end + +function calc_next_k_2!(tmp :: TempArrays, i, step :: Step, prob :: Problem) + # Calculates the next ks and puts it into ks[s] + # - ks and ytmp are modified inside this function. 
+ + # Needed interface: + # On components: +, * + # On y0 container: setindex!, getindex, fn + + dof = length(step.y) + t, dt, a, c = step.t, step.dt, prob.btab.a, prob.btab.c + + tmp.y[:] = step.y + for j=1:i-1 + # tmp.y += dt * btab.a[i,j] * ks[j] + for d=1:dof + tmp.y[d] += dt * tmp.ks[j][d] * a[i,j] + end + end + tmp.ks[i] = prob.F(t + c[i]*dt, tmp.y) + + nothing +end + +function next(prob :: Problem, state :: State) + + s0, s1 = state.prev_steps + t0, t1 = s0.t, s1.t + + if state.first_step + state.first_step = false + return ((s0.t,s0.y),state) + end + + # the next output time that we aim at + t_goal = prob.tspan[findfirst(t->(t>state.last_tout), prob.tspan)] + + # the t0 == t1 part ensures that we make at least one step + while t1 < t_goal + + # s1 is the starting point for the new step, while the new + # step is saved in s0 + + s0.y[:] = s1.y + s0.t = s1.t + + # perform a step and save it to s0 + dof = length(s1.y) + for s=1:S(prob.btab) + calc_next_k_2!(state.tmp, s, s1, prob) + for d=1:dof + s0.y[d] += s1.dt * prob.btab.b[s]*state.tmp.ks[s][d] + end + end + s0.t += s1.dt + s0.dy = prob.F(s0.t,s0.y) + + # swap s0 and s1 + state.prev_steps[:] = [s1,s0] + + # reassign the steps + s0, s1 = state.prev_steps + t0, t1 = s0.t, s1.t + + # we made a successfull step and points == :all + if prob.points == :all + t_goal = min(t_goal,t1) + break + end + end + + # at this point we have t_goal∈[t0,t1] so we can apply the + # interpolation + + hermite_interp!(state.tmp.y,t_goal,t0,t1-t0,s0.y,s1.y,s0.dy,s1.dy) + + # update the last output time + state.last_tout = t_goal + + return ((s1.t,s1.y),state) + +end + +function done(prob :: Problem, state :: State) + state.prev_steps[2].t >= prob.tspan[end] +end From ec2c14f06469f5a988b387c771fda8b00070d794 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 8 Oct 2015 17:57:58 +0200 Subject: [PATCH 002/113] New dense output and file structure --- src/algorithms.jl | 336 
++++++++++++++++++++++++++++++++++++++++++++++ src/dense.jl | 95 +++++++++++++ src/iterators.jl | 69 ++++++++++ src/tableaus.jl | 159 ++++++++++++++++++++++ 4 files changed, 659 insertions(+) create mode 100644 src/algorithms.jl create mode 100644 src/dense.jl create mode 100644 src/iterators.jl create mode 100644 src/tableaus.jl diff --git a/src/algorithms.jl b/src/algorithms.jl new file mode 100644 index 000000000..0589fea14 --- /dev/null +++ b/src/algorithms.jl @@ -0,0 +1,336 @@ +################################ +# Fixed step Runge-Kutta methods +################################ + +type Step + t; y; dy; dt +end + +# TODO: iterator method +ode1(fn, y0, tspan) = oderk_fixed(fn, y0, tspan, bt_feuler) +ode2_midpoint(fn, y0, tspan) = oderk_fixed(fn, y0, tspan, bt_midpoint) +ode2_heun(fn, y0, tspan) = oderk_fixed(fn, y0, tspan, bt_heun) +ode4(fn, y0, tspan) = oderk_fixed(fn, y0, tspan, bt_rk4) + +function oderk_fixed(fn, y0, tspan, btab::TableauRKExplicit) + # Non-arrays y0 treat as scalar + fn_(t, y) = [fn(t, y[1])] + t,y = oderk_fixed(fn_, [y0], tspan, btab) + return t, vcat_nosplat(y) +end +function oderk_fixed{N,S}(fn, y0::AbstractVector, tspan, + btab_::TableauRKExplicit{N,S}) + # TODO: instead of AbstractVector use a Holy-trait + + # Needed interface: + # On components: + # On y0 container: length, deepcopy, similar, setindex! + # On time container: getindex, convert. 
length + + Et, Eyf, Ty, btab = make_consistent_types(fn, y0, tspan, btab_) + dof = length(y0) + + ys = Array(Ty, length(tspan)) + allocate!(ys, y0, dof) + ys[1] = deepcopy(y0) + + tspan = convert(Vector{Et}, tspan) + # work arrays: + ks = Array(Ty, S) + # allocate!(ks, y0, dof) # no need to allocate as fn is not in-place + ytmp = similar(y0, Eyf, dof) + for i=1:length(tspan)-1 + dt = tspan[i+1]-tspan[i] + ys[i+1][:] = ys[i] + for s=1:S + calc_next_k!(ks, ytmp, ys[i], s, fn, tspan[i], dt, dof, btab) + for d=1:dof + ys[i+1][d] += dt * btab.b[s]*ks[s][d] + end + end + end + return tspan, ys +end + +############################## +# Adaptive Runge-Kutta methods +############################## + +ode21(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_rk21; kwargs...) +ode23(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_rk23; kwargs...) +ode45_fe(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_rk45; kwargs...) +ode45_dp(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_dopri5; kwargs...) +# Use Dormand-Prince version of ode45 by default +const ode45 = ode45_dp +ode78(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_feh78; kwargs...) + +function oderk_adapt(fn, y0, tspan, btab::TableauRKExplicit; kwords...) + # For y0 which don't support indexing. + fn_ = (t, y) -> [fn(t, y[1])] + t,y = oderk_adapt(fn_, [y0], tspan, btab; kwords...) + return t, vcat_nosplat(y) +end +function oderk_adapt{N,S}(fn, y0::AbstractVector, tspan, btab_::TableauRKExplicit{N,S}; + reltol = 1.0e-5, abstol = 1.0e-8, + norm=Base.norm, + minstep=abs(tspan[end] - tspan[1])/1e18, + maxstep=abs(tspan[end] - tspan[1])/2.5, + initstep=0., + points=:all + ) + # Needed interface: + # On components: + # - note that the type of the components might change! + # On y0 container: length, similar, setindex! + # On time container: getindex, convert, length + + # For y0 which support indexing. Currently y0<:AbstractVector but + # that could be relaxed with a Holy-trait. 
+ !isadaptive(btab_) && error("Can only use this solver with an adaptive RK Butcher table") + + Et, Eyf, Ty, btab = make_consistent_types(fn, y0, tspan, btab_) + # parameters + order = minimum(btab.order) + timeout_const = 5 # after step reduction do not increase step for + # timeout_const steps + + ## Initialization + dof = length(y0) + tspan = convert(Vector{Et}, tspan) + t = tspan[1] + tstart = tspan[1] + tend = tspan[end] + + # work arrays: + y = similar(y0, Eyf, dof) # y at time t + y[:] = y0 + ytrial = similar(y0, Eyf, dof) # trial solution at time t+dt + yerr = similar(y0, Eyf, dof) # error of trial solution + ks = Array(Ty, S) + # allocate!(ks, y0, dof) # no need to allocate as fn is not in-place + ytmp = similar(y0, Eyf, dof) + + # output ys + nsteps_fixed = length(tspan) # these are always output + ys = Array(Ty, nsteps_fixed) + allocate!(ys, y0, dof) + ys[1] = y0 + + # Option points determines where solution is returned: + if points==:all + tspan_fixed = tspan + tspan = Et[tstart] + iter_fixed = 2 # index into tspan_fixed + sizehint!(tspan, nsteps_fixed) + elseif points!=:specified + error("Unrecognized option points==$points") + end + # Time + dt, tdir, ks[1] = hinit(fn, y, tstart, tend, order, reltol, abstol) # sets ks[1]=f0 + if initstep!=0 + dt = sign(initstep)==tdir ? initstep : error("initstep has wrong sign.") + end + # Diagnostics + dts = Et[] + errs = Float64[] + steps = [0,0] # [accepted, rejected] + + ## Integration loop + islaststep = abs(t+dt-tend)<=eps(tend) ? 
true : false + timeout = 0 # for step-control + iter = 2 # the index into tspan and ys + while true + # do one step (assumes ks[1]==f0) + rk_embedded_step!(ytrial, yerr, ks, ytmp, y, fn, t, dt, dof, btab) + # Check error and find a new step size: + err, newdt, timeout = stepsize_hw92!(dt, tdir, y, ytrial, yerr, order, timeout, + dof, abstol, reltol, maxstep, norm) + + if err<=1.0 # accept step + # diagnostics + steps[1] +=1 + push!(dts, dt) + push!(errs, err) + + # Output: + f0 = ks[1] + f1 = isFSAL(btab) ? ks[S] : fn(t+dt, ytrial) + if points==:specified + # interpolate onto given output points + while iter-1= tdir*tend + dt = tend-t + islaststep = true # next step is the last, if it succeeds + end + elseif abs(newdt)0 no step size increase is allowed, timeout is + # decremented in here. + # + # Returns the error, newdt and the number of timeout-steps + # + # TODO: + # - allow component-wise reltol and abstol? + # - allow other norms + + # Needed interface: + # On components: isoutofdomain + # On y0 container: norm, get/setindex + + timout_after_nan = 5 + fac = [0.8, 0.9, 0.25^(1/(order+1)), 0.38^(1/(order+1))][1] + facmax = 5.0 # maximal step size increase. 1.5-5 + facmin = 1./facmax # maximal step size decrease. ? + + # in-place calculate xerr./tol + for d=1:dof + # if outside of domain (usually NaN) then make step size smaller by maximum + isoutofdomain(xtrial[d]) && return 10., dt*facmin, timout_after_nan + xerr[d] = xerr[d]/(abstol + max(norm(x0[d]), norm(xtrial[d]))*reltol) # Eq 4.10 + end + err = norm(xerr, 2) # Eq. 4.11 + newdt = min(maxstep, tdir*dt*max(facmin, fac*(1/err)^(1/(order+1)))) # Eq 4.13 modified + if timeout>0 + newdt = min(newdt, dt) + timeout -= 1 + end + return err, tdir*newdt, timeout +end + +function calc_next_k!{Ty}(ks::Vector, ytmp::Ty, y, s, fn, t, dt, dof, btab) + # Calculates the next ks and puts it into ks[s] + # - ks and ytmp are modified inside this function. 
+ + # Needed interface: + # On components: +, * + # On y0 container: setindex!, getindex, fn + + ytmp[:] = y + for ss=1:s-1, d=1:dof + ytmp[d] += dt * ks[ss][d] * btab.a[s,ss] + end + ks[s] = fn(t + btab.c[s]*dt, ytmp)::Ty + nothing +end + +# Helper functions: +function allocate!{T}(vec::Vector{T}, y0, dof) + # Allocates all vectors inside a Vector{Vector} using the same + # kind of container as y0 has and element type eltype(eltype(vec)). + for s=1:length(vec) + vec[s] = similar(y0, eltype(T), dof) + end +end +function index_or_push!(vec, i, val) + # Fills in the vector until there is no space, then uses push! + # instead. + if length(vec)>=i + vec[i] = val + else + push!(vec, val) + end +end +vcat_nosplat(y) = eltype(y[1])[el[1] for el in y] # Does vcat(y...) without the splatting + +# function hermite_interp!(y, tquery,t,dt,y0,y1,f0,f1) +function hermite_interp!(y,t,step0::Step,step1::Step) + # For dense output see Hairer & Wanner p.190 using Hermite + # interpolation. Updates y in-place. + # + # f_0 = f(x_0 , y_0) , f_1 = f(x_0 + h, y_1 ) + # this is O(3). TODO for higher order. 
+ + y0, y1 = step0.y, step1.y + dy0, dy1 = step0.dy, step1.dy + + dt = step1.t-step0.t + theta = (t-step0.t)/dt + for i=1:length(y0) + y[i] = ((1-theta)*y0[i] + theta*y1[i] + theta*(theta-1) * + ((1-2*theta)*(y1[i]-y0[i]) + (theta-1)*dt*dy0[i] + theta*dt*dy1[i]) ) + end + nothing +end + +function hermite_interp(tquery,t,dt,y0,y1,f0,f1) + # Returns the y instead of in-place + y = similar(y0) + hermite_interp!(y,tquery,t,dt,y0,y1,f0,f1) + return y +end diff --git a/src/dense.jl b/src/dense.jl new file mode 100644 index 000000000..3c700be55 --- /dev/null +++ b/src/dense.jl @@ -0,0 +1,95 @@ +function Step(problem :: RKProblem) + t0 = problem.t0 + y0 = problem.y0 + dy0 = problem.F(t0,y0) + dt0 = problem.dt0 + return Step(t0,y0,dy0,dt0) +end + +type DenseState + s0; s1 + last_tout + first_step + rkstate + # used for storing the interpolation result + ytmp +end + +immutable DenseProblem + rkprob :: RKProblem + points :: Symbol + tspan +end + + +function newDenseProblem(args...; tspan = [Inf], points = :all, opt_args...) 
+ rkprob = newRKProblem(args...; opt_args..., tstop = tspan[end]) + return DenseProblem(rkprob, points, tspan) +end + +function start(prob :: DenseProblem) + step0 = Step(prob.rkprob) + step1 = Step(prob.rkprob) + rkstate = start(prob.rkprob) + ytmp = deepcopy(prob.rkprob.y0) + return DenseState(step0, step1, prob.rkprob.t0, true, rkstate, ytmp) +end + +function next(prob :: DenseProblem, state :: DenseState) + + s0, s1 = state.s0, state.s1 + t0, t1 = s0.t, s1.t + + if state.first_step + state.first_step = false + return ((s0.t,s0.y),state) + end + + # the next output time that we aim at + t_goal = prob.tspan[findfirst(t->(t>state.last_tout), prob.tspan)] + + # the t0 == t1 part ensures that we make at least one step + while t1 < t_goal + + # s1 is the starting point for the new step, while the new + # step is saved in s0 + + if done(prob.rkprob, state.rkstate) + error("The iterator was exhausted before the dense output compltede.") + else + # at this point s0 holds the new step, "s2" if you will + ((s0.t,s0.y[:]),state.rkstate) = next(prob.rkprob, state.rkstate) + end + + # swap s0 and s1 + s0, s1 = s1, s0 + # update the state + state.s0, state.s1 = s0, s1 + # and times + t0, t1 = s0.t, s1.t + + # we made a successfull step and points == :all + if prob.points == :all + t_goal = min(t_goal,t1) + break + end + end + + # at this point we have t_goal∈[t0,t1] so we can apply the + # interpolation + + F = prob.rkprob.F + s0.dy[:], s1.dy[:] = F(t0,s0.y), F(t1,s1.y) + + hermite_interp!(state.ytmp,t_goal,s0,s1) + + # update the last output time + state.last_tout = t_goal + + return ((t_goal,state.ytmp),state) + +end + +function done(prob :: DenseProblem, state :: DenseState) + return done(prob.rkprob, state.rkstate) || state.s1.t >= prob.tspan[end] +end diff --git a/src/iterators.jl b/src/iterators.jl new file mode 100644 index 000000000..88d6b3b7c --- /dev/null +++ b/src/iterators.jl @@ -0,0 +1,69 @@ +#################### +# Iterator methods # +#################### + 
+type TempArrays + y; ks +end + +type RKState + t; dt; y; tmp :: TempArrays +end + +immutable RKProblem + F + btab + y0 + t0 + dt0 + tstop +end + +function newRKProblem(fn, y0, t0, dt0; tstop = Inf, method = bt_feuler) + return RKProblem(fn, method, y0, t0, dt0, tstop) +end + +function start(problem :: RKProblem) + t0 = problem.t0 + dt0 = problem.dt0 + y0 = problem.y0 + tmp = TempArrays(problem.y0, Array(typeof(y0), S(problem.btab))) + return RKState(t0, dt0, y0, tmp) +end + +function next(prob :: RKProblem, state :: RKState) + + dof = length(state.y) + for s=1:S(prob.btab) + calc_next_k!(state.tmp, s, state, prob) + for d=1:dof + state.y[d] += state.dt * prob.btab.b[s]*state.tmp.ks[s][d] + end + end + + state.t += state.dt + + return ((state.t,state.y), state) + +end + +function done(prob :: RKProblem, state :: RKState) + return state.t >= prob.tstop +end + + +function calc_next_k!(tmp :: TempArrays, i, state :: RKState, prob :: RKProblem) + dof = length(state.y) + t, dt, a, c = state.t, state.dt, prob.btab.a, prob.btab.c + + tmp.y[:] = state.y + for j=1:i-1 + # tmp.y += dt * btab.a[i,j] * ks[j] + for d=1:dof + tmp.y[d] += dt * tmp.ks[j][d] * a[i,j] + end + end + tmp.ks[i] = prob.F(t + c[i]*dt, tmp.y) + + nothing +end diff --git a/src/tableaus.jl b/src/tableaus.jl new file mode 100644 index 000000000..da90d3eb6 --- /dev/null +++ b/src/tableaus.jl @@ -0,0 +1,159 @@ +########################################### +# Tableaus for explicit Runge-Kutta methods +########################################### + + +immutable TableauRKExplicit{Name, S, T} <: Tableau{Name, S, T} + order::(@compat(Tuple{Vararg{Int}})) # the order of the methods + a::Matrix{T} + # one or several row vectors. First row is used for the step, + # second for error calc. 
+ b::Matrix{T} + c::Vector{T} + function TableauRKExplicit(order,a,b,c) + @assert isa(S,Integer) + @assert isa(Name,Symbol) + @assert c[1]==0 + @assert istril(a) + @assert S==length(c)==size(a,1)==size(a,2)==size(b,2) + @assert size(b,1)==length(order) + @assert norm(sum(a,2)-c'',Inf)<1e-10 # consistency. + new(order,a,b,c) + end +end + + +function TableauRKExplicit{T}(name::Symbol, order::(@compat(Tuple{Vararg{Int}})), + a::Matrix{T}, b::Matrix{T}, c::Vector{T}) + TableauRKExplicit{name,length(c),T}(order, a, b, c) +end + + +function TableauRKExplicit(name::Symbol, order::(@compat(Tuple{Vararg{Int}})), T::Type, + a::Matrix, b::Matrix, c::Vector) + TableauRKExplicit{name,length(c),T}(order, convert(Matrix{T},a), + convert(Matrix{T},b), convert(Vector{T},c) ) +end + + +conv_field{T,N}(D,a::Array{T,N}) = convert(Array{D,N}, a) + + +S(tab::TableauRKExplicit) = length(tab.c) + +function Base.convert{Tnew<:Real,Name,S,T}(::Type{Tnew}, tab::TableauRKExplicit{Name,S,T}) + # Converts the tableau coefficients to the new type Tnew + newflds = () + @compat for n in fieldnames(tab) + fld = getfield(tab,n) + if eltype(fld)==T + newflds = tuple(newflds..., conv_field(Tnew, fld)) + else + newflds = tuple(newflds..., fld) + end + end + TableauRKExplicit{Name,S,Tnew}(newflds...) # TODO: could this be done more generically in a type-stable way? +end + + +isexplicit(b::TableauRKExplicit) = istril(b.a) # Test whether it's an explicit method +isadaptive(b::TableauRKExplicit) = size(b.b, 1)==2 + + +# First same as last. Means ks[:,end]=ks_nextstep[:,1], c.f. 
H&W p.167 +isFSAL(btab::TableauRKExplicit) = btab.a[end,:]==btab.b[1,:] && btab.c[end]==1 # the latter is not needed really + +## Tableaus for explicit RK methods +# Fixed step: +const bt_feuler = TableauRKExplicit(:feuler,(1,), Rational{Int64}, + zeros(Int,1,1), + [1]', + [0] + ) +const bt_midpoint = TableauRKExplicit(:midpoint,(2,), Rational{Int64}, + [0 0 + 1//2 0], + [0, 1]', + [0, 1//2] + ) +const bt_heun = TableauRKExplicit(:heun,(2,), Rational{Int64}, + [0 0 + 1 0], + [1//2, 1//2]', + [0, 1]) + +const bt_rk4 = TableauRKExplicit(:rk4,(4,),Rational{Int64}, + [0 0 0 0 + 1//2 0 0 0 + 0 1//2 0 0 + 0 0 1 0], + [1//6, 1//3, 1//3, 1//6]', + [0, 1//2, 1//2, 1]) + +# Adaptive step: +# Heun Euler https://en.wikipedia.org/wiki/Runge–Kutta_methods +const bt_rk21 = TableauRKExplicit(:heun_euler,(2,1), Rational{Int64}, + [0 0 + 1 0], + [1//2 1//2 + 1 0], + [0, 1]) + +# Bogacki–Shampine coefficients +const bt_rk23 = TableauRKExplicit(:bogacki_shampine,(2,3), Rational{Int64}, + [0 0 0 0 + 1/2 0 0 0 + 0 3/4 0 0 + 2/9 1/3 4/9 0], + [7/24 1/4 1/3 1/8 + 2/9 1/3 4/9 0], + [0, 1//2, 3//4, 1] + ) + +# Fehlberg https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta%E2%80%93Fehlberg_method +const bt_rk45 = TableauRKExplicit(:fehlberg,(4,5),Rational{Int64}, + [ 0 0 0 0 0 0 + 1//4 0 0 0 0 0 + 3//32 9//32 0 0 0 0 + 1932//2197 -7200//2197 7296//2197 0 0 0 + 439//216 -8 3680//513 -845//4104 0 0 + -8//27 2 -3544//2565 1859//4104 -11//40 0 ], + [25//216 0 1408//2565 2197//4104 -1//5 0 + 16//135 0 6656//12825 28561//56430 -9//50 2//55], + [0, 1//4, 3//8, 12//13, 1, 1//2]) + +# Dormand-Prince https://en.wikipedia.org/wiki/Dormand%E2%80%93Prince_method +const bt_dopri5 = TableauRKExplicit(:dopri, (5,4), Rational{Int64}, + [0 0 0 0 0 0 0 + 1//5 0 0 0 0 0 0 + 3//40 9//40 0 0 0 0 0 + 44//45 -56//15 32//9 0 0 0 0 + 19372//6561 -25360//2187 64448//6561 -212//729 0 0 0 + 9017//3168 -355//33 46732//5247 49//176 -5103//18656 0 0 + 35//384 0 500//1113 125//192 -2187//6784 11//84 0], + [35//384 0 500//1113 
125//192 -2187//6784 11//84 0 + 5179//57600 0 7571//16695 393//640 -92097//339200 187//2100 1//40], + [0, 1//5, 3//10, 4//5, 8//9, 1, 1] + ) + +# Fehlberg 7(8) coefficients +# Values from pag. 65, Fehlberg, Erwin. "Classical fifth-, sixth-, seventh-, and eighth-order Runge-Kutta formulas with stepsize control". +# National Aeronautics and Space Administration. +const bt_feh78 = TableauRKExplicit(:feh78, (7,8), Rational{Int64}, + [ 0 0 0 0 0 0 0 0 0 0 0 0 0 + 2//27 0 0 0 0 0 0 0 0 0 0 0 0 + 1//36 1//12 0 0 0 0 0 0 0 0 0 0 0 + 1//24 0 1//8 0 0 0 0 0 0 0 0 0 0 + 5//12 0 -25//16 25//16 0 0 0 0 0 0 0 0 0 + 1//20 0 0 1//4 1//5 0 0 0 0 0 0 0 0 + -25//108 0 0 125//108 -65//27 125//54 0 0 0 0 0 0 0 + 31//300 0 0 0 61//225 -2//9 13//900 0 0 0 0 0 0 + 2 0 0 -53//6 704//45 -107//9 67//90 3 0 0 0 0 0 + -91//108 0 0 23//108 -976//135 311//54 -19//60 17//6 -1//12 0 0 0 0 + 2383//4100 0 0 -341//164 4496//1025 -301//82 2133//4100 45//82 45//164 18//41 0 0 0 + 3//205 0 0 0 0 -6//41 -3//205 -3//41 3//41 6//41 0 0 0 + -1777//4100 0 0 -341//164 4496//1025 -289//82 2193//4100 51//82 33//164 12//41 0 1 0], + [41//840 0 0 0 0 34//105 9//35 9//35 9//280 9//280 41//840 0 0 + 0 0 0 0 0 34//105 9//35 9//35 9//280 9//280 0 41//840 41//840], + [0, 2//27, 1//9, 1//6 , 5//12, 1//2 , 5//6 , 1//6 , 2//3 , 1//3 , 1 , 0, 1] + ) From cb676f133a7ab2e64f57a57f5ea495a896745e02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 18 Nov 2015 16:18:21 +0100 Subject: [PATCH 003/113] WIP: merging fixed and adaptive methods --- src/algorithms.jl | 30 --- src/dense.jl | 56 +++- src/iterators.jl | 290 +++++++++++++++++++-- src/runge_kutta.jl | 625 +-------------------------------------------- src/tableaus.jl | 4 +- 5 files changed, 323 insertions(+), 682 deletions(-) diff --git a/src/algorithms.jl b/src/algorithms.jl index 0589fea14..54e21e547 100644 --- a/src/algorithms.jl +++ b/src/algorithms.jl @@ -205,36 +205,6 @@ function oderk_adapt{N,S}(fn, y0::AbstractVector, tspan, 
btab_::TableauRKExplici return tspan, ys end -function rk_embedded_step!{N,S}(ytrial, yerr, ks, ytmp, y, fn, t, dt, dof, btab::TableauRKExplicit{N,S}) - # Does one embedded R-K step updating ytrial, yerr and ks. - # - # Assumes that ks[:,1] is already calculated! - # - # Modifies ytrial, yerr, ks, and ytmp - - # Needed interface: - # On components: arithmetic, zero - # On y0 container: fill!, setindex!, getindex - - fill!(ytrial, zero(eltype(ytrial)) ) - fill!(yerr, zero(eltype(ytrial)) ) - for d=1:dof - ytrial[d] += btab.b[1,1]*ks[1][d] - yerr[d] += btab.b[2,1]*ks[1][d] - end - for s=2:S - calc_next_k!(ks, ytmp, y, s, fn, t, dt, dof, btab) - for d=1:dof - ytrial[d] += btab.b[1,s]*ks[s][d] - yerr[d] += btab.b[2,s]*ks[s][d] - end - end - for d=1:dof - yerr[d] = dt * (ytrial[d]-yerr[d]) - ytrial[d] = y[d] + dt * ytrial[d] - end -end - function stepsize_hw92!(dt, tdir, x0, xtrial, xerr, order, timeout, dof, abstol, reltol, maxstep, norm) # Estimates the error and a new step size following Hairer & diff --git a/src/dense.jl b/src/dense.jl index 3c700be55..afdd98460 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -1,11 +1,12 @@ -function Step(problem :: RKProblem) - t0 = problem.t0 - y0 = problem.y0 - dy0 = problem.F(t0,y0) - dt0 = problem.dt0 - return Step(t0,y0,dy0,dt0) +# iterator for the dense output, can be wrapped around any other +# iterator supporting tspan by using the method keyword, for example +# ODE.newDenseProblem(..., method = ODE.bt_rk23, ...) + +type Step + t; y; dy; dt end + type DenseState s0; s1 last_tout @@ -15,18 +16,30 @@ type DenseState ytmp end + immutable DenseProblem - rkprob :: RKProblem + rkprob points :: Symbol tspan end -function newDenseProblem(args...; tspan = [Inf], points = :all, opt_args...) - rkprob = newRKProblem(args...; opt_args..., tstop = tspan[end]) +function DenseProblem(args...; tspan = [Inf], points = :all, method = bt_feuler, opt_args...) 
+ rkprob = method(args...; opt_args..., tstop = tspan[end]) return DenseProblem(rkprob, points, tspan) end + +# create an instance of the zero-th step for RKProblem +function Step(problem :: AbstractProblem) + t0 = problem.t0 + y0 = problem.y0 + dy0 = problem.F(t0,y0) + dt0 = problem.dt0 + return Step(t0,y0,dy0,dt0) +end + + function start(prob :: DenseProblem) step0 = Step(prob.rkprob) step1 = Step(prob.rkprob) @@ -35,6 +48,7 @@ function start(prob :: DenseProblem) return DenseState(step0, step1, prob.rkprob.t0, true, rkstate, ytmp) end + function next(prob :: DenseProblem, state :: DenseState) s0, s1 = state.s0, state.s1 @@ -55,10 +69,11 @@ function next(prob :: DenseProblem, state :: DenseState) # step is saved in s0 if done(prob.rkprob, state.rkstate) + # TODO: this shouldn't happen error("The iterator was exhausted before the dense output compltede.") else # at this point s0 holds the new step, "s2" if you will - ((s0.t,s0.y[:]),state.rkstate) = next(prob.rkprob, state.rkstate) + ((s0.t,s0.y[:]), state.rkstate) = next(prob.rkprob, state.rkstate) end # swap s0 and s1 @@ -90,6 +105,27 @@ function next(prob :: DenseProblem, state :: DenseState) end + function done(prob :: DenseProblem, state :: DenseState) return done(prob.rkprob, state.rkstate) || state.s1.t >= prob.tspan[end] end + + +function hermite_interp!(y,t,step0::Step,step1::Step) + # For dense output see Hairer & Wanner p.190 using Hermite + # interpolation. Updates y in-place. + # + # f_0 = f(x_0 , y_0) , f_1 = f(x_0 + h, y_1 ) + # this is O(3). TODO for higher order. 
+ + y0, y1 = step0.y, step1.y + dy0, dy1 = step0.dy, step1.dy + + dt = step1.t-step0.t + theta = (t-step0.t)/dt + for i=1:length(y0) + y[i] = ((1-theta)*y0[i] + theta*y1[i] + theta*(theta-1) * + ((1-2*theta)*(y1[i]-y0[i]) + (theta-1)*dt*dy0[i] + theta*dt*dy1[i]) ) + end + nothing +end diff --git a/src/iterators.jl b/src/iterators.jl index 88d6b3b7c..2528916dd 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -2,63 +2,307 @@ # Iterator methods # #################### +# common structures and functions + +const timeout_const = 5 + type TempArrays - y; ks + y; ks; yerr end -type RKState +type State t; dt; y; tmp :: TempArrays + timeout +end + +abstract AbstractProblem + + +##################### +# Fixed step method # +##################### + +# type FixedState <: AbstractState +# t; dt; y; tmp :: TempArrays +# end + + +immutable FixedProblem <: AbstractProblem + F + method + y0 + t0 + dt0 + tstop +end + + +# # generates the iterator from TableauRKExplicit +# function call(tab::TableauRKExplicit, fn, y0, t0, dt0; tstop = Inf) +# return FixedProblem(fn, tab, y0, t0, dt0, tstop) +# end + +# function start(problem :: FixedProblem) +# t0 = problem.t0 +# dt0 = problem.dt0 +# y0 = problem.y0 +# tmp = TempArrays(similar(y0), Array(typeof(y0), S(problem.btab)), Void()) +# return FixedState(t0, dt0, y0, tmp) +# end + + +function next(prob :: FixedProblem, state :: State) + dof = length(state.y) + for s=1:S(prob.method) + calc_next_k!(state.tmp, s, state, prob) + for d=1:dof + state.y[d] += state.dt * prob.method.b[s]*state.tmp.ks[s][d] + end + end + state.t += state.dt + return ((state.t,state.y), state) end -immutable RKProblem + +# function done(prob :: FixedProblem, state :: FixedState) +# return state.t >= prob.tstop # || abs(state.dt) < prob.minstep +# end + + +######################## +# Adaptive step method # +######################## + +immutable AdaptiveProblem <: AbstractProblem F - btab + method y0 t0 dt0 tstop + reltol + abstol + minstep + maxstep +end + + +# 
function AdaptiveProblem(fn, y0, t0; tstop = Inf, method = bt_feuler, reltol = 1e-5, abstol = 1e-5, minstep = 1e-10, maxstep = 1/minstep) +# return AdaptiveProblem{:adaptive}(fn, method, y0, t0, tstop, reltol, abstol, minstep, maxstep) +# end + + +function solver(F, y0, t0; + tstop = Inf, + method = bt_feuler, + reltol = 1e-5, + abstol = 1e-5, + minstep = 1e-10, + maxstep = 1/minstep, + dt0 = hinit(F, y0, t0, tstop, method, reltol, abstol)) + if isadaptive(method) + return AdaptiveProblem(F, method, y0, t0, dt0, tstop, reltol, abstol, minstep, maxstep) + else + return FixedProblem(F, method, y0, t0, dt0, tstop) + end end -function newRKProblem(fn, y0, t0, dt0; tstop = Inf, method = bt_feuler) - return RKProblem(fn, method, y0, t0, dt0, tstop) + +function start(problem :: AbstractProblem) + t0, dt0, y0 = problem.t0, problem.dt0, problem.y0 + + tmp = TempArrays(similar(y0), Array(typeof(y0), S(problem.method)), similar(y0)) + tmp.ks[1] = problem.F(t0,y0) # we assume that ks[1] is already initialized + + timeout = 0 # for step control + return State(t0,dt0,y0,tmp,timeout) end -function start(problem :: RKProblem) - t0 = problem.t0 - dt0 = problem.dt0 - y0 = problem.y0 - tmp = TempArrays(problem.y0, Array(typeof(y0), S(problem.btab))) - return RKState(t0, dt0, y0, tmp) + +function done(prob :: AbstractProblem, state :: State) + return state.t >= prob.tstop || state.dt < prob.minstep end -function next(prob :: RKProblem, state :: RKState) - dof = length(state.y) - for s=1:S(prob.btab) +function next(prob :: AdaptiveProblem, state :: State) + + # the initial values + dt = state.dt # dt is the previous stepisze, it is + # modified inside the loop + timeout = state.timeout + + # for aesthetical reasons we extract the temporary componen + tmp = state.tmp + + # The while loop continues until we either find a stepsize which + # leads to a small enough error or the stepsize reaches + # prob.minstep + + while true + + # do one step (assumes ks[1]==f0), changes only tmp + err, 
newdt, timeout = rk_trial_step!(tmp, state, prob, dt, timeout) + + if abs(newdt) < prob.minstep # minimum step size reached, break + println("Warning: dt < minstep. Stopping.") + # passing the newdt to state will result in done() + state.dt = newdt + break + end + + if err > 1 # error is too large, repeat the step with smaller dt + # redo step with smaller dt and reset the timeout + dt = newdt + timeout = timeout_const + else + # step is accepted + + # preload ks[1] for the next step + if isFSAL(prob.method) + tmp.ks[1] = tmp.ks[S(prob.method)] + else + tmp.ks[1] = prob.F(state.t+dt, state.tmp.y) + end + + # Swap bindings of y and ytrial, avoids one copy + state.y, state.tmp.y = state.tmp.y, state.y + + # Update state with the data from the step we have just + # made: + state.t += dt + state.dt = newdt + state.timeout = timeout + break + end + end + return ((state.t,state.y),state) +end + + +function rk_trial_step!(tmp, state, prob, dt, timeout) + + # tmp.y and tmp.yerr and tmp.ks are updated after this step + rk_embedded_step!(tmp, state, prob, dt) + + # changes tmp.yerr (via in place update) + err, newdt, timeout = stepsize_hw92!(tmp, state, prob, dt, timeout) + + return err, newdt, timeout +end + + +function rk_embedded_step!(tmp :: TempArrays, state :: State, prob :: AdaptiveProblem, dt) + # Does one embedded R-K step updating ytrial, yerr and ks. + # + # Assumes that ks[:,1] is already calculated! 
+ # + # Modifies tmp.y and tmp.yerr only + + y = state.y + dof = length(y) + b = prob.method.b + + tmp.y[:] = 0 + tmp.yerr[:] = 0 + + for d=1:dof + tmp.y[d] += b[1,1]*tmp.ks[1][d] + tmp.yerr[d] += b[2,1]*tmp.ks[1][d] + end + + for s=2:S(prob.method) calc_next_k!(state.tmp, s, state, prob) for d=1:dof - state.y[d] += state.dt * prob.btab.b[s]*state.tmp.ks[s][d] + tmp.y[d] += b[1,s]*tmp.ks[s][d] + tmp.yerr[d] += b[2,s]*tmp.ks[s][d] end end - state.t += state.dt + for d=1:dof + tmp.yerr[d] = dt * (tmp.y[d]-tmp.yerr[d]) + tmp.y[d] = y[d] + dt * tmp.y[d] + end +end - return ((state.t,state.y), state) +function stepsize_hw92!(tmp, state, prob, dt, timeout) + # Estimates the error and a new step size following Hairer & + # Wanner 1992, p167 (with some modifications) + # + # If timeout>0 no step size increase is allowed, timeout is + # decremented in here. + # + # Returns the error, newdt and the number of timeout-steps + # + # TODO: + # - allow component-wise reltol and abstol? + # - allow other norms + + order = minimum(prob.method.order) + timout_after_nan = 5 + fac = [0.8, 0.9, 0.25^(1/(order+1)), 0.38^(1/(order+1))][1] + facmax = 5.0 # maximal step size increase. 1.5-5 + facmin = 1./facmax # maximal step size decrease. ? + dof = length(state.y) + + # in-place calculate yerr./tol + for d=1:dof + + # if outside of domain (usually NaN) then make step size smaller by maximum + if isoutofdomain(tmp.y[d]) + return 10., dt*facmin, timout_after_nan + end + + tmp.yerr[d] = tmp.yerr[d]/(prob.abstol + max(norm(prob.y0[d]), norm(tmp.y[d]))*prob.reltol) # Eq 4.10 + end + + err = norm(tmp.yerr, 2) # Eq. 
4.11 + newdt = min(prob.maxstep, dt*max(facmin, fac*(1/err)^(1/(order+1)))) # Eq 4.13 modified + + if timeout > 0 + newdt = min(newdt, dt) + timeout -= 1 + end + + return err, newdt, timeout end -function done(prob :: RKProblem, state :: RKState) - return state.t >= prob.tstop + +function hinit(F, y0, t0, tstop, method, reltol, abstol) + # Returns first step size + order = minimum(method.order) + tau = max(reltol*norm(y0, Inf), abstol) + d0 = norm(y0, Inf)/tau + f0 = F(t0, y0) + d1 = norm(f0, Inf)/tau + if d0 < 1e-5 || d1 < 1e-5 + h0 = 1e-6 + else + h0 = 0.01*(d0/d1) + end + # perform Euler step + y1 = y0 + h0*f0 + f1 = F(t0 + h0, y1) + # estimate second derivative + d2 = norm(f1 - f0, Inf)/(tau*h0) + if max(d1, d2) <= 1e-15 + h1 = max(1e-6, 1e-3*h0) + else + pow = -(2 + log10(max(d1, d2)))/(order+1) + h1 = 10^pow + end + return min(100*h0, h1, tstop-t0) end -function calc_next_k!(tmp :: TempArrays, i, state :: RKState, prob :: RKProblem) +# For clarity we pass the TempArrays part of the state separately, +# this is the only part of state that can be changed here +function calc_next_k!(tmp :: TempArrays, i, state :: State, prob :: AbstractProblem) dof = length(state.y) - t, dt, a, c = state.t, state.dt, prob.btab.a, prob.btab.c + t, dt, a, c = state.t, state.dt, prob.method.a, prob.method.c tmp.y[:] = state.y for j=1:i-1 - # tmp.y += dt * btab.a[i,j] * ks[j] for d=1:dof tmp.y[d] += dt * tmp.ks[j][d] * a[i,j] end diff --git a/src/runge_kutta.jl b/src/runge_kutta.jl index 8000e2584..4a3095cd5 100644 --- a/src/runge_kutta.jl +++ b/src/runge_kutta.jl @@ -2,624 +2,15 @@ ############################## # (Hairer & Wanner 1992 p.134, p.165-169) -########################################### -# Tableaus for explicit Runge-Kutta methods -########################################### +import Base: start, next, done, call -import Base: start, next, done +include("tableaus.jl") +# include("algorithms.jl") -immutable TableauRKExplicit{Name, S, T} <: Tableau{Name, S, T} - 
order::(@compat(Tuple{Vararg{Int}})) # the order of the methods - a::Matrix{T} - # one or several row vectors. First row is used for the step, - # second for error calc. - b::Matrix{T} - c::Vector{T} - function TableauRKExplicit(order,a,b,c) - @assert isa(S,Integer) - @assert isa(Name,Symbol) - @assert c[1]==0 - @assert istril(a) - @assert S==length(c)==size(a,1)==size(a,2)==size(b,2) - @assert size(b,1)==length(order) - @assert norm(sum(a,2)-c'',Inf)<1e-10 # consistency. - new(order,a,b,c) - end -end +include("iterators.jl") - -function TableauRKExplicit{T}(name::Symbol, order::(@compat(Tuple{Vararg{Int}})), - a::Matrix{T}, b::Matrix{T}, c::Vector{T}) - TableauRKExplicit{name,length(c),T}(order, a, b, c) -end - - -function TableauRKExplicit(name::Symbol, order::(@compat(Tuple{Vararg{Int}})), T::Type, - a::Matrix, b::Matrix, c::Vector) - TableauRKExplicit{name,length(c),T}(order, convert(Matrix{T},a), - convert(Matrix{T},b), convert(Vector{T},c) ) -end - - -conv_field{T,N}(D,a::Array{T,N}) = convert(Array{D,N}, a) - - -S(tab::TableauRKExplicit) = length(tab.c) - -function Base.convert{Tnew<:Real,Name,S,T}(::Type{Tnew}, tab::TableauRKExplicit{Name,S,T}) - # Converts the tableau coefficients to the new type Tnew - newflds = () - @compat for n in fieldnames(tab) - fld = getfield(tab,n) - if eltype(fld)==T - newflds = tuple(newflds..., conv_field(Tnew, fld)) - else - newflds = tuple(newflds..., fld) - end - end - TableauRKExplicit{Name,S,Tnew}(newflds...) # TODO: could this be done more generically in a type-stable way? -end - - -isexplicit(b::TableauRKExplicit) = istril(b.a) # Test whether it's an explicit method -isadaptive(b::TableauRKExplicit) = size(b.b, 1)==2 - - -# First same as last. Means ks[:,end]=ks_nextstep[:,1], c.f. 
H&W p.167 -isFSAL(btab::TableauRKExplicit) = btab.a[end,:]==btab.b[1,:] && btab.c[end]==1 # the latter is not needed really - -## Tableaus for explicit RK methods -# Fixed step: -const bt_feuler = TableauRKExplicit(:feuler,(1,), Rational{Int64}, - zeros(Int,1,1), - [1]', - [0] - ) -const bt_midpoint = TableauRKExplicit(:midpoint,(2,), Rational{Int64}, - [0 0 - 1//2 0], - [0, 1]', - [0, 1//2] - ) -const bt_heun = TableauRKExplicit(:heun,(2,), Rational{Int64}, - [0 0 - 1 0], - [1//2, 1//2]', - [0, 1]) - -const bt_rk4 = TableauRKExplicit(:rk4,(4,),Rational{Int64}, - [0 0 0 0 - 1//2 0 0 0 - 0 1//2 0 0 - 0 0 1 0], - [1//6, 1//3, 1//3, 1//6]', - [0, 1//2, 1//2, 1]) - -# Adaptive step: -# Heun Euler https://en.wikipedia.org/wiki/Runge–Kutta_methods -const bt_rk21 = TableauRKExplicit(:heun_euler,(2,1), Rational{Int64}, - [0 0 - 1 0], - [1//2 1//2 - 1 0], - [0, 1]) - -# Bogacki–Shampine coefficients -const bt_rk23 = TableauRKExplicit(:bogacki_shampine,(2,3), Rational{Int64}, - [0 0 0 0 - 1/2 0 0 0 - 0 3/4 0 0 - 2/9 1/3 4/9 0], - [7/24 1/4 1/3 1/8 - 2/9 1/3 4/9 0], - [0, 1//2, 3//4, 1] - ) - -# Fehlberg https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta%E2%80%93Fehlberg_method -const bt_rk45 = TableauRKExplicit(:fehlberg,(4,5),Rational{Int64}, - [ 0 0 0 0 0 0 - 1//4 0 0 0 0 0 - 3//32 9//32 0 0 0 0 - 1932//2197 -7200//2197 7296//2197 0 0 0 - 439//216 -8 3680//513 -845//4104 0 0 - -8//27 2 -3544//2565 1859//4104 -11//40 0 ], - [25//216 0 1408//2565 2197//4104 -1//5 0 - 16//135 0 6656//12825 28561//56430 -9//50 2//55], - [0, 1//4, 3//8, 12//13, 1, 1//2]) - -# Dormand-Prince https://en.wikipedia.org/wiki/Dormand%E2%80%93Prince_method -const bt_dopri5 = TableauRKExplicit(:dopri, (5,4), Rational{Int64}, - [0 0 0 0 0 0 0 - 1//5 0 0 0 0 0 0 - 3//40 9//40 0 0 0 0 0 - 44//45 -56//15 32//9 0 0 0 0 - 19372//6561 -25360//2187 64448//6561 -212//729 0 0 0 - 9017//3168 -355//33 46732//5247 49//176 -5103//18656 0 0 - 35//384 0 500//1113 125//192 -2187//6784 11//84 0], - [35//384 0 500//1113 
125//192 -2187//6784 11//84 0 - 5179//57600 0 7571//16695 393//640 -92097//339200 187//2100 1//40], - [0, 1//5, 3//10, 4//5, 8//9, 1, 1] - ) - -# Fehlberg 7(8) coefficients -# Values from pag. 65, Fehlberg, Erwin. "Classical fifth-, sixth-, seventh-, and eighth-order Runge-Kutta formulas with stepsize control". -# National Aeronautics and Space Administration. -const bt_feh78 = TableauRKExplicit(:feh78, (7,8), Rational{Int64}, - [ 0 0 0 0 0 0 0 0 0 0 0 0 0 - 2//27 0 0 0 0 0 0 0 0 0 0 0 0 - 1//36 1//12 0 0 0 0 0 0 0 0 0 0 0 - 1//24 0 1//8 0 0 0 0 0 0 0 0 0 0 - 5//12 0 -25//16 25//16 0 0 0 0 0 0 0 0 0 - 1//20 0 0 1//4 1//5 0 0 0 0 0 0 0 0 - -25//108 0 0 125//108 -65//27 125//54 0 0 0 0 0 0 0 - 31//300 0 0 0 61//225 -2//9 13//900 0 0 0 0 0 0 - 2 0 0 -53//6 704//45 -107//9 67//90 3 0 0 0 0 0 - -91//108 0 0 23//108 -976//135 311//54 -19//60 17//6 -1//12 0 0 0 0 - 2383//4100 0 0 -341//164 4496//1025 -301//82 2133//4100 45//82 45//164 18//41 0 0 0 - 3//205 0 0 0 0 -6//41 -3//205 -3//41 3//41 6//41 0 0 0 - -1777//4100 0 0 -341//164 4496//1025 -289//82 2193//4100 51//82 33//164 12//41 0 1 0], - [41//840 0 0 0 0 34//105 9//35 9//35 9//280 9//280 41//840 0 0 - 0 0 0 0 0 34//105 9//35 9//35 9//280 9//280 0 41//840 41//840], - [0, 2//27, 1//9, 1//6 , 5//12, 1//2 , 5//6 , 1//6 , 2//3 , 1//3 , 1 , 0, 1] - ) - - -################################ -# Fixed step Runge-Kutta methods -################################ - -# TODO: iterator method -ode1(fn, y0, tspan) = oderk_fixed(fn, y0, tspan, bt_feuler) -ode2_midpoint(fn, y0, tspan) = oderk_fixed(fn, y0, tspan, bt_midpoint) -ode2_heun(fn, y0, tspan) = oderk_fixed(fn, y0, tspan, bt_heun) -ode4(fn, y0, tspan) = oderk_fixed(fn, y0, tspan, bt_rk4) - -function oderk_fixed(fn, y0, tspan, btab::TableauRKExplicit) - # Non-arrays y0 treat as scalar - fn_(t, y) = [fn(t, y[1])] - t,y = oderk_fixed(fn_, [y0], tspan, btab) - return t, vcat_nosplat(y) -end -function oderk_fixed{N,S}(fn, y0::AbstractVector, tspan, - btab_::TableauRKExplicit{N,S}) - # 
TODO: instead of AbstractVector use a Holy-trait - - # Needed interface: - # On components: - # On y0 container: length, deepcopy, similar, setindex! - # On time container: getindex, convert. length - - Et, Eyf, Ty, btab = make_consistent_types(fn, y0, tspan, btab_) - dof = length(y0) - - ys = Array(Ty, length(tspan)) - allocate!(ys, y0, dof) - ys[1] = deepcopy(y0) - - tspan = convert(Vector{Et}, tspan) - # work arrays: - ks = Array(Ty, S) - # allocate!(ks, y0, dof) # no need to allocate as fn is not in-place - ytmp = similar(y0, Eyf, dof) - for i=1:length(tspan)-1 - dt = tspan[i+1]-tspan[i] - ys[i+1][:] = ys[i] - for s=1:S - calc_next_k!(ks, ytmp, ys[i], s, fn, tspan[i], dt, dof, btab) - for d=1:dof - ys[i+1][d] += dt * btab.b[s]*ks[s][d] - end - end - end - return tspan, ys -end - -############################## -# Adaptive Runge-Kutta methods -############################## - -ode21(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_rk21; kwargs...) -ode23(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_rk23; kwargs...) -ode45_fe(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_rk45; kwargs...) -ode45_dp(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_dopri5; kwargs...) -# Use Dormand-Prince version of ode45 by default -const ode45 = ode45_dp -ode78(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_feh78; kwargs...) - -function oderk_adapt(fn, y0, tspan, btab::TableauRKExplicit; kwords...) - # For y0 which don't support indexing. - fn_ = (t, y) -> [fn(t, y[1])] - t,y = oderk_adapt(fn_, [y0], tspan, btab; kwords...) - return t, vcat_nosplat(y) -end -function oderk_adapt{N,S}(fn, y0::AbstractVector, tspan, btab_::TableauRKExplicit{N,S}; - reltol = 1.0e-5, abstol = 1.0e-8, - norm=Base.norm, - minstep=abs(tspan[end] - tspan[1])/1e18, - maxstep=abs(tspan[end] - tspan[1])/2.5, - initstep=0., - points=:all - ) - # Needed interface: - # On components: - # - note that the type of the components might change! 
- # On y0 container: length, similar, setindex! - # On time container: getindex, convert, length - - # For y0 which support indexing. Currently y0<:AbstractVector but - # that could be relaxed with a Holy-trait. - !isadaptive(btab_) && error("Can only use this solver with an adaptive RK Butcher table") - - Et, Eyf, Ty, btab = make_consistent_types(fn, y0, tspan, btab_) - # parameters - order = minimum(btab.order) - timeout_const = 5 # after step reduction do not increase step for - # timeout_const steps - - ## Initialization - dof = length(y0) - tspan = convert(Vector{Et}, tspan) - t = tspan[1] - tstart = tspan[1] - tend = tspan[end] - - # work arrays: - y = similar(y0, Eyf, dof) # y at time t - y[:] = y0 - ytrial = similar(y0, Eyf, dof) # trial solution at time t+dt - yerr = similar(y0, Eyf, dof) # error of trial solution - ks = Array(Ty, S) - # allocate!(ks, y0, dof) # no need to allocate as fn is not in-place - ytmp = similar(y0, Eyf, dof) - - # output ys - nsteps_fixed = length(tspan) # these are always output - ys = Array(Ty, nsteps_fixed) - allocate!(ys, y0, dof) - ys[1] = y0 - - # Option points determines where solution is returned: - if points==:all - tspan_fixed = tspan - tspan = Et[tstart] - iter_fixed = 2 # index into tspan_fixed - sizehint!(tspan, nsteps_fixed) - elseif points!=:specified - error("Unrecognized option points==$points") - end - # Time - dt, tdir, ks[1] = hinit(fn, y, tstart, tend, order, reltol, abstol) # sets ks[1]=f0 - if initstep!=0 - dt = sign(initstep)==tdir ? initstep : error("initstep has wrong sign.") - end - # Diagnostics - dts = Et[] - errs = Float64[] - steps = [0,0] # [accepted, rejected] - - ## Integration loop - islaststep = abs(t+dt-tend)<=eps(tend) ? 
true : false - timeout = 0 # for step-control - iter = 2 # the index into tspan and ys - while true - # do one step (assumes ks[1]==f0) - rk_embedded_step!(ytrial, yerr, ks, ytmp, y, fn, t, dt, dof, btab) - # Check error and find a new step size: - err, newdt, timeout = stepsize_hw92!(dt, tdir, y, ytrial, yerr, order, timeout, - dof, abstol, reltol, maxstep, norm) - - if err<=1.0 # accept step - # diagnostics - steps[1] +=1 - push!(dts, dt) - push!(errs, err) - - # Output: - f0 = ks[1] - f1 = isFSAL(btab) ? ks[S] : fn(t+dt, ytrial) - if points==:specified - # interpolate onto given output points - while iter-1= tdir*tend - dt = tend-t - islaststep = true # next step is the last, if it succeeds - end - elseif abs(newdt)0 no step size increase is allowed, timeout is - # decremented in here. - # - # Returns the error, newdt and the number of timeout-steps - # - # TODO: - # - allow component-wise reltol and abstol? - # - allow other norms - - # Needed interface: - # On components: isoutofdomain - # On y0 container: norm, get/setindex - - timout_after_nan = 5 - fac = [0.8, 0.9, 0.25^(1/(order+1)), 0.38^(1/(order+1))][1] - facmax = 5.0 # maximal step size increase. 1.5-5 - facmin = 1./facmax # maximal step size decrease. ? - - # in-place calculate xerr./tol - for d=1:dof - # if outside of domain (usually NaN) then make step size smaller by maximum - isoutofdomain(xtrial[d]) && return 10., dt*facmin, timout_after_nan - xerr[d] = xerr[d]/(abstol + max(norm(x0[d]), norm(xtrial[d]))*reltol) # Eq 4.10 - end - err = norm(xerr, 2) # Eq. 4.11 - newdt = min(maxstep, tdir*dt*max(facmin, fac*(1/err)^(1/(order+1)))) # Eq 4.13 modified - if timeout>0 - newdt = min(newdt, dt) - timeout -= 1 - end - return err, tdir*newdt, timeout -end - -function calc_next_k!{Ty}(ks::Vector, ytmp::Ty, y, s, fn, t, dt, dof, btab) - # Calculates the next ks and puts it into ks[s] - # - ks and ytmp are modified inside this function. 
- - # Needed interface: - # On components: +, * - # On y0 container: setindex!, getindex, fn - - ytmp[:] = y - for ss=1:s-1, d=1:dof - ytmp[d] += dt * ks[ss][d] * btab.a[s,ss] - end - ks[s] = fn(t + btab.c[s]*dt, ytmp)::Ty - nothing -end - -# Helper functions: -function allocate!{T}(vec::Vector{T}, y0, dof) - # Allocates all vectors inside a Vector{Vector} using the same - # kind of container as y0 has and element type eltype(eltype(vec)). - for s=1:length(vec) - vec[s] = similar(y0, eltype(T), dof) - end -end -function index_or_push!(vec, i, val) - # Fills in the vector until there is no space, then uses push! - # instead. - if length(vec)>=i - vec[i] = val - else - push!(vec, val) - end -end -vcat_nosplat(y) = eltype(y[1])[el[1] for el in y] # Does vcat(y...) without the splatting - -function hermite_interp!(y, tquery,t,dt,y0,y1,f0,f1) - # For dense output see Hairer & Wanner p.190 using Hermite - # interpolation. Updates y in-place. - # - # f_0 = f(x_0 , y_0) , f_1 = f(x_0 + h, y_1 ) - # this is O(3). TODO for higher order. 
- - theta = (tquery-t)/dt - for i=1:length(y0) - y[i] = ((1-theta)*y0[i] + theta*y1[i] + theta*(theta-1) * - ((1-2*theta)*(y1[i]-y0[i]) + (theta-1)*dt*f0[i] + theta*dt*f1[i]) ) - end - nothing -end -function hermite_interp(tquery,t,dt,y0,y1,f0,f1) - # Returns the y instead of in-place - y = similar(y0) - hermite_interp!(y,tquery,t,dt,y0,y1,f0,f1) - return y -end - -#################### -# Iterator methods # -#################### - -type TempArrays - y; ks -end - -type Step - t; y; dy; dt -end - -type State - tmp :: TempArrays - prev_steps :: Vector{Step} - last_tout - first_step -end - -immutable Problem - F - btab - y0 - t0 - dt0 - tspan - points -end - -function newProblem(fn, y0, t0, dt0; tspan = [t0,Inf], method = bt_feuler, points = :all) - return Problem(fn, method, y0, t0, dt0, tspan, points) -end - -function start(problem :: Problem) - S = length(problem.btab.c) - t0 = problem.t0 - y0 = problem.y0 - dy0 = problem.F(t0,y0) - dt0 = problem.dt0 - tmp = TempArrays(problem.y0, Array(typeof(y0), S)) - step0 = Step(t0,y0,dy0,dt0) - # initialize with two identical steps - prev_steps = [step0, deepcopy(step0)] - return State(tmp, prev_steps, t0, true) -end - -function calc_next_k_2!(tmp :: TempArrays, i, step :: Step, prob :: Problem) - # Calculates the next ks and puts it into ks[s] - # - ks and ytmp are modified inside this function. 
- - # Needed interface: - # On components: +, * - # On y0 container: setindex!, getindex, fn - - dof = length(step.y) - t, dt, a, c = step.t, step.dt, prob.btab.a, prob.btab.c - - tmp.y[:] = step.y - for j=1:i-1 - # tmp.y += dt * btab.a[i,j] * ks[j] - for d=1:dof - tmp.y[d] += dt * tmp.ks[j][d] * a[i,j] - end - end - tmp.ks[i] = prob.F(t + c[i]*dt, tmp.y) - - nothing -end - -function next(prob :: Problem, state :: State) - - s0, s1 = state.prev_steps - t0, t1 = s0.t, s1.t - - if state.first_step - state.first_step = false - return ((s0.t,s0.y),state) - end - - # the next output time that we aim at - t_goal = prob.tspan[findfirst(t->(t>state.last_tout), prob.tspan)] - - # the t0 == t1 part ensures that we make at least one step - while t1 < t_goal - - # s1 is the starting point for the new step, while the new - # step is saved in s0 - - s0.y[:] = s1.y - s0.t = s1.t - - # perform a step and save it to s0 - dof = length(s1.y) - for s=1:S(prob.btab) - calc_next_k_2!(state.tmp, s, s1, prob) - for d=1:dof - s0.y[d] += s1.dt * prob.btab.b[s]*state.tmp.ks[s][d] - end - end - s0.t += s1.dt - s0.dy = prob.F(s0.t,s0.y) - - # swap s0 and s1 - state.prev_steps[:] = [s1,s0] - - # reassign the steps - s0, s1 = state.prev_steps - t0, t1 = s0.t, s1.t - - # we made a successfull step and points == :all - if prob.points == :all - t_goal = min(t_goal,t1) - break - end - end - - # at this point we have t_goal∈[t0,t1] so we can apply the - # interpolation - - hermite_interp!(state.tmp.y,t_goal,t0,t1-t0,s0.y,s1.y,s0.dy,s1.dy) - - # update the last output time - state.last_tout = t_goal - - return ((s1.t,s1.y),state) - -end - -function done(prob :: Problem, state :: State) - state.prev_steps[2].t >= prob.tspan[end] -end +# include("iterators.jl") +# include("fixed.jl") +# include("variable.jl") +include("dense.jl") diff --git a/src/tableaus.jl b/src/tableaus.jl index da90d3eb6..143eedbd3 100644 --- a/src/tableaus.jl +++ b/src/tableaus.jl @@ -24,13 +24,13 @@ end function 
TableauRKExplicit{T}(name::Symbol, order::(@compat(Tuple{Vararg{Int}})), - a::Matrix{T}, b::Matrix{T}, c::Vector{T}) + a::Matrix{T}, b::Matrix{T}, c::Vector{T}) TableauRKExplicit{name,length(c),T}(order, a, b, c) end function TableauRKExplicit(name::Symbol, order::(@compat(Tuple{Vararg{Int}})), T::Type, - a::Matrix, b::Matrix, c::Vector) + a::Matrix, b::Matrix, c::Vector) TableauRKExplicit{name,length(c),T}(order, convert(Matrix{T},a), convert(Matrix{T},b), convert(Vector{T},c) ) end From df951d773d3671285c756d785a44e7c4622809e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 18 Nov 2015 16:54:56 +0100 Subject: [PATCH 004/113] Iterator version of rk solvers --- src/ODE.jl | 2 +- src/algorithms.jl | 306 ------------------------------------------- src/dense.jl | 46 +++---- src/iterators.jl | 315 +-------------------------------------------- src/rk.jl | 290 +++++++++++++++++++++++++++++++++++++++++ src/runge_kutta.jl | 16 --- 6 files changed, 314 insertions(+), 661 deletions(-) delete mode 100644 src/algorithms.jl create mode 100644 src/rk.jl delete mode 100644 src/runge_kutta.jl diff --git a/src/ODE.jl b/src/ODE.jl index e8ccc55c1..e13927ab3 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -146,7 +146,7 @@ end ## NON-STIFF SOLVERS ############################################################################### -include("runge_kutta.jl") +include("iterators.jl") # ODE_MS Fixed-step, fixed-order multi-step numerical method # with Adams-Bashforth-Moulton coefficients diff --git a/src/algorithms.jl b/src/algorithms.jl deleted file mode 100644 index 54e21e547..000000000 --- a/src/algorithms.jl +++ /dev/null @@ -1,306 +0,0 @@ -################################ -# Fixed step Runge-Kutta methods -################################ - -type Step - t; y; dy; dt -end - -# TODO: iterator method -ode1(fn, y0, tspan) = oderk_fixed(fn, y0, tspan, bt_feuler) -ode2_midpoint(fn, y0, tspan) = oderk_fixed(fn, y0, tspan, bt_midpoint) -ode2_heun(fn, y0, tspan) = 
oderk_fixed(fn, y0, tspan, bt_heun) -ode4(fn, y0, tspan) = oderk_fixed(fn, y0, tspan, bt_rk4) - -function oderk_fixed(fn, y0, tspan, btab::TableauRKExplicit) - # Non-arrays y0 treat as scalar - fn_(t, y) = [fn(t, y[1])] - t,y = oderk_fixed(fn_, [y0], tspan, btab) - return t, vcat_nosplat(y) -end -function oderk_fixed{N,S}(fn, y0::AbstractVector, tspan, - btab_::TableauRKExplicit{N,S}) - # TODO: instead of AbstractVector use a Holy-trait - - # Needed interface: - # On components: - # On y0 container: length, deepcopy, similar, setindex! - # On time container: getindex, convert. length - - Et, Eyf, Ty, btab = make_consistent_types(fn, y0, tspan, btab_) - dof = length(y0) - - ys = Array(Ty, length(tspan)) - allocate!(ys, y0, dof) - ys[1] = deepcopy(y0) - - tspan = convert(Vector{Et}, tspan) - # work arrays: - ks = Array(Ty, S) - # allocate!(ks, y0, dof) # no need to allocate as fn is not in-place - ytmp = similar(y0, Eyf, dof) - for i=1:length(tspan)-1 - dt = tspan[i+1]-tspan[i] - ys[i+1][:] = ys[i] - for s=1:S - calc_next_k!(ks, ytmp, ys[i], s, fn, tspan[i], dt, dof, btab) - for d=1:dof - ys[i+1][d] += dt * btab.b[s]*ks[s][d] - end - end - end - return tspan, ys -end - -############################## -# Adaptive Runge-Kutta methods -############################## - -ode21(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_rk21; kwargs...) -ode23(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_rk23; kwargs...) -ode45_fe(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_rk45; kwargs...) -ode45_dp(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_dopri5; kwargs...) -# Use Dormand-Prince version of ode45 by default -const ode45 = ode45_dp -ode78(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_feh78; kwargs...) - -function oderk_adapt(fn, y0, tspan, btab::TableauRKExplicit; kwords...) - # For y0 which don't support indexing. - fn_ = (t, y) -> [fn(t, y[1])] - t,y = oderk_adapt(fn_, [y0], tspan, btab; kwords...) 
- return t, vcat_nosplat(y) -end -function oderk_adapt{N,S}(fn, y0::AbstractVector, tspan, btab_::TableauRKExplicit{N,S}; - reltol = 1.0e-5, abstol = 1.0e-8, - norm=Base.norm, - minstep=abs(tspan[end] - tspan[1])/1e18, - maxstep=abs(tspan[end] - tspan[1])/2.5, - initstep=0., - points=:all - ) - # Needed interface: - # On components: - # - note that the type of the components might change! - # On y0 container: length, similar, setindex! - # On time container: getindex, convert, length - - # For y0 which support indexing. Currently y0<:AbstractVector but - # that could be relaxed with a Holy-trait. - !isadaptive(btab_) && error("Can only use this solver with an adaptive RK Butcher table") - - Et, Eyf, Ty, btab = make_consistent_types(fn, y0, tspan, btab_) - # parameters - order = minimum(btab.order) - timeout_const = 5 # after step reduction do not increase step for - # timeout_const steps - - ## Initialization - dof = length(y0) - tspan = convert(Vector{Et}, tspan) - t = tspan[1] - tstart = tspan[1] - tend = tspan[end] - - # work arrays: - y = similar(y0, Eyf, dof) # y at time t - y[:] = y0 - ytrial = similar(y0, Eyf, dof) # trial solution at time t+dt - yerr = similar(y0, Eyf, dof) # error of trial solution - ks = Array(Ty, S) - # allocate!(ks, y0, dof) # no need to allocate as fn is not in-place - ytmp = similar(y0, Eyf, dof) - - # output ys - nsteps_fixed = length(tspan) # these are always output - ys = Array(Ty, nsteps_fixed) - allocate!(ys, y0, dof) - ys[1] = y0 - - # Option points determines where solution is returned: - if points==:all - tspan_fixed = tspan - tspan = Et[tstart] - iter_fixed = 2 # index into tspan_fixed - sizehint!(tspan, nsteps_fixed) - elseif points!=:specified - error("Unrecognized option points==$points") - end - # Time - dt, tdir, ks[1] = hinit(fn, y, tstart, tend, order, reltol, abstol) # sets ks[1]=f0 - if initstep!=0 - dt = sign(initstep)==tdir ? 
initstep : error("initstep has wrong sign.") - end - # Diagnostics - dts = Et[] - errs = Float64[] - steps = [0,0] # [accepted, rejected] - - ## Integration loop - islaststep = abs(t+dt-tend)<=eps(tend) ? true : false - timeout = 0 # for step-control - iter = 2 # the index into tspan and ys - while true - # do one step (assumes ks[1]==f0) - rk_embedded_step!(ytrial, yerr, ks, ytmp, y, fn, t, dt, dof, btab) - # Check error and find a new step size: - err, newdt, timeout = stepsize_hw92!(dt, tdir, y, ytrial, yerr, order, timeout, - dof, abstol, reltol, maxstep, norm) - - if err<=1.0 # accept step - # diagnostics - steps[1] +=1 - push!(dts, dt) - push!(errs, err) - - # Output: - f0 = ks[1] - f1 = isFSAL(btab) ? ks[S] : fn(t+dt, ytrial) - if points==:specified - # interpolate onto given output points - while iter-1= tdir*tend - dt = tend-t - islaststep = true # next step is the last, if it succeeds - end - elseif abs(newdt)0 no step size increase is allowed, timeout is - # decremented in here. - # - # Returns the error, newdt and the number of timeout-steps - # - # TODO: - # - allow component-wise reltol and abstol? - # - allow other norms - - # Needed interface: - # On components: isoutofdomain - # On y0 container: norm, get/setindex - - timout_after_nan = 5 - fac = [0.8, 0.9, 0.25^(1/(order+1)), 0.38^(1/(order+1))][1] - facmax = 5.0 # maximal step size increase. 1.5-5 - facmin = 1./facmax # maximal step size decrease. ? - - # in-place calculate xerr./tol - for d=1:dof - # if outside of domain (usually NaN) then make step size smaller by maximum - isoutofdomain(xtrial[d]) && return 10., dt*facmin, timout_after_nan - xerr[d] = xerr[d]/(abstol + max(norm(x0[d]), norm(xtrial[d]))*reltol) # Eq 4.10 - end - err = norm(xerr, 2) # Eq. 
4.11 - newdt = min(maxstep, tdir*dt*max(facmin, fac*(1/err)^(1/(order+1)))) # Eq 4.13 modified - if timeout>0 - newdt = min(newdt, dt) - timeout -= 1 - end - return err, tdir*newdt, timeout -end - -function calc_next_k!{Ty}(ks::Vector, ytmp::Ty, y, s, fn, t, dt, dof, btab) - # Calculates the next ks and puts it into ks[s] - # - ks and ytmp are modified inside this function. - - # Needed interface: - # On components: +, * - # On y0 container: setindex!, getindex, fn - - ytmp[:] = y - for ss=1:s-1, d=1:dof - ytmp[d] += dt * ks[ss][d] * btab.a[s,ss] - end - ks[s] = fn(t + btab.c[s]*dt, ytmp)::Ty - nothing -end - -# Helper functions: -function allocate!{T}(vec::Vector{T}, y0, dof) - # Allocates all vectors inside a Vector{Vector} using the same - # kind of container as y0 has and element type eltype(eltype(vec)). - for s=1:length(vec) - vec[s] = similar(y0, eltype(T), dof) - end -end -function index_or_push!(vec, i, val) - # Fills in the vector until there is no space, then uses push! - # instead. - if length(vec)>=i - vec[i] = val - else - push!(vec, val) - end -end -vcat_nosplat(y) = eltype(y[1])[el[1] for el in y] # Does vcat(y...) without the splatting - -# function hermite_interp!(y, tquery,t,dt,y0,y1,f0,f1) -function hermite_interp!(y,t,step0::Step,step1::Step) - # For dense output see Hairer & Wanner p.190 using Hermite - # interpolation. Updates y in-place. - # - # f_0 = f(x_0 , y_0) , f_1 = f(x_0 + h, y_1 ) - # this is O(3). TODO for higher order. 
- - y0, y1 = step0.y, step1.y - dy0, dy1 = step0.dy, step1.dy - - dt = step1.t-step0.t - theta = (t-step0.t)/dt - for i=1:length(y0) - y[i] = ((1-theta)*y0[i] + theta*y1[i] + theta*(theta-1) * - ((1-2*theta)*(y1[i]-y0[i]) + (theta-1)*dt*dy0[i] + theta*dt*dy1[i]) ) - end - nothing -end - -function hermite_interp(tquery,t,dt,y0,y1,f0,f1) - # Returns the y instead of in-place - y = similar(y0) - hermite_interp!(y,tquery,t,dt,y0,y1,f0,f1) - return y -end diff --git a/src/dense.jl b/src/dense.jl index afdd98460..8159cb628 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -3,7 +3,7 @@ # ODE.newDenseProblem(..., method = ODE.bt_rk23, ...) type Step - t; y; dy; dt + t; y; dy end @@ -11,41 +11,36 @@ type DenseState s0; s1 last_tout first_step - rkstate + solver_state # used for storing the interpolation result ytmp end immutable DenseProblem - rkprob + F + y0 + t0 + solver points :: Symbol tspan end -function DenseProblem(args...; tspan = [Inf], points = :all, method = bt_feuler, opt_args...) - rkprob = method(args...; opt_args..., tstop = tspan[end]) - return DenseProblem(rkprob, points, tspan) -end - - -# create an instance of the zero-th step for RKProblem -function Step(problem :: AbstractProblem) - t0 = problem.t0 - y0 = problem.y0 - dy0 = problem.F(t0,y0) - dt0 = problem.dt0 - return Step(t0,y0,dy0,dt0) +function dense(F, y0, t0, solver; tspan = [Inf], points = :all, kargs...) 
+ return DenseProblem(F, y0, t0, solver, points, tspan) end function start(prob :: DenseProblem) - step0 = Step(prob.rkprob) - step1 = Step(prob.rkprob) - rkstate = start(prob.rkprob) - ytmp = deepcopy(prob.rkprob.y0) - return DenseState(step0, step1, prob.rkprob.t0, true, rkstate, ytmp) + t0 = prob.t0 + y0 = prob.y0 + dy0 = prob.F(t0,y0) + step0 = Step(t0,y0,dy0) + step1 = Step(t0,y0,dy0) + solver_state = start(prob.solver) + ytmp = deepcopy(prob.y0) + return DenseState(step0, step1, prob.t0, true, solver_state, ytmp) end @@ -68,12 +63,12 @@ function next(prob :: DenseProblem, state :: DenseState) # s1 is the starting point for the new step, while the new # step is saved in s0 - if done(prob.rkprob, state.rkstate) + if done(prob.solver, state.solver_state) # TODO: this shouldn't happen error("The iterator was exhausted before the dense output compltede.") else # at this point s0 holds the new step, "s2" if you will - ((s0.t,s0.y[:]), state.rkstate) = next(prob.rkprob, state.rkstate) + ((s0.t,s0.y[:]), state.solver_state) = next(prob.solver, state.solver_state) end # swap s0 and s1 @@ -93,8 +88,7 @@ function next(prob :: DenseProblem, state :: DenseState) # at this point we have t_goal∈[t0,t1] so we can apply the # interpolation - F = prob.rkprob.F - s0.dy[:], s1.dy[:] = F(t0,s0.y), F(t1,s1.y) + s0.dy[:], s1.dy[:] = prob.F(t0,s0.y), prob.F(t1,s1.y) hermite_interp!(state.ytmp,t_goal,s0,s1) @@ -107,7 +101,7 @@ end function done(prob :: DenseProblem, state :: DenseState) - return done(prob.rkprob, state.rkstate) || state.s1.t >= prob.tspan[end] + return done(prob.solver, state.solver_state) || state.s1.t >= prob.tspan[end] end diff --git a/src/iterators.jl b/src/iterators.jl index 2528916dd..489c3aefb 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -1,313 +1,4 @@ -#################### -# Iterator methods # -#################### +import Base: start, next, done, call -# common structures and functions - -const timeout_const = 5 - -type TempArrays - y; ks; yerr 
-end - -type State - t; dt; y; tmp :: TempArrays - timeout -end - -abstract AbstractProblem - - -##################### -# Fixed step method # -##################### - -# type FixedState <: AbstractState -# t; dt; y; tmp :: TempArrays -# end - - -immutable FixedProblem <: AbstractProblem - F - method - y0 - t0 - dt0 - tstop -end - - -# # generates the iterator from TableauRKExplicit -# function call(tab::TableauRKExplicit, fn, y0, t0, dt0; tstop = Inf) -# return FixedProblem(fn, tab, y0, t0, dt0, tstop) -# end - -# function start(problem :: FixedProblem) -# t0 = problem.t0 -# dt0 = problem.dt0 -# y0 = problem.y0 -# tmp = TempArrays(similar(y0), Array(typeof(y0), S(problem.btab)), Void()) -# return FixedState(t0, dt0, y0, tmp) -# end - - -function next(prob :: FixedProblem, state :: State) - dof = length(state.y) - for s=1:S(prob.method) - calc_next_k!(state.tmp, s, state, prob) - for d=1:dof - state.y[d] += state.dt * prob.method.b[s]*state.tmp.ks[s][d] - end - end - state.t += state.dt - return ((state.t,state.y), state) -end - - -# function done(prob :: FixedProblem, state :: FixedState) -# return state.t >= prob.tstop # || abs(state.dt) < prob.minstep -# end - - -######################## -# Adaptive step method # -######################## - -immutable AdaptiveProblem <: AbstractProblem - F - method - y0 - t0 - dt0 - tstop - reltol - abstol - minstep - maxstep -end - - -# function AdaptiveProblem(fn, y0, t0; tstop = Inf, method = bt_feuler, reltol = 1e-5, abstol = 1e-5, minstep = 1e-10, maxstep = 1/minstep) -# return AdaptiveProblem{:adaptive}(fn, method, y0, t0, tstop, reltol, abstol, minstep, maxstep) -# end - - -function solver(F, y0, t0; - tstop = Inf, - method = bt_feuler, - reltol = 1e-5, - abstol = 1e-5, - minstep = 1e-10, - maxstep = 1/minstep, - dt0 = hinit(F, y0, t0, tstop, method, reltol, abstol)) - if isadaptive(method) - return AdaptiveProblem(F, method, y0, t0, dt0, tstop, reltol, abstol, minstep, maxstep) - else - return FixedProblem(F, method, y0, 
t0, dt0, tstop) - end -end - - -function start(problem :: AbstractProblem) - t0, dt0, y0 = problem.t0, problem.dt0, problem.y0 - - tmp = TempArrays(similar(y0), Array(typeof(y0), S(problem.method)), similar(y0)) - tmp.ks[1] = problem.F(t0,y0) # we assume that ks[1] is already initialized - - timeout = 0 # for step control - return State(t0,dt0,y0,tmp,timeout) -end - - -function done(prob :: AbstractProblem, state :: State) - return state.t >= prob.tstop || state.dt < prob.minstep -end - - -function next(prob :: AdaptiveProblem, state :: State) - - # the initial values - dt = state.dt # dt is the previous stepisze, it is - # modified inside the loop - timeout = state.timeout - - # for aesthetical reasons we extract the temporary componen - tmp = state.tmp - - # The while loop continues until we either find a stepsize which - # leads to a small enough error or the stepsize reaches - # prob.minstep - - while true - - # do one step (assumes ks[1]==f0), changes only tmp - err, newdt, timeout = rk_trial_step!(tmp, state, prob, dt, timeout) - - if abs(newdt) < prob.minstep # minimum step size reached, break - println("Warning: dt < minstep. 
Stopping.") - # passing the newdt to state will result in done() - state.dt = newdt - break - end - - if err > 1 # error is too large, repeat the step with smaller dt - # redo step with smaller dt and reset the timeout - dt = newdt - timeout = timeout_const - else - # step is accepted - - # preload ks[1] for the next step - if isFSAL(prob.method) - tmp.ks[1] = tmp.ks[S(prob.method)] - else - tmp.ks[1] = prob.F(state.t+dt, state.tmp.y) - end - - # Swap bindings of y and ytrial, avoids one copy - state.y, state.tmp.y = state.tmp.y, state.y - - # Update state with the data from the step we have just - # made: - state.t += dt - state.dt = newdt - state.timeout = timeout - break - end - end - return ((state.t,state.y),state) -end - - -function rk_trial_step!(tmp, state, prob, dt, timeout) - - # tmp.y and tmp.yerr and tmp.ks are updated after this step - rk_embedded_step!(tmp, state, prob, dt) - - # changes tmp.yerr (via in place update) - err, newdt, timeout = stepsize_hw92!(tmp, state, prob, dt, timeout) - - return err, newdt, timeout -end - - -function rk_embedded_step!(tmp :: TempArrays, state :: State, prob :: AdaptiveProblem, dt) - # Does one embedded R-K step updating ytrial, yerr and ks. - # - # Assumes that ks[:,1] is already calculated! 
- # - # Modifies tmp.y and tmp.yerr only - - y = state.y - dof = length(y) - b = prob.method.b - - tmp.y[:] = 0 - tmp.yerr[:] = 0 - - for d=1:dof - tmp.y[d] += b[1,1]*tmp.ks[1][d] - tmp.yerr[d] += b[2,1]*tmp.ks[1][d] - end - - for s=2:S(prob.method) - calc_next_k!(state.tmp, s, state, prob) - for d=1:dof - tmp.y[d] += b[1,s]*tmp.ks[s][d] - tmp.yerr[d] += b[2,s]*tmp.ks[s][d] - end - end - - for d=1:dof - tmp.yerr[d] = dt * (tmp.y[d]-tmp.yerr[d]) - tmp.y[d] = y[d] + dt * tmp.y[d] - end -end - - -function stepsize_hw92!(tmp, state, prob, dt, timeout) - # Estimates the error and a new step size following Hairer & - # Wanner 1992, p167 (with some modifications) - # - # If timeout>0 no step size increase is allowed, timeout is - # decremented in here. - # - # Returns the error, newdt and the number of timeout-steps - # - # TODO: - # - allow component-wise reltol and abstol? - # - allow other norms - - order = minimum(prob.method.order) - timout_after_nan = 5 - fac = [0.8, 0.9, 0.25^(1/(order+1)), 0.38^(1/(order+1))][1] - facmax = 5.0 # maximal step size increase. 1.5-5 - facmin = 1./facmax # maximal step size decrease. ? - dof = length(state.y) - - # in-place calculate yerr./tol - for d=1:dof - - # if outside of domain (usually NaN) then make step size smaller by maximum - if isoutofdomain(tmp.y[d]) - return 10., dt*facmin, timout_after_nan - end - - tmp.yerr[d] = tmp.yerr[d]/(prob.abstol + max(norm(prob.y0[d]), norm(tmp.y[d]))*prob.reltol) # Eq 4.10 - end - - err = norm(tmp.yerr, 2) # Eq. 
4.11 - newdt = min(prob.maxstep, dt*max(facmin, fac*(1/err)^(1/(order+1)))) # Eq 4.13 modified - - if timeout > 0 - newdt = min(newdt, dt) - timeout -= 1 - end - - return err, newdt, timeout -end - - -function hinit(F, y0, t0, tstop, method, reltol, abstol) - # Returns first step size - order = minimum(method.order) - tau = max(reltol*norm(y0, Inf), abstol) - d0 = norm(y0, Inf)/tau - f0 = F(t0, y0) - d1 = norm(f0, Inf)/tau - if d0 < 1e-5 || d1 < 1e-5 - h0 = 1e-6 - else - h0 = 0.01*(d0/d1) - end - # perform Euler step - y1 = y0 + h0*f0 - f1 = F(t0 + h0, y1) - # estimate second derivative - d2 = norm(f1 - f0, Inf)/(tau*h0) - if max(d1, d2) <= 1e-15 - h1 = max(1e-6, 1e-3*h0) - else - pow = -(2 + log10(max(d1, d2)))/(order+1) - h1 = 10^pow - end - return min(100*h0, h1, tstop-t0) -end - - -# For clarity we pass the TempArrays part of the state separately, -# this is the only part of state that can be changed here -function calc_next_k!(tmp :: TempArrays, i, state :: State, prob :: AbstractProblem) - dof = length(state.y) - t, dt, a, c = state.t, state.dt, prob.method.a, prob.method.c - - tmp.y[:] = state.y - for j=1:i-1 - for d=1:dof - tmp.y[d] += dt * tmp.ks[j][d] * a[i,j] - end - end - tmp.ks[i] = prob.F(t + c[i]*dt, tmp.y) - - nothing -end +include("dense.jl") +include("rk.jl") diff --git a/src/rk.jl b/src/rk.jl new file mode 100644 index 000000000..c3bfc9560 --- /dev/null +++ b/src/rk.jl @@ -0,0 +1,290 @@ +# This file contains the implementation of explicit Runkge-Kutta +# solver from (Hairer & Wanner 1992 p.134, p.165-169). + +# include the Butcher tableaus. 
+include("tableaus.jl") + +#################### +# Iterator methods # +#################### + +# common structures and functions + +type TempArrays + y; ks; yerr +end + +type State + t; dt; y; tmp :: TempArrays + timeout +end + + +immutable Problem{MethodType} + F + method + y0 + t0 + dt0 + tstop + reltol + abstol + minstep + maxstep +end + + +function solver(F, y0, t0; + tstop = Inf, + method = bt_feuler, + reltol = 1e-5, + abstol = 1e-5, + minstep = 1e-10, + maxstep = 1/minstep, + dt0 = hinit(F, y0, t0, tstop, method, reltol, abstol), + tspan = [tstop], + kargs...) + + if isadaptive(method) + methodtype = :adaptive + else + methodtype = :fixed + end + + solver = Problem{methodtype}(F, method, y0, t0, dt0, tstop, reltol, abstol, minstep, maxstep) + dense_solver = dense(F, y0, t0, solver; tspan = tspan, kargs...) + + return dense_solver + +end + + +function start(problem :: Problem) + t0, dt0, y0 = problem.t0, problem.dt0, problem.y0 + + tmp = TempArrays(similar(y0), Array(typeof(y0), S(problem.method)), similar(y0)) + tmp.ks[1] = problem.F(t0,y0) # we assume that ks[1] is already initialized + + timeout = 0 # for step control + return State(t0,dt0,y0,tmp,timeout) +end + + +function done(prob :: Problem, state :: State) + return state.t >= prob.tstop || state.dt < prob.minstep +end + +##################### +# Fixed step method # +##################### + +function next(prob :: Problem{:fixed}, state :: State) + dof = length(state.y) + for s=1:S(prob.method) + calc_next_k!(state.tmp, s, state, prob) + for d=1:dof + state.y[d] += state.dt * prob.method.b[s]*state.tmp.ks[s][d] + end + end + state.t += state.dt + return ((state.t,state.y), state) +end + +######################## +# Adaptive step method # +######################## + +function next(prob :: Problem{:adaptive}, state :: State) + + const timeout_const = 5 + + # the initial values + dt = state.dt # dt is the previous stepisze, it is + # modified inside the loop + timeout = state.timeout + + # for aesthetical 
reasons we extract the temporary componen + tmp = state.tmp + + # The while loop continues until we either find a stepsize which + # leads to a small enough error or the stepsize reaches + # prob.minstep + + while true + + # do one step (assumes ks[1]==f0), changes only tmp + err, newdt, timeout = rk_trial_step!(tmp, state, prob, dt, timeout) + + if abs(newdt) < prob.minstep # minimum step size reached, break + println("Warning: dt < minstep. Stopping.") + # passing the newdt to state will result in done() + state.dt = newdt + break + end + + if err > 1 # error is too large, repeat the step with smaller dt + # redo step with smaller dt and reset the timeout + dt = newdt + timeout = timeout_const + else + # step is accepted + + # preload ks[1] for the next step + if isFSAL(prob.method) + tmp.ks[1] = tmp.ks[S(prob.method)] + else + tmp.ks[1] = prob.F(state.t+dt, state.tmp.y) + end + + # Swap bindings of y and ytrial, avoids one copy + state.y, state.tmp.y = state.tmp.y, state.y + + # Update state with the data from the step we have just + # made: + state.t += dt + state.dt = newdt + state.timeout = timeout + break + end + end + return ((state.t,state.y),state) +end + + +########################## +# Lower level algorithms # +########################## + + +function rk_trial_step!(tmp, state, prob, dt, timeout) + + # tmp.y and tmp.yerr and tmp.ks are updated after this step + rk_embedded_step!(tmp, state, prob, dt) + + # changes tmp.yerr (via in place update) + err, newdt, timeout = stepsize_hw92!(tmp, state, prob, dt, timeout) + + return err, newdt, timeout +end + + +function rk_embedded_step!(tmp :: TempArrays, state :: State, prob :: Problem, dt) + # Does one embedded R-K step updating ytrial, yerr and ks. + # + # Assumes that ks[:,1] is already calculated! 
+ # + # Modifies tmp.y and tmp.yerr only + + y = state.y + dof = length(y) + b = prob.method.b + + tmp.y[:] = 0 + tmp.yerr[:] = 0 + + for d=1:dof + tmp.y[d] += b[1,1]*tmp.ks[1][d] + tmp.yerr[d] += b[2,1]*tmp.ks[1][d] + end + + for s=2:S(prob.method) + calc_next_k!(state.tmp, s, state, prob) + for d=1:dof + tmp.y[d] += b[1,s]*tmp.ks[s][d] + tmp.yerr[d] += b[2,s]*tmp.ks[s][d] + end + end + + for d=1:dof + tmp.yerr[d] = dt * (tmp.y[d]-tmp.yerr[d]) + tmp.y[d] = y[d] + dt * tmp.y[d] + end +end + + +function stepsize_hw92!(tmp, state, prob, dt, timeout) + # Estimates the error and a new step size following Hairer & + # Wanner 1992, p167 (with some modifications) + # + # If timeout>0 no step size increase is allowed, timeout is + # decremented in here. + # + # Returns the error, newdt and the number of timeout-steps + # + # TODO: + # - allow component-wise reltol and abstol? + # - allow other norms + + order = minimum(prob.method.order) + timout_after_nan = 5 + fac = [0.8, 0.9, 0.25^(1/(order+1)), 0.38^(1/(order+1))][1] + facmax = 5.0 # maximal step size increase. 1.5-5 + facmin = 1./facmax # maximal step size decrease. ? + dof = length(state.y) + + # in-place calculate yerr./tol + for d=1:dof + + # if outside of domain (usually NaN) then make step size smaller by maximum + if isoutofdomain(tmp.y[d]) + return 10., dt*facmin, timout_after_nan + end + + tmp.yerr[d] = tmp.yerr[d]/(prob.abstol + max(norm(prob.y0[d]), norm(tmp.y[d]))*prob.reltol) # Eq 4.10 + end + + err = norm(tmp.yerr, 2) # Eq. 
4.11 + newdt = min(prob.maxstep, dt*max(facmin, fac*(1/err)^(1/(order+1)))) # Eq 4.13 modified + + if timeout > 0 + newdt = min(newdt, dt) + timeout -= 1 + end + + return err, newdt, timeout +end + + +function hinit(F, y0, t0, tstop, method, reltol, abstol) + # Returns first step size + order = minimum(method.order) + tau = max(reltol*norm(y0, Inf), abstol) + d0 = norm(y0, Inf)/tau + f0 = F(t0, y0) + d1 = norm(f0, Inf)/tau + if d0 < 1e-5 || d1 < 1e-5 + h0 = 1e-6 + else + h0 = 0.01*(d0/d1) + end + # perform Euler step + y1 = y0 + h0*f0 + f1 = F(t0 + h0, y1) + # estimate second derivative + d2 = norm(f1 - f0, Inf)/(tau*h0) + if max(d1, d2) <= 1e-15 + h1 = max(1e-6, 1e-3*h0) + else + pow = -(2 + log10(max(d1, d2)))/(order+1) + h1 = 10^pow + end + return min(100*h0, h1, tstop-t0) +end + + +# For clarity we pass the TempArrays part of the state separately, +# this is the only part of state that can be changed here +function calc_next_k!(tmp :: TempArrays, i, state :: State, prob :: Problem) + dof = length(state.y) + t, dt, a, c = state.t, state.dt, prob.method.a, prob.method.c + + tmp.y[:] = state.y + for j=1:i-1 + for d=1:dof + tmp.y[d] += dt * tmp.ks[j][d] * a[i,j] + end + end + tmp.ks[i] = prob.F(t + c[i]*dt, tmp.y) + + nothing +end diff --git a/src/runge_kutta.jl b/src/runge_kutta.jl deleted file mode 100644 index 4a3095cd5..000000000 --- a/src/runge_kutta.jl +++ /dev/null @@ -1,16 +0,0 @@ -# Explicit Runge-Kutta solvers -############################## -# (Hairer & Wanner 1992 p.134, p.165-169) - -import Base: start, next, done, call - -include("tableaus.jl") - -# include("algorithms.jl") - -include("iterators.jl") - -# include("iterators.jl") -# include("fixed.jl") -# include("variable.jl") -include("dense.jl") From 1c8f561c3beba5f7bba0781e965189a222c1de88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 19 Nov 2015 15:43:46 +0100 Subject: [PATCH 005/113] Bug fixes and reverse time integration --- src/dense.jl | 10 ++++++---- 
src/iterators.jl | 28 +++++++++++++++++++++++++++ src/rk.jl | 49 ++++++++++++++++++++++++++---------------------- 3 files changed, 61 insertions(+), 26 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 8159cb628..db382e141 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -24,11 +24,12 @@ immutable DenseProblem solver points :: Symbol tspan + stopevent end -function dense(F, y0, t0, solver; tspan = [Inf], points = :all, kargs...) - return DenseProblem(F, y0, t0, solver, points, tspan) +function dense(F, y0, t0, solver; tspan = [Inf], points = :all, stopevent = ()->false, kargs...) + return DenseProblem(F, y0, t0, solver, points, tspan, stopevent) end @@ -36,8 +37,8 @@ function start(prob :: DenseProblem) t0 = prob.t0 y0 = prob.y0 dy0 = prob.F(t0,y0) - step0 = Step(t0,y0,dy0) - step1 = Step(t0,y0,dy0) + step0 = Step(t0,deepcopy(y0),deepcopy(dy0)) + step1 = Step(t0,deepcopy(y0),deepcopy(dy0)) solver_state = start(prob.solver) ytmp = deepcopy(prob.y0) return DenseState(step0, step1, prob.t0, true, solver_state, ytmp) @@ -83,6 +84,7 @@ function next(prob :: DenseProblem, state :: DenseState) t_goal = min(t_goal,t1) break end + end # at this point we have t_goal∈[t0,t1] so we can apply the diff --git a/src/iterators.jl b/src/iterators.jl index 489c3aefb..c18489547 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -2,3 +2,31 @@ import Base: start, next, done, call include("dense.jl") include("rk.jl") + +# this wraps any iterator (method) returning pairs (t,y) in a dense +# output and also covers the reverse time integration +function solver(F, y0, t0; + tstop = Inf, + tspan = [tstop], + method = bt_feuler, + kargs...) + + if tstop >= t0 + # forward time integration + sol = method(F,y0,t0; tstop = tstop, kargs...) + dense_sol = dense(F, y0, t0, sol; tspan = tspan, kargs...) 
+ return dense_sol + else + # reverse time integration + F_reverse(t,y)=-F(2*t0-t,y) + reverse_output(t,y)=(2*t0-t,y) + sol = solver(F_reverse,y0,t0; + tstop = 2*t0 -tstop, + tspan = 2*t0.-tspan, + kargs...) + dense_sol = dense(F, y0, t0, sol; tspan = tspan, kargs...) + + return imap(x->reverse_output(x...),sol) + end + +end diff --git a/src/rk.jl b/src/rk.jl index c3bfc9560..9622843b2 100644 --- a/src/rk.jl +++ b/src/rk.jl @@ -1,6 +1,8 @@ # This file contains the implementation of explicit Runkge-Kutta # solver from (Hairer & Wanner 1992 p.134, p.165-169). +using Iterators + # include the Butcher tableaus. include("tableaus.jl") @@ -21,8 +23,8 @@ end immutable Problem{MethodType} - F method + F y0 t0 dt0 @@ -34,28 +36,26 @@ immutable Problem{MethodType} end -function solver(F, y0, t0; - tstop = Inf, - method = bt_feuler, - reltol = 1e-5, - abstol = 1e-5, - minstep = 1e-10, - maxstep = 1/minstep, - dt0 = hinit(F, y0, t0, tstop, method, reltol, abstol), - tspan = [tstop], - kargs...) - - if isadaptive(method) +# overload the call method for TableauRKExplicit, it returns the +# iterator (fixed or variable step according to the tableau) +function call(tab::TableauRKExplicit, + F, y0, t0; + tstop = Inf, + reltol = 1e-5, + abstol = 1e-5, + minstep = 1e-10, + maxstep = 1/minstep, + dt0 = hinit(F, y0, t0, tstop, tab, reltol, abstol), + kargs... + ) + + if isadaptive(tab) methodtype = :adaptive else methodtype = :fixed end - solver = Problem{methodtype}(F, method, y0, t0, dt0, tstop, reltol, abstol, minstep, maxstep) - dense_solver = dense(F, y0, t0, solver; tspan = tspan, kargs...) 
- - return dense_solver - + return Problem{methodtype}(tab, F, y0, t0, dt0, tstop, reltol, abstol, minstep, maxstep) end @@ -66,7 +66,7 @@ function start(problem :: Problem) tmp.ks[1] = problem.F(t0,y0) # we assume that ks[1] is already initialized timeout = 0 # for step control - return State(t0,dt0,y0,tmp,timeout) + return State(t0,dt0,deepcopy(y0),tmp,timeout) end @@ -74,10 +74,12 @@ function done(prob :: Problem, state :: State) return state.t >= prob.tstop || state.dt < prob.minstep end + ##################### # Fixed step method # ##################### + function next(prob :: Problem{:fixed}, state :: State) dof = length(state.y) for s=1:S(prob.method) @@ -90,10 +92,12 @@ function next(prob :: Problem{:fixed}, state :: State) return ((state.t,state.y), state) end + ######################## # Adaptive step method # ######################## + function next(prob :: Problem{:adaptive}, state :: State) const timeout_const = 5 @@ -247,6 +251,7 @@ end function hinit(F, y0, t0, tstop, method, reltol, abstol) # Returns first step size + tdir = sign(tstop - t0) order = minimum(method.order) tau = max(reltol*norm(y0, Inf), abstol) d0 = norm(y0, Inf)/tau @@ -258,8 +263,8 @@ function hinit(F, y0, t0, tstop, method, reltol, abstol) h0 = 0.01*(d0/d1) end # perform Euler step - y1 = y0 + h0*f0 - f1 = F(t0 + h0, y1) + y1 = y0 + tdir*h0*f0 + f1 = F(t0 + tdir*h0, y1) # estimate second derivative d2 = norm(f1 - f0, Inf)/(tau*h0) if max(d1, d2) <= 1e-15 @@ -268,7 +273,7 @@ function hinit(F, y0, t0, tstop, method, reltol, abstol) pow = -(2 + log10(max(d1, d2)))/(order+1) h1 = 10^pow end - return min(100*h0, h1, tstop-t0) + return min(100*h0, h1, tdir*abs(tstop-t0)) end From 333d422f3c3a077c5abbff90a7569c24c710e662 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 19 Nov 2015 16:02:55 +0100 Subject: [PATCH 006/113] Rootfinding --- src/dense.jl | 53 +++++++++++++++++++++++++++++++++++++++++++----- src/iterators.jl | 18 +++++++++------- 2 files changed, 59 
insertions(+), 12 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index db382e141..ab4087a14 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -25,11 +25,12 @@ immutable DenseProblem points :: Symbol tspan stopevent + roottol end -function dense(F, y0, t0, solver; tspan = [Inf], points = :all, stopevent = ()->false, kargs...) - return DenseProblem(F, y0, t0, solver, points, tspan, stopevent) +function dense(F, y0, t0, solver; tspan = [Inf], points = :all, stopevent = (t,y)->false, roottol = 1e-5, kargs...) + return DenseProblem(F, y0, t0, solver, points, tspan, stopevent, roottol) end @@ -80,7 +81,7 @@ function next(prob :: DenseProblem, state :: DenseState) t0, t1 = s0.t, s1.t # we made a successfull step and points == :all - if prob.points == :all + if prob.points == :all || prob.stopevent(t1,s1.y) t_goal = min(t_goal,t1) break end @@ -92,7 +93,18 @@ function next(prob :: DenseProblem, state :: DenseState) s0.dy[:], s1.dy[:] = prob.F(t0,s0.y), prob.F(t1,s1.y) - hermite_interp!(state.ytmp,t_goal,s0,s1) + if prob.stopevent(t1,s1.y) + function stopfun(t) + hermite_interp!(state.ytmp,t,s0,s1) + res = typeof(t0)(prob.stopevent(t,state.ytmp)) + return 2*res-1 # -1 if false, +1 if true + end + t_goal = findroot(stopfun, [s0.t,s1.t], prob.roottol) + # state.ytmp is already overwwriten to the correct result as a + # side-effect of calling stopfun + else + hermite_interp!(state.ytmp,t_goal,s0,s1) + end # update the last output time state.last_tout = t_goal @@ -103,7 +115,12 @@ end function done(prob :: DenseProblem, state :: DenseState) - return done(prob.solver, state.solver_state) || state.s1.t >= prob.tspan[end] + + return ( + done(prob.solver, state.solver_state) || + state.s1.t >= prob.tspan[end] || + prob.stopevent(state.s1.t,state.s1.y) + ) end @@ -125,3 +142,29 @@ function hermite_interp!(y,t,step0::Step,step1::Step) end nothing end + + +function findroot(f,rng,eps;args...) + xl,xr = rng + fl = f(xl;args...) + fr = f(xr;args...) 
+ + if fl*fr > 0 || xl > xr + error("Inconsistent bracket") + end + + while xr-xl > eps + xm = (xl+xr)/2 + fm = f(xm;args...) + + if fm*fr > 0 + xr = xm + fr = fm + else + xl = xm + fl = fm + end + end + + return (xr+xl)/2 +end diff --git a/src/iterators.jl b/src/iterators.jl index c18489547..43595848e 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -9,6 +9,7 @@ function solver(F, y0, t0; tstop = Inf, tspan = [tstop], method = bt_feuler, + stopevent = (t,y)->false, kargs...) if tstop >= t0 @@ -18,15 +19,18 @@ function solver(F, y0, t0; return dense_sol else # reverse time integration - F_reverse(t,y)=-F(2*t0-t,y) + F_reverse(t,y) = -F(2*t0-t,y) reverse_output(t,y)=(2*t0-t,y) - sol = solver(F_reverse,y0,t0; - tstop = 2*t0 -tstop, - tspan = 2*t0.-tspan, - kargs...) - dense_sol = dense(F, y0, t0, sol; tspan = tspan, kargs...) + sol = method(F_reverse,y0,t0; + tstop = 2*t0 -tstop, + tspan = 2*t0.-tspan, + kargs...) + dense_sol = dense(F_reverse, y0, t0, sol; + tspan = 2*t0-tspan, + stopevent = (t,y)->stopevent(2*t0-t,y), + kargs...) - return imap(x->reverse_output(x...),sol) + return imap(x->reverse_output(x...),dense_sol) end end From f229ee9dc1fed58f0fd25fb5b9f33fe7af2f318d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 20 Nov 2015 11:29:21 +0100 Subject: [PATCH 007/113] Minor fixes and cleanup of the code --- src/dense.jl | 18 +++++++++++------- src/iterators.jl | 7 +++++-- src/rk.jl | 20 ++++++++++---------- 3 files changed, 26 insertions(+), 19 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index ab4087a14..6fb86e129 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -29,6 +29,12 @@ immutable DenseProblem end +# normally we return the working array, which changes at each step and +# expect the user to copy it if necessary. In order for collect to +# return the expected result we need to copy the output at each step. 
+collect{T}(t::Type{T}, prob::DenseProblem) = collect(t, imap(x->deepcopy(x),prob)) + + function dense(F, y0, t0, solver; tspan = [Inf], points = :all, stopevent = (t,y)->false, roottol = 1e-5, kargs...) return DenseProblem(F, y0, t0, solver, points, tspan, stopevent, roottol) end @@ -66,8 +72,7 @@ function next(prob :: DenseProblem, state :: DenseState) # step is saved in s0 if done(prob.solver, state.solver_state) - # TODO: this shouldn't happen - error("The iterator was exhausted before the dense output compltede.") + warn("The iterator was exhausted before the dense output completed.") else # at this point s0 holds the new step, "s2" if you will ((s0.t,s0.y[:]), state.solver_state) = next(prob.solver, state.solver_state) @@ -144,10 +149,9 @@ function hermite_interp!(y,t,step0::Step,step1::Step) end -function findroot(f,rng,eps;args...) - xl,xr = rng - fl = f(xl;args...) - fr = f(xr;args...) +function findroot(f,rng,eps) + xl, xr = rng + fl, fr = f(xl), f(xr) if fl*fr > 0 || xl > xr error("Inconsistent bracket") @@ -155,7 +159,7 @@ function findroot(f,rng,eps;args...) while xr-xl > eps xm = (xl+xr)/2 - fm = f(xm;args...) + fm = f(xm) if fm*fr > 0 xr = xm diff --git a/src/iterators.jl b/src/iterators.jl index 43595848e..206d1a12d 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -1,11 +1,11 @@ -import Base: start, next, done, call +import Base: start, next, done, call, collect include("dense.jl") include("rk.jl") # this wraps any iterator (method) returning pairs (t,y) in a dense # output and also covers the reverse time integration -function solver(F, y0, t0; +function solver(F, y0::AbstractArray, t0; tstop = Inf, tspan = [tstop], method = bt_feuler, @@ -34,3 +34,6 @@ function solver(F, y0, t0; end end + + +solver(F,y0,t0;kargs...)=solver((t,y)->[F(t,y[1])],[y0],t0;kargs...) 
diff --git a/src/rk.jl b/src/rk.jl index 9622843b2..bf391f628 100644 --- a/src/rk.jl +++ b/src/rk.jl @@ -41,9 +41,9 @@ end function call(tab::TableauRKExplicit, F, y0, t0; tstop = Inf, - reltol = 1e-5, - abstol = 1e-5, - minstep = 1e-10, + reltol = eps(typeof(t0))^(1/3), + abstol = reltol, + minstep = 10*eps(typeof(t0)), maxstep = 1/minstep, dt0 = hinit(F, y0, t0, tstop, tab, reltol, abstol), kargs... @@ -62,7 +62,7 @@ end function start(problem :: Problem) t0, dt0, y0 = problem.t0, problem.dt0, problem.y0 - tmp = TempArrays(similar(y0), Array(typeof(y0), S(problem.method)), similar(y0)) + tmp = TempArrays(deepcopy(y0), Array(typeof(y0), S(problem.method)), deepcopy(y0)) tmp.ks[1] = problem.F(t0,y0) # we assume that ks[1] is already initialized timeout = 0 # for step control @@ -249,7 +249,7 @@ function stepsize_hw92!(tmp, state, prob, dt, timeout) end -function hinit(F, y0, t0, tstop, method, reltol, abstol) +function hinit{T}(F, y0, t0::T, tstop, method, reltol, abstol) # Returns first step size tdir = sign(tstop - t0) order = minimum(method.order) @@ -257,18 +257,18 @@ function hinit(F, y0, t0, tstop, method, reltol, abstol) d0 = norm(y0, Inf)/tau f0 = F(t0, y0) d1 = norm(f0, Inf)/tau - if d0 < 1e-5 || d1 < 1e-5 - h0 = 1e-6 + if min(d0,d1) < eps(T)^(1/3) + h0 = eps(T)^(1/3)/10 else - h0 = 0.01*(d0/d1) + h0 = (d0/d1)/100 end # perform Euler step y1 = y0 + tdir*h0*f0 f1 = F(t0 + tdir*h0, y1) # estimate second derivative d2 = norm(f1 - f0, Inf)/(tau*h0) - if max(d1, d2) <= 1e-15 - h1 = max(1e-6, 1e-3*h0) + if max(d1, d2) <= 10*eps(T) + h1 = max(eps(T)^(1/3)/10, h0/10^3) else pow = -(2 + log10(max(d1, d2)))/(order+1) h1 = 10^pow From da5a09bf20333d3713a8b90974015d679d129766 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 6 Apr 2016 19:01:56 +0200 Subject: [PATCH 008/113] Some WIP for the ode23s --- src/ODE.jl | 418 ++--------------------------------------------- src/dense.jl | 38 +++-- src/helpers.jl | 25 +++ src/iterators.jl | 3 - 
src/ode23s.jl | 177 ++++++++++++++++++++ src/rk.jl | 37 +---- src/tableaus.jl | 89 ++++++++++ src/types.jl | 155 ++++++++++++++++++ 8 files changed, 486 insertions(+), 456 deletions(-) create mode 100644 src/helpers.jl create mode 100644 src/ode23s.jl create mode 100644 src/types.jl diff --git a/src/ODE.jl b/src/ODE.jl index e13927ab3..5ced478ed 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -1,10 +1,10 @@ -isdefined(Base, :__precompile__) && __precompile__() # Ordinary Differential Equation Solvers module ODE using Polynomials using Compat +using Iterators ## minimal function export list # adaptive non-stiff: @@ -16,413 +16,23 @@ export ode23s # non-adaptive stiff: export ode4s -## complete function export list: see runtests.jl - -############################################################################### -## Coefficient Tableaus -############################################################################### - -# Butcher Tableaus, or more generally coefficient tables -# see Hairer & Wanner 1992, p. 134, 166 - -abstract Tableau{Name, S, T<:Real} -# Name is the name of the tableau/method (a symbol) -# S is the number of stages (an int) -# T is the type of the coefficients -# -# TODO: have a type parameter which specifies adaptive vs non-adaptive -# -# For all types of tableaus it assumes fields: -# order::(Int...) # order of the method(s) -# -# For Runge-Kutta methods it assumes fields: -# a::Matrix{T} # SxS matrix -# b::Matrix{T} # 1 or 2 x S matrix (fixed step/ adaptive) -# c::Vector{T} # S -# -# For a tableau: -# c1 | a_11 .... a_1s -# . | a_21 . . -# . | a_31 . . -# . | .... . . -# c_s | a_s1 ....... a_ss -# -----+-------------------- -# | b_1 ... b_s this is the one used for stepping -# | b'_1 ... 
b'_s this is the one used for error-checking - -Base.eltype{N,S,T}(b::Tableau{N,S,T}) = T -order(b::Tableau) = b.order -# Subtypes need to define a convert method to convert to a different -# eltype with signature: -Base.convert{Tnew<:Real}(::Type{Tnew}, tab::Tableau) = error("Define convert method for concrete Tableau types") - -############################################################################### -## HELPER FUNCTIONS -############################################################################### - -# estimator for initial step based on book -# "Solving Ordinary Differential Equations I" by Hairer et al., p.169 -function hinit(F, x0, t0, tend, p, reltol, abstol) - # Returns first step, direction of integration and F evaluated at t0 - tdir = sign(tend-t0) - tdir==0 && error("Zero time span") - tau = max(reltol*norm(x0, Inf), abstol) - d0 = norm(x0, Inf)/tau - f0 = F(t0, x0) - d1 = norm(f0, Inf)/tau - if d0 < 1e-5 || d1 < 1e-5 - h0 = 1e-6 - else - h0 = 0.01*(d0/d1) - end - # perform Euler step - x1 = x0 + tdir*h0*f0 - f1 = F(t0 + tdir*h0, x1) - # estimate second derivative - d2 = norm(f1 - f0, Inf)/(tau*h0) - if max(d1, d2) <= 1e-15 - h1 = max(1e-6, 1e-3*h0) - else - pow = -(2. + log10(max(d1, d2)))/(p + 1.) - h1 = 10.^pow - end - return tdir*min(100*h0, h1, tdir*(tend-t0)), tdir, f0 -end - -# isoutofdomain takes the state and returns true if state is outside -# of the allowed domain. Used in adaptive step-control. -isoutofdomain(x) = isnan(x) - -function make_consistent_types(fn, y0, tspan, btab::Tableau) - # There are a few types involved in a call to a ODE solver which - # somehow need to be consistent: - # - # Et = eltype(tspan) - # Ey = eltype(y0) - # Ef = eltype(Tf) - # - # There are also the types of the containers, but they are not - # needed as `similar` is used to make containers. 
- # Tt = typeof(tspan) - # Ty = typeof(y0) # note, this can be a scalar - # Tf = typeof(F(tspan(1),y0)) # note, this can be a scalar - # - # Returns - # - Et: eltype of time, needs to be a real "continuous" type, at - # the moment a AbstractFloat - # - Eyf: suitable eltype of y and f(t,y) - # --> both of these are set to typeof(y0[1]/(tspan[end]-tspan[1])) - # - Ty: container type of y0 - # - btab: tableau with entries converted to Et - - # Needed interface: - # On components: /, - - # On container: eltype, promote_type - # On time container: eltype - - Ty = typeof(y0) - Eyf = typeof(y0[1]/(tspan[end]-tspan[1])) - - Et = eltype(tspan) - @assert Et<:Real - if !(Et<:AbstractFloat) - Et = promote_type(Et, Float64) - end - - # if all are Floats, make them the same - if Et<:AbstractFloat && Eyf<:AbstractFloat - Et = promote_type(Et, Eyf) - Eyf = Et - end - - !isleaftype(Et) && warn("The eltype(tspan) is not a concrete type! Change type of tspan for better performance.") - !isleaftype(Eyf) && warn("The eltype(y0/tspan[1]) is not a concrete type! 
Change type of y0 and/or tspan for better performance.") - - btab_ = convert(Et, btab) - return Et, Eyf, Ty, btab_ -end - -############################################################################### -## NON-STIFF SOLVERS -############################################################################### - -include("iterators.jl") - -# ODE_MS Fixed-step, fixed-order multi-step numerical method -# with Adams-Bashforth-Moulton coefficients -function ode_ms(F, x0, tspan, order::Integer) - h = diff(tspan) - x = Array(typeof(x0), length(tspan)) - x[1] = x0 - - if 1 <= order <= 4 - b = ms_coefficients4 - else - b = zeros(order, order) - b[1:4, 1:4] = ms_coefficients4 - for s = 5:order - for j = 0:(s - 1) - # Assign in correct order for multiplication below - # (a factor depending on j and s) .* (an integral of a polynomial with -(0:s), except -j, as roots) - p_int = polyint(poly(diagm(-[0:j - 1; j + 1:s - 1]))) - b[s, s - j] = ((-1)^j / factorial(j) - / factorial(s - 1 - j) * polyval(p_int, 1)) - end - end - end - - # TODO: use a better data structure here (should be an order-element circ buffer) - xdot = similar(x) - for i = 1:length(tspan)-1 - # Need to run the first several steps at reduced order - steporder = min(i, order) - xdot[i] = F(tspan[i], x[i]) - - x[i+1] = x[i] - for j = 1:steporder - x[i+1] += h[i]*b[steporder, j]*xdot[i-(steporder-1) + (j-1)] - end - end - return vcat(tspan), x -end - -# Use order 4 by default -ode4ms(F, x0, tspan) = ode_ms(F, x0, tspan, 4) -ode5ms(F, x0, tspan) = ODE.ode_ms(F, x0, tspan, 5) - -############################################################################### -## STIFF SOLVERS -############################################################################### - -# Crude forward finite differences estimator of Jacobian as fallback - -# FIXME: This doesn't really work if x is anything but a Vector or a scalar -function fdjacobian(F, x::Number, t) - ftx = F(t, x) - - # The 100 below is heuristic - dx = (x .+ (x==0))./100 - dFdx = 
(F(t,x+dx)-ftx)./dx - - return dFdx -end - -function fdjacobian(F, x, t) - ftx = F(t, x) - lx = max(length(x),1) - dFdx = zeros(eltype(x), lx, lx) - for j = 1:lx - # The 100 below is heuristic - dx = zeros(eltype(x), lx) - dx[j] = (x[j] .+ (x[j]==0))./100 - dFdx[:,j] = (F(t,x+dx)-ftx)./dx[j] - end - return dFdx -end - -# ODE23S Solve stiff systems based on a modified Rosenbrock triple -# (also used by MATLAB's ODE23s); see Sec. 4.1 in -# -# [SR97] L.F. Shampine and M.W. Reichelt: "The MATLAB ODE Suite," SIAM Journal on Scientific Computing, Vol. 18, 1997, pp. 1–22 -# -# supports keywords: points = :all | :specified (using dense output) -# jacobian = G(t,y)::Function | nothing (FD) -function ode23s(F, y0, tspan; reltol = 1.0e-5, abstol = 1.0e-8, - jacobian=nothing, - points=:all, - norm=Base.norm, - minstep=abs(tspan[end] - tspan[1])/1e18, - maxstep=abs(tspan[end] - tspan[1])/2.5, - initstep=0.) - - - # select method for computing the Jacobian - if typeof(jacobian) == Function - jac = jacobian - else - # fallback finite-difference - jac = (t, y)->fdjacobian(F, y, t) - end - - # constants - const d = 1/(2 + sqrt(2)) - const e32 = 6 + sqrt(2) +import Base.convert, Base.show +import Base: start, next, done, call, collect +## complete function export list: see runtests.jl - # initialization - t = tspan[1] - - tfinal = tspan[end] - - h = initstep - if h == 0. 
- # initial guess at a step size - h, tdir, F0 = hinit(F, y0, t, tfinal, 3, reltol, abstol) - else - tdir = sign(tfinal - t) - F0 = F(t,y0) - end - h = tdir * min(abs(h), maxstep) - - y = y0 - tout = Array(typeof(t), 1) - tout[1] = t # first output time - yout = Array(typeof(y0), 1) - yout[1] = deepcopy(y) # first output solution - - - J = jac(t,y) # get Jacobian of F wrt y - - while abs(t - tfinal) > 0 && minstep < abs(h) - if abs(t-tfinal) < abs(h) - h = tfinal - t - end - - if size(J,1) == 1 - W = one(J) - h*d*J - else - # note: if there is a mass matrix M on the lhs of the ODE, i.e., - # M * dy/dt = F(t,y) - # we can simply replace eye(J) by M in the following expression - # (see Sec. 5 in [SR97]) - - W = lufact( eye(J) - h*d*J ) - end - - # approximate time-derivative of F - T = h*d*(F(t + h/100, y) - F0)/(h/100) - - # modified Rosenbrock formula - k1 = W\(F0 + T) - F1 = F(t + 0.5*h, y + 0.5*h*k1) - k2 = W\(F1 - k1) + k1 - ynew = y + h*k2 - F2 = F(t + h, ynew) - k3 = W\(F2 - e32*(k2 - F1) - 2*(k1 - F0) + T ) - - err = (abs(h)/6)*norm(k1 - 2*k2 + k3) # error estimate - delta = max(reltol*max(norm(y),norm(ynew)), abstol) # allowable error - - # check if new solution is acceptable - if err <= delta - - if points==:specified || points==:all - # only points in tspan are requested - # -> find relevant points in (t,t+h] - for toi in tspan[(tspan.>t) & (tspan.<=t+h)] - # rescale to (0,1] - s = (toi-t)/h - - # use interpolation formula to get solutions at t=toi - push!(tout, toi) - push!(yout, y + h*( k1*s*(1-s)/(1-2*d) + k2*s*(s-2*d)/(1-2*d))) - end - end - if (points==:all) && (tout[end]!=t+h) - # add the intermediate points - push!(tout, t + h) - push!(yout, ynew) - end - - # update solution - t = t + h - y = ynew - - F0 = F2 # use FSAL property - J = jac(t,y) # get Jacobian of F wrt y - # for new solution - end - - # update of the step size - h = tdir*min( maxstep, abs(h)*0.8*(delta/err)^(1/3) ) - end - - return tout, yout -end - - -#ODEROSENBROCK Solve stiff 
differential equations, Rosenbrock method -# with provided coefficients. -function oderosenbrock(F, x0, tspan, gamma, a, b, c; jacobian=nothing) - - if typeof(jacobian) == Function - G = jacobian - else - G = (t, x)->fdjacobian(F, x, t) - end - - h = diff(tspan) - x = Array(typeof(x0), length(tspan)) - x[1] = x0 - - solstep = 1 - while solstep < length(tspan) - ts = tspan[solstep] - hs = h[solstep] - xs = x[solstep] - dFdx = G(ts, xs) - # FIXME - if size(dFdx,1) == 1 - jac = 1/gamma/hs - dFdx[1] - else - jac = eye(dFdx)/gamma/hs - dFdx - end - - g = Array(typeof(x0), size(a,1)) - g[1] = (jac \ F(ts + b[1]*hs, xs)) - x[solstep+1] = x[solstep] + b[1]*g[1] - - for i = 2:size(a,1) - dx = zero(x0) - dF = zero(x0/hs) - for j = 1:i-1 - dx += a[i,j]*g[j] - dF += c[i,j]*g[j] - end - g[i] = (jac \ (F(ts + b[i]*hs, xs + dx) + dF/hs)) - x[solstep+1] += b[i]*g[i] - end - solstep += 1 - end - return vcat(tspan), x -end - - -# Kaps-Rentrop coefficients -const kr4_coefficients = (0.231, - [0 0 0 0 - 2 0 0 0 - 4.452470820736 4.16352878860 0 0 - 4.452470820736 4.16352878860 0 0], - [3.95750374663 4.62489238836 0.617477263873 1.28261294568], - [ 0 0 0 0 - -5.07167533877 0 0 0 - 6.02015272865 0.1597500684673 0 0 - -1.856343618677 -8.50538085819 -2.08407513602 0],) - -ode4s_kr(F, x0, tspan; jacobian=nothing) = oderosenbrock(F, x0, tspan, kr4_coefficients...; jacobian=jacobian) - -# Shampine coefficients -const s4_coefficients = (0.5, - [ 0 0 0 0 - 2 0 0 0 - 48/25 6/25 0 0 - 48/25 6/25 0 0], - [19/9 1/2 25/108 125/108], - [ 0 0 0 0 - -8 0 0 0 - 372/25 12/5 0 0 - -112/125 -54/125 -2/5 0],) - -ode4s_s(F, x0, tspan; jacobian=nothing) = oderosenbrock(F, x0, tspan, s4_coefficients...; jacobian=jacobian) +# basic type definitions +include("types.jl") +include("helpers.jl") -# Use Shampine coefficients by default (matching Numerical Recipes) -const ode4s = ode4s_s +# dense output wrapper +include("dense.jl") -const ms_coefficients4 = [ 1 0 0 0 - -1/2 3/2 0 0 - 5/12 -4/3 23/12 0 - -9/24 37/24 
-59/24 55/24] +# particular solvers +include("ode23s.jl") +include("tableaus.jl") +# include("iterators.jl") +# include("multistep.jl") end # module ODE diff --git a/src/dense.jl b/src/dense.jl index 6fb86e129..4da1fbf7d 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -2,13 +2,13 @@ # iterator supporting tspan by using the method keyword, for example # ODE.newDenseProblem(..., method = ODE.bt_rk23, ...) -type Step - t; y; dy -end +# type Step +# t; y; dy +# end type DenseState - s0; s1 + s0 :: Step; s1 :: Step last_tout first_step solver_state @@ -18,13 +18,10 @@ end immutable DenseProblem - F - y0 - t0 - solver + solver :: Solution points :: Symbol tspan - stopevent + stopevent :: Function roottol end @@ -35,20 +32,26 @@ end collect{T}(t::Type{T}, prob::DenseProblem) = collect(t, imap(x->deepcopy(x),prob)) -function dense(F, y0, t0, solver; tspan = [Inf], points = :all, stopevent = (t,y)->false, roottol = 1e-5, kargs...) - return DenseProblem(F, y0, t0, solver, points, tspan, stopevent, roottol) +function dense(solver :: Solution; + tspan = [Inf], + points = :all, + stopevent = (t,y)->false, + roottol = 1e-5, + kargs...) 
+ return DenseProblem(solver, points, tspan, stopevent, roottol) end function start(prob :: DenseProblem) - t0 = prob.t0 - y0 = prob.y0 - dy0 = prob.F(t0,y0) + t0 = prob.solver.ode.t0 + y0 = prob.solver.ode.y0 + dy0 = deepcopy(y0) + prob.solver.ode.F!(t0,y0,dy0) step0 = Step(t0,deepcopy(y0),deepcopy(dy0)) step1 = Step(t0,deepcopy(y0),deepcopy(dy0)) solver_state = start(prob.solver) - ytmp = deepcopy(prob.y0) - return DenseState(step0, step1, prob.t0, true, solver_state, ytmp) + ytmp = deepcopy(y0) + return DenseState(step0, step1, t0, true, solver_state, ytmp) end @@ -96,7 +99,8 @@ function next(prob :: DenseProblem, state :: DenseState) # at this point we have t_goal∈[t0,t1] so we can apply the # interpolation - s0.dy[:], s1.dy[:] = prob.F(t0,s0.y), prob.F(t1,s1.y) + prob.solver.ode.F!(t0,s0.y,s0.dy) + prob.solver.ode.F!(t1,s1.y,s1.dy) if prob.stopevent(t1,s1.y) function stopfun(t) diff --git a/src/helpers.jl b/src/helpers.jl new file mode 100644 index 000000000..bed4cf2c9 --- /dev/null +++ b/src/helpers.jl @@ -0,0 +1,25 @@ +function hinit{T}(F, y0, t0::T, reltol, abstol; tstop = Inf, order = 1) + # Returns first step size + tdir = sign(tstop-t0) + tau = max(reltol*norm(y0, Inf), abstol) + d0 = norm(y0, Inf)/tau + f0 = F(t0, y0) + d1 = norm(f0, Inf)/tau + if min(d0,d1) < eps(T)^(1/3) + h0 = eps(T)^(1/3)/10 + else + h0 = (d0/d1)/100 + end + # perform Euler step + y1 = y0 + tdir*h0*f0 + f1 = F(t0 + tdir*h0, y1) + # estimate second derivative + d2 = norm(f1 - f0, Inf)/(tau*h0) + if max(d1, d2) <= 10*eps(T) + h1 = max(eps(T)^(1/3)/10, h0/10^3) + else + pow = -(2 + log10(max(d1, d2)))/(order+1) + h1 = 10^pow + end + return min(100*h0, h1, tdir*abs(tstop-t0)) +end diff --git a/src/iterators.jl b/src/iterators.jl index 206d1a12d..1accbf5d3 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -1,6 +1,3 @@ -import Base: start, next, done, call, collect - -include("dense.jl") include("rk.jl") # this wraps any iterator (method) returning pairs (t,y) in a dense diff --git 
a/src/ode23s.jl b/src/ode23s.jl new file mode 100644 index 000000000..4fcb8817e --- /dev/null +++ b/src/ode23s.jl @@ -0,0 +1,177 @@ +# ODE23S Solve stiff systems based on a modified Rosenbrock triple +# (also used by MATLAB's ODE23s); see Sec. 4.1 in +# +# [SR97] L.F. Shampine and M.W. Reichelt: "The MATLAB ODE Suite," SIAM Journal on Scientific Computing, Vol. 18, 1997, pp. 1–22 +# +# supports keywords: points = :all | :specified (using dense output) +# jacobian = G(t,y)::Function | nothing (FD) + +# Internal + +immutable RosenbrockStepper{T<:Number} <: AbstractStepper + d :: T + e32 :: T + + function RosenbrockStepper() + d = T(1/(2 + sqrt(2))) + e32 = T(6 + sqrt(2)) + new(d,e32) + end +end + +# default to floating point precision +RosenbrockStepper() = RosenbrockStepper{Float64}() + + +# TODO: is this correct? +order(RosenbrockStepper) = 2 + + +# define the set of ODE problems with which this stepper can work +solve(ode :: ExplicitODEInPlace, stepper :: RosenbrockStepper, options :: Options) = + Solution{RosenbrockStepper}(ode,stepper,options) + + +# higher level interface +# TODO: how to set the default initstep? +function ode23s{T}(F, y0, t0 :: T; + jacobian = (t,y)->fdjacobian(F, y, t), + # we can make a better guess for the initstep if we have access to ExplicitODE + # initstep = hinit(F, y0, t0, reltol, abstol, order = order(RosenbrockStepper)) + kargs... + ) + + ode = ExplicitODE(t0,y0,F,jacobian) + + stepper = RosenbrockStepper{T}(;kargs... + # ,initstep = initstep + ) + return solve(ode,stepper) +end + + +# lower level interface (iterator) + + +# TODO: should we re-use Step or should we just put t,y,dy explicitly +# there? +type RosenbrockState{T,S} <: AbstractState + step :: Step{T,S} + dt :: T + F1 :: S; F2 :: S + J # :: ? 
+end + + +# for debugging +function show(io::IO, state :: RosenbrockState) + show(io,state.step) + println("dt =$(state.dt)") + println("F1 =$(state.F1)") + println("F2 =$(state.F2)") + println("J =$(state.J)") +end + + +function start(s :: Solution{RosenbrockStepper}) + t = s.ode.t0 + dt = s.options.initstep + y = s.ode.y0 + dy = zero(y) + + J = Array(eltype(y),length(y),length(y)) + + step = Step(t,deepcopy(y),deepcopy(dy)) + state = RosenbrockState(step, + dt, + zero(y), # F1 + zero(y), # F2 + J) # J + # initialize the derivative and the Jacobian + s.ode.F!(t,y,step.dy) + s.ode.jac!(t,y,state.J) + + return state +end + + +function done(s :: Solution{RosenbrockStepper}, + state :: RosenbrockState) + return state.dt <= s.options.minstep +end + + +function next(s :: Solution{RosenbrockStepper}, + state :: RosenbrockState) + + stepper = s.stepper + ode = s.ode + step = state.step + opts = s.options + + F1, F2, J = state.F1, state.F2, state.J + + t, dt, y, dy = step.t, state.dt, step.y, step.dy + # F!, jac! = ode.F!, ode.jac! + d, e32 = stepper.d, stepper.e32 + + F0 = dy + + while true + + # TODO: this should go to a specialized function for type stabilty sake + # maybe make W a part of ExplicitODE? + if size(J,1) == 1 + W = one(J) - dt*d*J + else + # note: if there is a mass matrix M on the lhs of the ODE, i.e., + # M * dy/dt = F(t,y) + # we can simply replace eye(J) by M in the following expression + # (see Sec. 5 in [SR97]) + + W = lufact( eye(J) - dt*d*J ) + end + + # TODO: same for tder? 
+ # approximate time-derivative of F + ode.F!(t+dt/100,y,F1) + tder = 100*d*(F1-F0) + + # modified Rosenbrock formula + # TODO: allocate some temporary space for these variables + k1 = W \ (dy + tder) + ode.F!(t+dt/2, y+dt*k1/2, F1) + k2 = W \ (F1 - k1) + k1 + ynew = y + dt*k2 + ode.F!(t+dt, ynew, F2) + k3 = W \ (F2 - e32*(k2 - F1) - 2*(k1 - dy) + tder ) + + delta = max(opts.reltol*max(opts.norm(y), + opts.norm(ynew)), + opts.abstol) # allowable error + + + err = (dt/6)*opts.norm(k1 - 2*k2 + k3)/delta # error estimate + + # upon a failed step decrease the step size + dtnew = min(opts.maxstep, + dt*0.8*max(1,err^(-1/3)) ) + + # check if the new solution is acceptable + if err <= 1 + + # update the state and return + step.t = t+dt + state.dt = dtnew + step.y[:] = ynew + step.dy[:] = F2 + ode.jac!(step.t,step.y,J) + + return ((step.t,step.y), state) + end + + end + + return tout, yout + +end diff --git a/src/rk.jl b/src/rk.jl index bf391f628..15c46c845 100644 --- a/src/rk.jl +++ b/src/rk.jl @@ -3,19 +3,19 @@ using Iterators -# include the Butcher tableaus. -include("tableaus.jl") - #################### # Iterator methods # #################### + # common structures and functions + type TempArrays y; ks; yerr end + type State t; dt; y; tmp :: TempArrays timeout @@ -45,10 +45,11 @@ function call(tab::TableauRKExplicit, abstol = reltol, minstep = 10*eps(typeof(t0)), maxstep = 1/minstep, - dt0 = hinit(F, y0, t0, tstop, tab, reltol, abstol), + dt0 = hinit(F, y0, t0, reltol, abstol; tstop, minimum(tab.order)), kargs... 
) + # TODO make methodtype a parameter of the `tab` if isadaptive(tab) methodtype = :adaptive else @@ -249,34 +250,6 @@ function stepsize_hw92!(tmp, state, prob, dt, timeout) end -function hinit{T}(F, y0, t0::T, tstop, method, reltol, abstol) - # Returns first step size - tdir = sign(tstop - t0) - order = minimum(method.order) - tau = max(reltol*norm(y0, Inf), abstol) - d0 = norm(y0, Inf)/tau - f0 = F(t0, y0) - d1 = norm(f0, Inf)/tau - if min(d0,d1) < eps(T)^(1/3) - h0 = eps(T)^(1/3)/10 - else - h0 = (d0/d1)/100 - end - # perform Euler step - y1 = y0 + tdir*h0*f0 - f1 = F(t0 + tdir*h0, y1) - # estimate second derivative - d2 = norm(f1 - f0, Inf)/(tau*h0) - if max(d1, d2) <= 10*eps(T) - h1 = max(eps(T)^(1/3)/10, h0/10^3) - else - pow = -(2 + log10(max(d1, d2)))/(order+1) - h1 = 10^pow - end - return min(100*h0, h1, tdir*abs(tstop-t0)) -end - - # For clarity we pass the TempArrays part of the state separately, # this is the only part of state that can be changed here function calc_next_k!(tmp :: TempArrays, i, state :: State, prob :: Problem) diff --git a/src/tableaus.jl b/src/tableaus.jl index 143eedbd3..219895ea7 100644 --- a/src/tableaus.jl +++ b/src/tableaus.jl @@ -1,3 +1,42 @@ +############################################################################### +## Coefficient Tableaus +############################################################################### + +# Butcher Tableaus, or more generally coefficient tables +# see Hairer & Wanner 1992, p. 134, 166 + +abstract Tableau{Name, S, T<:Real} +# Name is the name of the tableau/method (a symbol) +# S is the number of stages (an int) +# T is the type of the coefficients +# +# TODO: have a type parameter which specifies adaptive vs non-adaptive +# +# For all types of tableaus it assumes fields: +# order::(Int...) 
# order of the method(s) +# +# For Runge-Kutta methods it assumes fields: +# a::Matrix{T} # SxS matrix +# b::Matrix{T} # 1 or 2 x S matrix (fixed step/ adaptive) +# c::Vector{T} # S +# +# For a tableau: +# c1 | a_11 .... a_1s +# . | a_21 . . +# . | a_31 . . +# . | .... . . +# c_s | a_s1 ....... a_ss +# -----+-------------------- +# | b_1 ... b_s this is the one used for stepping +# | b'_1 ... b'_s this is the one used for error-checking + +Base.eltype{N,S,T}(b::Tableau{N,S,T}) = T +order(b::Tableau) = b.order +# Subtypes need to define a convert method to convert to a different +# eltype with signature: +Base.convert{Tnew<:Real}(::Type{Tnew}, tab::Tableau) = error("Define convert method for concrete Tableau types") + + ########################################### # Tableaus for explicit Runge-Kutta methods ########################################### @@ -157,3 +196,53 @@ const bt_feh78 = TableauRKExplicit(:feh78, (7,8), Rational{Int64}, 0 0 0 0 0 34//105 9//35 9//35 9//280 9//280 0 41//840 41//840], [0, 2//27, 1//9, 1//6 , 5//12, 1//2 , 5//6 , 1//6 , 2//3 , 1//3 , 1 , 0, 1] ) + + +function make_consistent_types(fn, y0, tspan, btab::Tableau) + # There are a few types involved in a call to a ODE solver which + # somehow need to be consistent: + # + # Et = eltype(tspan) + # Ey = eltype(y0) + # Ef = eltype(Tf) + # + # There are also the types of the containers, but they are not + # needed as `similar` is used to make containers. 
+ # Tt = typeof(tspan) + # Ty = typeof(y0) # note, this can be a scalar + # Tf = typeof(F(tspan(1),y0)) # note, this can be a scalar + # + # Returns + # - Et: eltype of time, needs to be a real "continuous" type, at + # the moment a AbstractFloat + # - Eyf: suitable eltype of y and f(t,y) + # --> both of these are set to typeof(y0[1]/(tspan[end]-tspan[1])) + # - Ty: container type of y0 + # - btab: tableau with entries converted to Et + + # Needed interface: + # On components: /, - + # On container: eltype, promote_type + # On time container: eltype + + Ty = typeof(y0) + Eyf = typeof(y0[1]/(tspan[end]-tspan[1])) + + Et = eltype(tspan) + @assert Et<:Real + if !(Et<:AbstractFloat) + Et = promote_type(Et, Float64) + end + + # if all are Floats, make them the same + if Et<:AbstractFloat && Eyf<:AbstractFloat + Et = promote_type(Et, Eyf) + Eyf = Et + end + + !isleaftype(Et) && warn("The eltype(tspan) is not a concrete type! Change type of tspan for better performance.") + !isleaftype(Eyf) && warn("The eltype(y0/tspan[1]) is not a concrete type! Change type of y0 and/or tspan for better performance.") + + btab_ = convert(Et, btab) + return Et, Eyf, Ty, btab_ +end diff --git a/src/types.jl b/src/types.jl new file mode 100644 index 000000000..51717a499 --- /dev/null +++ b/src/types.jl @@ -0,0 +1,155 @@ +abstract AbstractODE + +type ExplicitODE <: AbstractODE + t0; y0 # initial data + F :: Function # solve y'=F(t,y) + jac :: Function # optional jacobian of F +end + +type ExplicitODEInPlace <: AbstractODE + t0; y0 + F! :: Function + jac! :: Function +end + + +# plug in the numerical jacobian if none is provided +ExplicitODE(t,y,F)=ExplicitODE(t,y,F,(t,y)->fdjacobian(F,t,y)) + + +function convert(::Type{ExplicitODEInPlace}, ode :: ExplicitODE) + function F!(t,y,dy) + dy[:] = ode.F(t,y) + end + function jac!(t,y,J) + J[:] = ode.jac(t,y) + end + return ExplicitODEInPlace(ode.t0,ode.y0,F!,jac!) 
+end + + +function convert(::Type{ExplicitODE}, ode :: ExplicitODEInPlace) + function F(t,y) + dy = deepcopy(y) + ode.F!(t,y,dy) + return dy + end + function jac(t,y) + n = length(y) + J = Array(eltype(dy),n,n) + ode.jac!(t,y,J) + return J + end + return ExplicitODE(ode.t0,ode.y0,F,jac) +end + + +abstract AbstractStepper +abstract AbstractState + + +# this might suffice for some solvers +type Step{T,S} <: AbstractState + t :: T + y :: S + dy :: S +end + + +# for debugging +function show(io::IO, state :: Step) + println("t =$(state.t)") + println("y =$(state.y)") + println("dy =$(state.dy)") +end + + +# general purpose options +immutable Options{T} + initstep :: T + tstop :: T + reltol :: T + abstol :: T + minstep :: T + maxstep :: T + norm :: Function + + function Options(; + tstop = T(Inf), + reltol = eps(T)^(1/3), + abstol = reltol, + minstep = 10*eps(T), + maxstep = 1/minstep, + initstep = reltol, # TODO: we need a better guess + # here, possibly overwrite it + # in the call to solve() + norm = Base.norm, + kargs...) + new(initstep,tstop,reltol,abstol,minstep,maxstep,norm) + end + +end + + +function show{T}(io::IO, opts :: Options{T}) + println("") + println("Options{$T}") + println("tstop = $(opts.tstop)") + println("reltol = $(opts.reltol)") + println("abstol = $(opts.abstol)") + println("minstep = $(opts.minstep)") + println("maxstep = $(opts.maxstep)") + println("initstep = $(opts.initstep)") + println("norm = $(opts.norm)") +end + + +# default to floating point precision +Options(args...) = Options{Float64}(args...) + + +# solution is a collection of an equation, an integration method +# (stepper) and its options +type Solution{T<:AbstractStepper} + ode :: AbstractODE + stepper :: T + options :: Options +end + + +# TODO: is this the right way to implement the mid level interface? +solve(ode, stepper; kargs...) 
= solve(ode, stepper, Options(kargs...)) + +# filter the wrong combinations of ode and stepper +solve{T,S}(ode :: T, stepper :: S, options :: Options) = error("The $S doesn't support $T") + +# convert the ExplicitODE to the default in place version +solve(ode :: ExplicitODE, stepper, options :: Options) = solve(convert(ExplicitODEInPlace,ode), stepper, options) + + + +# some leftovers from the previous implementation + +# FIXME: This doesn't really work if x is anything but a Vector or a scalar +function fdjacobian(F, t, x::Number) + ftx = F(t, x) + + # The 100 below is heuristic + dx = (x .+ (x==0))./100 + dFdx = (F(t,x+dx)-ftx)./dx + + return dFdx +end + +function fdjacobian(F, t, x) + ftx = F(t, x) + lx = max(length(x),1) + dFdx = zeros(eltype(x), lx, lx) + for j = 1:lx + # The 100 below is heuristic + dx = zeros(eltype(x), lx) + dx[j] = (x[j] .+ (x[j]==0))./100 + dFdx[:,j] = (F(t,x+dx)-ftx)./dx[j] + end + return dFdx +end From 1b7742df339d8f9326f67f2c5dc5623d1f58dfe7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 8 Apr 2016 15:36:35 +0200 Subject: [PATCH 009/113] RK stepper WIP --- src/ODE.jl | 7 +- src/dense.jl | 12 +++ src/helpers.jl | 2 + src/iterators.jl | 2 - src/ode23s.jl | 36 ++----- src/rk.jl | 251 ++++++++++++++++++++++++++++------------------- src/tableaus.jl | 2 +- src/types.jl | 8 +- 8 files changed, 182 insertions(+), 138 deletions(-) diff --git a/src/ODE.jl b/src/ODE.jl index 5ced478ed..07fa57f62 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -30,9 +30,10 @@ include("dense.jl") # particular solvers include("ode23s.jl") - -include("tableaus.jl") -# include("iterators.jl") +include("rk.jl") # include("multistep.jl") +include("iterators.jl") +# include("interfaces.jl") + end # module ODE diff --git a/src/dense.jl b/src/dense.jl index 4da1fbf7d..484171b77 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -17,7 +17,12 @@ type DenseState end +# TODO: would it be possible to make the DenseProblem a +# Solver{DenseProblem} instead? 
immutable DenseProblem + # TODO: solver has options in it, maybe we should move the points, + # tspan, stopevent, roottol to options instead of having them + # hanging around here? solver :: Solution points :: Symbol tspan @@ -25,6 +30,13 @@ immutable DenseProblem roottol end +# TODO: perhaps something like this? +# solve(ode, stepper :: DenseStepper, options :: Options) = Solver(ode,stepper,options) +# function solve(ode, stepper, options :: Options) +# solver = Solver(ode, stepper, options) +# Solver(ode,DenseStepper(solver),options) +# end + # normally we return the working array, which changes at each step and # expect the user to copy it if necessary. In order for collect to diff --git a/src/helpers.jl b/src/helpers.jl index bed4cf2c9..f815f8000 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -1,3 +1,5 @@ +isoutofdomain = isnan + function hinit{T}(F, y0, t0::T, reltol, abstol; tstop = Inf, order = 1) # Returns first step size tdir = sign(tstop-t0) diff --git a/src/iterators.jl b/src/iterators.jl index 1accbf5d3..1bfab4863 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -1,5 +1,3 @@ -include("rk.jl") - # this wraps any iterator (method) returning pairs (t,y) in a dense # output and also covers the reverse time integration function solver(F, y0::AbstractArray, t0; diff --git a/src/ode23s.jl b/src/ode23s.jl index 4fcb8817e..e79c635b0 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -8,11 +8,11 @@ # Internal -immutable RosenbrockStepper{T<:Number} <: AbstractStepper +immutable ModifiedRosenbrockStepper{T<:Number} <: AbstractStepper d :: T e32 :: T - function RosenbrockStepper() + function ModifiedRosenbrockStepper() d = T(1/(2 + sqrt(2))) e32 = T(6 + sqrt(2)) new(d,e32) @@ -20,34 +20,16 @@ immutable RosenbrockStepper{T<:Number} <: AbstractStepper end # default to floating point precision -RosenbrockStepper() = RosenbrockStepper{Float64}() +ModifiedRosenbrockStepper() = ModifiedRosenbrockStepper{Float64}() # TODO: is this correct? 
-order(RosenbrockStepper) = 2 +order(ModifiedRosenbrockStepper) = 2 # define the set of ODE problems with which this stepper can work -solve(ode :: ExplicitODEInPlace, stepper :: RosenbrockStepper, options :: Options) = - Solution{RosenbrockStepper}(ode,stepper,options) - - -# higher level interface -# TODO: how to set the default initstep? -function ode23s{T}(F, y0, t0 :: T; - jacobian = (t,y)->fdjacobian(F, y, t), - # we can make a better guess for the initstep if we have access to ExplicitODE - # initstep = hinit(F, y0, t0, reltol, abstol, order = order(RosenbrockStepper)) - kargs... - ) - - ode = ExplicitODE(t0,y0,F,jacobian) - - stepper = RosenbrockStepper{T}(;kargs... - # ,initstep = initstep - ) - return solve(ode,stepper) -end +solve(ode :: ExplicitODEInPlace, stepper :: ModifiedRosenbrockStepper, options :: Options) = + Solution{ModifiedRosenbrockStepper}(ode,stepper,options) # lower level interface (iterator) @@ -73,7 +55,7 @@ function show(io::IO, state :: RosenbrockState) end -function start(s :: Solution{RosenbrockStepper}) +function start(s :: Solution{ModifiedRosenbrockStepper}) t = s.ode.t0 dt = s.options.initstep y = s.ode.y0 @@ -95,13 +77,13 @@ function start(s :: Solution{RosenbrockStepper}) end -function done(s :: Solution{RosenbrockStepper}, +function done(s :: Solution{ModifiedRosenbrockStepper}, state :: RosenbrockState) return state.dt <= s.options.minstep end -function next(s :: Solution{RosenbrockStepper}, +function next(s :: Solution{ModifiedRosenbrockStepper}, state :: RosenbrockState) stepper = s.stepper diff --git a/src/rk.jl b/src/rk.jl index 15c46c845..77ecdb912 100644 --- a/src/rk.jl +++ b/src/rk.jl @@ -1,78 +1,98 @@ # This file contains the implementation of explicit Runkge-Kutta # solver from (Hairer & Wanner 1992 p.134, p.165-169). 
-using Iterators +include("tableaus.jl") -#################### -# Iterator methods # -#################### +# intermediate level interface -# common structures and functions +immutable TableauStepper{StepType,MethodType} <: AbstractStepper + tableau :: Tableau # Butcher tableau +end -type TempArrays - y; ks; yerr -end +function TableauStepper(tableau :: Tableau) + if isadaptive(tableau) + steptype = :adaptive + else + steptype = :fixed + end + if isexplicit(tableau) + methodtype = :explicit + else + methodtype = :implicit + end -type State - t; dt; y; tmp :: TempArrays - timeout + # convert the method table to T here + return TableauStepper{steptype,methodtype}(tableau) end -immutable Problem{MethodType} - method - F - y0 - t0 - dt0 - tstop - reltol - abstol - minstep - maxstep +order(stepper :: TableauStepper) = minimum(order(stepper.tableau)) + + +# TODO: possibly handle the initial stepsize and the tableau conversion here? +solve{T}(ode :: ExplicitODEInPlace, stepper :: TableauStepper{T,:explicit}, options :: Options) = + Solution{TableauStepper{T,:explicit}}(ode,stepper,options) + + +# lower level interface + +abstract AbstractTableauState + + +# explicit RK stepper + +type RKTempArrays{T} + y :: T + ynew :: T + yerr :: T + ks :: Vector{T} end -# overload the call method for TableauRKExplicit, it returns the -# iterator (fixed or variable step according to the tableau) -function call(tab::TableauRKExplicit, - F, y0, t0; - tstop = Inf, - reltol = eps(typeof(t0))^(1/3), - abstol = reltol, - minstep = 10*eps(typeof(t0)), - maxstep = 1/minstep, - dt0 = hinit(F, y0, t0, reltol, abstol; tstop, minimum(tab.order)), - kargs... 
- ) - - # TODO make methodtype a parameter of the `tab` - if isadaptive(tab) - methodtype = :adaptive - else - methodtype = :fixed - end +type TableauExplicitState{T,S} <: AbstractTableauState + step :: Step{T,S} + dt :: T + tmp :: RKTempArrays{S} + timeout :: Int +end - return Problem{methodtype}(tab, F, y0, t0, dt0, tstop, reltol, abstol, minstep, maxstep) +function show(io :: IO, state :: TableauExplicitState) + show(state.step) + println("dt = $(state.dt)") + println("timeout = $(state.timeout)") + println("tmp = $(state.tmp)") end -function start(problem :: Problem) - t0, dt0, y0 = problem.t0, problem.dt0, problem.y0 +function start{T}(s :: Solution{TableauStepper{T,:explicit}}) + t0, dt0, y0 = s.ode.t0, s.options.initstep, s.ode.y0 + + # TODO: we should do the Butcher table conversion somewhere + lk = lengthks(s.stepper.tableau) + tmp = RKTempArrays(zero(y0), # y + zero(y0), # ynew + zero(y0), # yerr + Array(typeof(y0), lk)) # ks + + for i = 1:lk + tmp.ks[i] = zero(y0) + end + + # pre-initialize tmp.ks[1] + s.ode.F!(t0,y0,tmp.ks[1]) - tmp = TempArrays(deepcopy(y0), Array(typeof(y0), S(problem.method)), deepcopy(y0)) - tmp.ks[1] = problem.F(t0,y0) # we assume that ks[1] is already initialized + step = Step(t0,deepcopy(y0),deepcopy(tmp.ks[1])) timeout = 0 # for step control - return State(t0,dt0,deepcopy(y0),tmp,timeout) + return TableauExplicitState(step,dt0,tmp,timeout) end -function done(prob :: Problem, state :: State) - return state.t >= prob.tstop || state.dt < prob.minstep +function done{T,S}(s :: Solution{TableauStepper{T,S}}, state :: AbstractTableauState) + return state.step.t >= s.options.tstop || state.dt < s.options.minstep end @@ -81,16 +101,21 @@ end ##################### -function next(prob :: Problem{:fixed}, state :: State) - dof = length(state.y) - for s=1:S(prob.method) - calc_next_k!(state.tmp, s, state, prob) +function next(s :: Solution{TableauStepper{:fixed,:explicit}}, state :: TableauExplicitState) + step = state.step + tmp = state.tmp 
+ + dof = length(step.y) + b = s.stepper.tableau.b + + for k=1:lengthks(s.stepper.tableau) + calc_next_k!(state.tmp, k, s.ode, s.stepper.tableau, step, state.dt) for d=1:dof - state.y[d] += state.dt * prob.method.b[s]*state.tmp.ks[s][d] + step.y[d] += state.dt * b[k]*tmp.ks[k][d] end end - state.t += state.dt - return ((state.t,state.y), state) + step.t += state.dt + return ((step.t,step.y), state) end @@ -99,7 +124,7 @@ end ######################## -function next(prob :: Problem{:adaptive}, state :: State) +function next(sol :: Solution{TableauStepper{:adaptive,:explicit}}, state :: TableauExplicitState) const timeout_const = 5 @@ -108,8 +133,8 @@ function next(prob :: Problem{:adaptive}, state :: State) # modified inside the loop timeout = state.timeout - # for aesthetical reasons we extract the temporary componen tmp = state.tmp + step = state.step # The while loop continues until we either find a stepsize which # leads to a small enough error or the stepsize reaches @@ -117,10 +142,13 @@ function next(prob :: Problem{:adaptive}, state :: State) while true - # do one step (assumes ks[1]==f0), changes only tmp - err, newdt, timeout = rk_trial_step!(tmp, state, prob, dt, timeout) + # Do one step (assumes ks[1]==f0). After calling tmp.ynew + # holds the new step. + # TODO: return ynew instead of passing it as tmp.ynew? + err, newdt, timeout = + rk_trial_step!(tmp, sol.ode, step, sol.stepper.tableau, dt, timeout, sol.options) - if abs(newdt) < prob.minstep # minimum step size reached, break + if abs(newdt) < sol.options.minstep # minimum step size reached, break println("Warning: dt < minstep. 
Stopping.") # passing the newdt to state will result in done() state.dt = newdt @@ -135,24 +163,24 @@ function next(prob :: Problem{:adaptive}, state :: State) # step is accepted # preload ks[1] for the next step - if isFSAL(prob.method) - tmp.ks[1] = tmp.ks[S(prob.method)] + if isFSAL(sol.stepper.tableau) + tmp.ks[1][:] = tmp.ks[end] else - tmp.ks[1] = prob.F(state.t+dt, state.tmp.y) + sol.ode.F!(step.t+dt, tmp.ynew, tmp.ks[1]) end # Swap bindings of y and ytrial, avoids one copy - state.y, state.tmp.y = state.tmp.y, state.y + step.y, tmp.ynew = tmp.ynew, step.y # Update state with the data from the step we have just # made: - state.t += dt + step.t += dt state.dt = newdt state.timeout = timeout break end end - return ((state.t,state.y),state) + return ((step.t,step.y),state) end @@ -161,53 +189,64 @@ end ########################## -function rk_trial_step!(tmp, state, prob, dt, timeout) +function rk_trial_step!(tmp :: RKTempArrays, + ode :: ExplicitODEInPlace, + last_step :: Step, + tableau :: TableauRKExplicit, + dt, + timeout, + options :: Options) # tmp.y and tmp.yerr and tmp.ks are updated after this step - rk_embedded_step!(tmp, state, prob, dt) + rk_embedded_step!(tmp, ode, tableau, last_step, dt) # changes tmp.yerr (via in place update) - err, newdt, timeout = stepsize_hw92!(tmp, state, prob, dt, timeout) + err, newdt, timeout = stepsize_hw92!(tmp, last_step, tableau, dt, timeout, options) return err, newdt, timeout end -function rk_embedded_step!(tmp :: TempArrays, state :: State, prob :: Problem, dt) +function rk_embedded_step!(tmp :: RKTempArrays, + ode :: ExplicitODEInPlace, + tableau :: Tableau, + last_step :: Step, + dt) # Does one embedded R-K step updating ytrial, yerr and ks. # # Assumes that ks[:,1] is already calculated! 
# - # Modifies tmp.y and tmp.yerr only + # Modifies tmp.y, tmp.ynew and tmp.yerr only - y = state.y + y = last_step.y dof = length(y) - b = prob.method.b + b = tableau.b - tmp.y[:] = 0 + tmp.ynew[:] = 0 tmp.yerr[:] = 0 - for d=1:dof - tmp.y[d] += b[1,1]*tmp.ks[1][d] - tmp.yerr[d] += b[2,1]*tmp.ks[1][d] - end - - for s=2:S(prob.method) - calc_next_k!(state.tmp, s, state, prob) - for d=1:dof - tmp.y[d] += b[1,s]*tmp.ks[s][d] - tmp.yerr[d] += b[2,s]*tmp.ks[s][d] + for s=1:lengthks(tableau) + # we skip the first step beacause we assume that tmp.ks[1] is + # already computed + if s > 1 + calc_next_k!(tmp, s, ode, tableau, last_step, dt) end + tmp.ynew[:] += b[1,s]*tmp.ks[s] + tmp.yerr[:] += b[2,s]*tmp.ks[s] end - for d=1:dof - tmp.yerr[d] = dt * (tmp.y[d]-tmp.yerr[d]) - tmp.y[d] = y[d] + dt * tmp.y[d] - end + tmp.yerr[:] = dt*(tmp.ynew-tmp.yerr) + tmp.ynew[:] = y + dt*tmp.ynew + end -function stepsize_hw92!(tmp, state, prob, dt, timeout) +function stepsize_hw92!(tmp, + last_step :: Step, + tableau :: TableauRKExplicit, + dt, + timeout, + options :: Options) # Estimates the error and a new step size following Hairer & # Wanner 1992, p167 (with some modifications) # @@ -220,12 +259,12 @@ function stepsize_hw92!(tmp, state, prob, dt, timeout) # - allow component-wise reltol and abstol? # - allow other norms - order = minimum(prob.method.order) + ord = minimum(order(tableau)) timout_after_nan = 5 - fac = [0.8, 0.9, 0.25^(1/(order+1)), 0.38^(1/(order+1))][1] + fac = [0.8, 0.9, 0.25^(1/(ord+1)), 0.38^(1/(ord+1))][1] facmax = 5.0 # maximal step size increase. 1.5-5 facmin = 1./facmax # maximal step size decrease. ? 
- dof = length(state.y) + dof = length(last_step.y) # in-place calculate yerr./tol for d=1:dof @@ -235,11 +274,14 @@ function stepsize_hw92!(tmp, state, prob, dt, timeout) return 10., dt*facmin, timout_after_nan end - tmp.yerr[d] = tmp.yerr[d]/(prob.abstol + max(norm(prob.y0[d]), norm(tmp.y[d]))*prob.reltol) # Eq 4.10 + y0 = last_step.y[d] # TODO: is this supposed to be the last successful step? + y1 = tmp.ynew[d] # the approximation to the next step + sci = (options.abstol + options.reltol*max(norm(y0),norm(y1))) + tmp.yerr[d] = tmp.yerr[d]/sci # Eq 4.10 end err = norm(tmp.yerr, 2) # Eq. 4.11 - newdt = min(prob.maxstep, dt*max(facmin, fac*(1/err)^(1/(order+1)))) # Eq 4.13 modified + newdt = min(options.maxstep, dt*max(facmin, fac*(1/err)^(1/(ord+1)))) # Eq 4.13 modified if timeout > 0 newdt = min(newdt, dt) @@ -250,19 +292,24 @@ function stepsize_hw92!(tmp, state, prob, dt, timeout) end -# For clarity we pass the TempArrays part of the state separately, +# For clarity we pass the RKTempArrays part of the state separately, # this is the only part of state that can be changed here -function calc_next_k!(tmp :: TempArrays, i, state :: State, prob :: Problem) - dof = length(state.y) - t, dt, a, c = state.t, state.dt, prob.method.a, prob.method.c - - tmp.y[:] = state.y +function calc_next_k!(tmp :: RKTempArrays, + i :: Int, + ode :: ExplicitODEInPlace, + tableau :: Tableau, + last_step :: Step, + dt) + dof = length(last_step.y) + t, a, c = last_step.t, tableau.a, tableau.c + + tmp.y[:] = last_step.y for j=1:i-1 for d=1:dof tmp.y[d] += dt * tmp.ks[j][d] * a[i,j] end end - tmp.ks[i] = prob.F(t + c[i]*dt, tmp.y) + ode.F!(t + c[i]*dt, tmp.y, tmp.ks[i]) nothing end diff --git a/src/tableaus.jl b/src/tableaus.jl index 219895ea7..0ab5fb0da 100644 --- a/src/tableaus.jl +++ b/src/tableaus.jl @@ -78,7 +78,7 @@ end conv_field{T,N}(D,a::Array{T,N}) = convert(Array{D,N}, a) -S(tab::TableauRKExplicit) = length(tab.c) +lengthks(tab::TableauRKExplicit) = length(tab.c) function 
Base.convert{Tnew<:Real,Name,S,T}(::Type{Tnew}, tab::TableauRKExplicit{Name,S,T}) # Converts the tableau coefficients to the new type Tnew diff --git a/src/types.jl b/src/types.jl index 51717a499..ffa2601be 100644 --- a/src/types.jl +++ b/src/types.jl @@ -84,7 +84,7 @@ immutable Options{T} # here, possibly overwrite it # in the call to solve() norm = Base.norm, - kargs...) + kargs...) new(initstep,tstop,reltol,abstol,minstep,maxstep,norm) end @@ -123,9 +123,11 @@ solve(ode, stepper; kargs...) = solve(ode, stepper, Options(kargs...)) # filter the wrong combinations of ode and stepper solve{T,S}(ode :: T, stepper :: S, options :: Options) = error("The $S doesn't support $T") -# convert the ExplicitODE to the default in place version -solve(ode :: ExplicitODE, stepper, options :: Options) = solve(convert(ExplicitODEInPlace,ode), stepper, options) +# TODO: this might not be necessary, in the long run we should make +# ExplicitODE the in-place one and use specific constructors +# always convert ExplicitODE to ExplicitODEInPlace +solve(ode :: ExplicitODE, stepper, options :: Options) = solve(convert(ExplicitODEInPlace,ode), stepper, options) # some leftovers from the previous implementation From 4dd06153c989df90e34683ead305fee4e4a068ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Mon, 11 Apr 2016 14:16:49 +0200 Subject: [PATCH 010/113] DenseStepper compatible with Solver --- src/dense.jl | 96 +++++++++++++++++++--------------------------------- src/types.jl | 30 ++++++++++++++-- 2 files changed, 62 insertions(+), 64 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 484171b77..ae44c267f 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -1,10 +1,15 @@ -# iterator for the dense output, can be wrapped around any other -# iterator supporting tspan by using the method keyword, for example -# ODE.newDenseProblem(..., method = ODE.bt_rk23, ...) +# A higher level stepper, defined as a wrapper around another stepper. 
-# type Step -# t; y; dy -# end +immutable DenseStepper <: AbstractStepper + solver :: Solution +end + + +solve(ode :: ExplicitODEInPlace, + stepper :: DenseStepper, + options :: Options) = Solution(ode,stepper,options) + +dense(stepper :: AbstractStepper) = solve(stepper.ode, stepper, stepper.options) type DenseState @@ -17,57 +22,24 @@ type DenseState end -# TODO: would it be possible to make the DenseProblem a -# Solver{DenseProblem} instead? -immutable DenseProblem - # TODO: solver has options in it, maybe we should move the points, - # tspan, stopevent, roottol to options instead of having them - # hanging around here? - solver :: Solution - points :: Symbol - tspan - stopevent :: Function - roottol -end - -# TODO: perhaps something like this? -# solve(ode, stepper :: DenseStepper, options :: Options) = Solver(ode,stepper,options) -# function solve(ode, stepper, options :: Options) -# solver = Solver(ode, stepper, options) -# Solver(ode,DenseStepper(solver),options) -# end - - -# normally we return the working array, which changes at each step and -# expect the user to copy it if necessary. In order for collect to -# return the expected result we need to copy the output at each step. -collect{T}(t::Type{T}, prob::DenseProblem) = collect(t, imap(x->deepcopy(x),prob)) - - -function dense(solver :: Solution; - tspan = [Inf], - points = :all, - stopevent = (t,y)->false, - roottol = 1e-5, - kargs...) 
- return DenseProblem(solver, points, tspan, stopevent, roottol) -end - - -function start(prob :: DenseProblem) - t0 = prob.solver.ode.t0 - y0 = prob.solver.ode.y0 +function start(s :: Solution{DenseStepper}) + # extract the real solver + solver = s.stepper.solver + t0 = solver.ode.t0 + y0 = solver.ode.y0 dy0 = deepcopy(y0) - prob.solver.ode.F!(t0,y0,dy0) + solver.ode.F!(t0,y0,dy0) step0 = Step(t0,deepcopy(y0),deepcopy(dy0)) step1 = Step(t0,deepcopy(y0),deepcopy(dy0)) - solver_state = start(prob.solver) + solver_state = start(solver) ytmp = deepcopy(y0) return DenseState(step0, step1, t0, true, solver_state, ytmp) end -function next(prob :: DenseProblem, state :: DenseState) +function next(s :: Solution{DenseStepper}, state :: DenseState) + + solver = s.stepper.solver s0, s1 = state.s0, state.s1 t0, t1 = s0.t, s1.t @@ -78,7 +50,7 @@ function next(prob :: DenseProblem, state :: DenseState) end # the next output time that we aim at - t_goal = prob.tspan[findfirst(t->(t>state.last_tout), prob.tspan)] + t_goal = s.options.tspan[findfirst(t->(t>state.last_tout), s.options.tspan)] # the t0 == t1 part ensures that we make at least one step while t1 < t_goal @@ -86,11 +58,11 @@ function next(prob :: DenseProblem, state :: DenseState) # s1 is the starting point for the new step, while the new # step is saved in s0 - if done(prob.solver, state.solver_state) + if done(solver, state.solver_state) warn("The iterator was exhausted before the dense output completed.") else # at this point s0 holds the new step, "s2" if you will - ((s0.t,s0.y[:]), state.solver_state) = next(prob.solver, state.solver_state) + ((s0.t,s0.y[:]), state.solver_state) = next(solver, state.solver_state) end # swap s0 and s1 @@ -101,7 +73,7 @@ function next(prob :: DenseProblem, state :: DenseState) t0, t1 = s0.t, s1.t # we made a successfull step and points == :all - if prob.points == :all || prob.stopevent(t1,s1.y) + if s.options.points == :all || s.options.stopevent(t1,s1.y) t_goal = min(t_goal,t1) 
break end @@ -111,16 +83,16 @@ function next(prob :: DenseProblem, state :: DenseState) # at this point we have t_goal∈[t0,t1] so we can apply the # interpolation - prob.solver.ode.F!(t0,s0.y,s0.dy) - prob.solver.ode.F!(t1,s1.y,s1.dy) + solver.ode.F!(t0,s0.y,s0.dy) + solver.ode.F!(t1,s1.y,s1.dy) - if prob.stopevent(t1,s1.y) + if s.options.stopevent(t1,s1.y) function stopfun(t) hermite_interp!(state.ytmp,t,s0,s1) - res = typeof(t0)(prob.stopevent(t,state.ytmp)) + res = Int(s.options.stopevent(t,state.ytmp)) return 2*res-1 # -1 if false, +1 if true end - t_goal = findroot(stopfun, [s0.t,s1.t], prob.roottol) + t_goal = findroot(stopfun, [s0.t,s1.t], s.options.roottol) # state.ytmp is already overwwriten to the correct result as a # side-effect of calling stopfun else @@ -135,12 +107,12 @@ function next(prob :: DenseProblem, state :: DenseState) end -function done(prob :: DenseProblem, state :: DenseState) +function done(s :: Solution{DenseStepper}, state :: DenseState) return ( - done(prob.solver, state.solver_state) || - state.s1.t >= prob.tspan[end] || - prob.stopevent(state.s1.t,state.s1.y) + done(s.stepper.solver, state.solver_state) || + state.s1.t >= s.options.tspan[end] || + s.options.stopevent(state.s1.t,state.s1.y) ) end diff --git a/src/types.jl b/src/types.jl index ffa2601be..6845e2bde 100644 --- a/src/types.jl +++ b/src/types.jl @@ -66,6 +66,7 @@ end # general purpose options immutable Options{T} + # stepper options initstep :: T tstop :: T reltol :: T @@ -74,6 +75,12 @@ immutable Options{T} maxstep :: T norm :: Function + # dense output options + tspan :: Vector{T} + points :: Symbol + stopevent :: Function + roottol :: T + function Options(; tstop = T(Inf), reltol = eps(T)^(1/3), @@ -84,8 +91,12 @@ immutable Options{T} # here, possibly overwrite it # in the call to solve() norm = Base.norm, - kargs...) 
- new(initstep,tstop,reltol,abstol,minstep,maxstep,norm) + tspan = [tstop], + points = :all, + stopevent = (t,y)->false, + roottol = eps(T)^(1/3), + kargs...) + new(initstep,tstop,reltol,abstol,minstep,maxstep,norm,tspan,points,stopevent,roottol) end end @@ -101,6 +112,10 @@ function show{T}(io::IO, opts :: Options{T}) println("maxstep = $(opts.maxstep)") println("initstep = $(opts.initstep)") println("norm = $(opts.norm)") + println("tspan = $(opts.tspan)") + println("points = $(opts.points)") + println("stopevent= $(opts.stopevent)") + println("roottol = $(opts.roottol)") end @@ -130,6 +145,17 @@ solve{T,S}(ode :: T, stepper :: S, options :: Options) = error("The $S doesn't s solve(ode :: ExplicitODE, stepper, options :: Options) = solve(convert(ExplicitODEInPlace,ode), stepper, options) +# normally we return the working array, which changes at each step and +# expect the user to copy it if necessary. In order for collect to +# return the expected result we need to copy the output at each step. 
+function collect{T}(t::Type{T}, s::Solution) + if s.options.tstop == Inf || s.options.tspan[end] == Inf + error("Trying to collect an infinite list") + end + collect(t, imap(x->deepcopy(x),s)) +end + + # some leftovers from the previous implementation # FIXME: This doesn't really work if x is anything but a Vector or a scalar From 58b9ad42e999c817c6c8b348d1a44ac29b8eda28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Tue, 12 Apr 2016 11:18:51 +0200 Subject: [PATCH 011/113] Finished the higher level interface --- src/ODE.jl | 2 +- src/dense.jl | 7 ++-- src/interfaces.jl | 92 +++++++++++++++++++++++++++++++++++++++++ src/ode23s.jl | 10 ++++- src/rk.jl | 84 ++++++++++++++++++------------------- src/types.jl | 64 +++++++--------------------- test/interface-tests.jl | 13 +++--- test/runtests.jl | 74 +++++++++++++++++---------------- 8 files changed, 209 insertions(+), 137 deletions(-) create mode 100644 src/interfaces.jl diff --git a/src/ODE.jl b/src/ODE.jl index 07fa57f62..e2c14f689 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -34,6 +34,6 @@ include("rk.jl") # include("multistep.jl") include("iterators.jl") -# include("interfaces.jl") +include("interfaces.jl") end # module ODE diff --git a/src/dense.jl b/src/dense.jl index ae44c267f..0245332fa 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -5,11 +5,11 @@ immutable DenseStepper <: AbstractStepper end -solve(ode :: ExplicitODEInPlace, +solve(ode :: ExplicitODE, stepper :: DenseStepper, options :: Options) = Solution(ode,stepper,options) -dense(stepper :: AbstractStepper) = solve(stepper.ode, stepper, stepper.options) +dense(sol :: Solution) = solve(sol.ode, DenseStepper(sol), sol.options) type DenseState @@ -60,6 +60,7 @@ function next(s :: Solution{DenseStepper}, state :: DenseState) if done(solver, state.solver_state) warn("The iterator was exhausted before the dense output completed.") + break else # at this point s0 holds the new step, "s2" if you will ((s0.t,s0.y[:]), state.solver_state) = 
next(solver, state.solver_state) @@ -111,7 +112,7 @@ function done(s :: Solution{DenseStepper}, state :: DenseState) return ( done(s.stepper.solver, state.solver_state) || - state.s1.t >= s.options.tspan[end] || + state.s1.t >= s.options.tspan[end] || s.options.stopevent(state.s1.t,state.s1.y) ) end diff --git a/src/interfaces.jl b/src/interfaces.jl new file mode 100644 index 000000000..8dbac8793 --- /dev/null +++ b/src/interfaces.jl @@ -0,0 +1,92 @@ +const steppers = + [ + ( :ode23s, :ModifiedRosenbrockStepper, []), + ( :ode1, :TableauStepperFixed, [bt_feuler]), + ( :ode2_midpoint, :TableauStepperFixed, [bt_midpoint]), + ( :ode2_heun, :TableauStepperFixed, [bt_heun]), + ( :ode4, :TableauStepperFixed, [bt_rk4]), + ( :ode21, :TableauStepperAdaptive, [bt_rk21]), + ( :ode23, :TableauStepperAdaptive, [bt_rk23]), + ( :ode45_fe, :TableauStepperAdaptive, [bt_rk45]), + ( :ode45_dp, :TableauStepperAdaptive, [bt_dopri5]), + ( :ode78, :TableauStepperAdaptive, [bt_feh78]) +] + + + +# TODO: there is a lot of useless conversions going on here + +for (name,stepper,params) in steppers + @eval begin + function ($name){T<:Number}(F, y0 :: Vector, t0 :: T; + jacobian = (t,y)->fdjacobian(F, t, y), + stopevent = (t,y)->false, + tstop = Inf, + tspan = [tstop], + kargs...) + + step = ($stepper){T}($params...) + + if all(tspan .>= t0) + # forward time integration + ode = explicit_ineff(t0,y0,F,jac=jacobian) + opts = Options{T}(; + tstop = tstop, + tspan = tspan, + stopevent = stopevent, + kargs...) + solution = collect(dense(solve(ode,step,opts))) + n = length(solution) + elseif all(tspan .<= t0) + # reverse time integration + F_reverse(t,y) = -F(2*t0-t,y) + # TODO: is that how the jacobian changes? + jac_reverse(t,y) = -jacobian(2*t0-t,y) + ode = explicit_ineff(t0,y0,F_reverse,jac=jac_reverse) + opts = Options{T}(; + tstop = 2*t0-tstop, + tspan = 2*t0.-tspan, + stopevent = (t,y)->stopevent(2*t0-t,y), + kargs...) 
+ else + # tspan stretches to the left and to the right of t0 + return ([t0],[y0]) + end + + solution = collect(dense(solve(ode,step,opts))) + n = length(solution) + + # return solution + + # convert a list of pairs to a pair of arrays + # TODO: leave it out as a list of pairs? + tn = Array(T,n) + yn = Array(typeof(y0),n) + if all(tspan .>= t0) + tn[:] = [x[1] for x in solution] + else + tn[:] = [2*t0-x[1] for x in solution] + end + yn[:] = [x[2] for x in solution] + + return (tn,yn) + end + + ($name){T<:Number}(F, y0 :: Vector, t0 :: Vector{T}; kargs...) = + ($name)(F,y0,t0[1]; + tstop = t0[end], + tspan = t0, + points = :specified, + kargs...) + + function ($name)(F, y0, t0; kargs...) + tn, yn = ($name)((t,y)->[F(t,y[1])], [y0], t0; kargs...) + yn2 = Array(typeof(y0),length(yn)) + yn2[:] = map(first,yn) + return (tn,yn2) + end + end +end + + +const ode45 = ode45_dp diff --git a/src/ode23s.jl b/src/ode23s.jl index e79c635b0..575d63b3c 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -28,7 +28,7 @@ order(ModifiedRosenbrockStepper) = 2 # define the set of ODE problems with which this stepper can work -solve(ode :: ExplicitODEInPlace, stepper :: ModifiedRosenbrockStepper, options :: Options) = +solve(ode :: ExplicitODE, stepper :: ModifiedRosenbrockStepper, options :: Options) = Solution{ModifiedRosenbrockStepper}(ode,stepper,options) @@ -79,7 +79,13 @@ end function done(s :: Solution{ModifiedRosenbrockStepper}, state :: RosenbrockState) - return state.dt <= s.options.minstep + if state.step.t >= s.options.tstop + return true + elseif state.dt < s.options.minstep + warn("minstep reached") + return true + end + return false end diff --git a/src/rk.jl b/src/rk.jl index 77ecdb912..3829b64e5 100644 --- a/src/rk.jl +++ b/src/rk.jl @@ -5,43 +5,33 @@ include("tableaus.jl") # intermediate level interface - -immutable TableauStepper{StepType,MethodType} <: AbstractStepper - tableau :: Tableau # Butcher tableau -end - - -function TableauStepper(tableau :: Tableau) - if 
isadaptive(tableau) - steptype = :adaptive - else - steptype = :fixed +immutable TableauStepper{Step,T} <: AbstractStepper + tableau :: Tableau + function TableauStepper(tab) + if Step == :fixed && isadaptive(tab) + error("Cannot construct a fixed step method from an adaptive step tableau") + elseif Step == :adaptive && !isadaptive(tab) + error("Cannot construct an adaptive step method from an fixed step tableau") + end + new(convert(T,tab)) end +end - if isexplicit(tableau) - methodtype = :explicit - else - methodtype = :implicit - end - # convert the method table to T here - return TableauStepper{steptype,methodtype}(tableau) -end +typealias TableauStepperFixed{T} TableauStepper{:fixed, T} +typealias TableauStepperAdaptive{T} TableauStepper{:adaptive,T} order(stepper :: TableauStepper) = minimum(order(stepper.tableau)) # TODO: possibly handle the initial stepsize and the tableau conversion here? -solve{T}(ode :: ExplicitODEInPlace, stepper :: TableauStepper{T,:explicit}, options :: Options) = - Solution{TableauStepper{T,:explicit}}(ode,stepper,options) +solve{S,T}(ode :: ExplicitODE, stepper :: TableauStepper{S,T}, options :: Options{T}) = + Solution{TableauStepper{S,T}}(ode,stepper,options) # lower level interface -abstract AbstractTableauState - - # explicit RK stepper type RKTempArrays{T} @@ -52,14 +42,14 @@ type RKTempArrays{T} end -type TableauExplicitState{T,S} <: AbstractTableauState +type TableauState{T,S} step :: Step{T,S} dt :: T tmp :: RKTempArrays{S} timeout :: Int end -function show(io :: IO, state :: TableauExplicitState) +function show(io :: IO, state :: TableauState) show(state.step) println("dt = $(state.dt)") println("timeout = $(state.timeout)") @@ -67,7 +57,7 @@ function show(io :: IO, state :: TableauExplicitState) end -function start{T}(s :: Solution{TableauStepper{T,:explicit}}) +function start{S,T}(s :: Solution{TableauStepper{S,T}}) t0, dt0, y0 = s.ode.t0, s.options.initstep, s.ode.y0 # TODO: we should do the Butcher table conversion 
somewhere @@ -87,12 +77,18 @@ function start{T}(s :: Solution{TableauStepper{T,:explicit}}) step = Step(t0,deepcopy(y0),deepcopy(tmp.ks[1])) timeout = 0 # for step control - return TableauExplicitState(step,dt0,tmp,timeout) + return TableauState(step,dt0,tmp,timeout) end -function done{T,S}(s :: Solution{TableauStepper{T,S}}, state :: AbstractTableauState) - return state.step.t >= s.options.tstop || state.dt < s.options.minstep +function done{S,T}(s :: Solution{TableauStepper{S,T}}, state :: TableauState) + if state.step.t >= s.options.tstop + return true + elseif state.dt < s.options.minstep + warn("minstep reached") + return true + end + return false end @@ -101,7 +97,7 @@ end ##################### -function next(s :: Solution{TableauStepper{:fixed,:explicit}}, state :: TableauExplicitState) +function next{T}(s :: Solution{TableauStepperFixed{T}}, state :: TableauState) step = state.step tmp = state.tmp @@ -124,7 +120,7 @@ end ######################## -function next(sol :: Solution{TableauStepper{:adaptive,:explicit}}, state :: TableauExplicitState) +function next{T}(sol :: Solution{TableauStepperAdaptive{T}}, state :: TableauState) const timeout_const = 5 @@ -190,7 +186,7 @@ end function rk_trial_step!(tmp :: RKTempArrays, - ode :: ExplicitODEInPlace, + ode :: ExplicitODE, last_step :: Step, tableau :: TableauRKExplicit, dt, @@ -208,7 +204,7 @@ end function rk_embedded_step!(tmp :: RKTempArrays, - ode :: ExplicitODEInPlace, + ode :: ExplicitODE, tableau :: Tableau, last_step :: Step, dt) @@ -222,8 +218,8 @@ function rk_embedded_step!(tmp :: RKTempArrays, dof = length(y) b = tableau.b - tmp.ynew[:] = 0 - tmp.yerr[:] = 0 + tmp.ynew[:] = zero(y) + tmp.yerr[:] = zero(y) for s=1:lengthks(tableau) # we skip the first step beacause we assume that tmp.ks[1] is @@ -231,12 +227,16 @@ function rk_embedded_step!(tmp :: RKTempArrays, if s > 1 calc_next_k!(tmp, s, ode, tableau, last_step, dt) end - tmp.ynew[:] += b[1,s]*tmp.ks[s] - tmp.yerr[:] += b[2,s]*tmp.ks[s] + for 
d=1:dof + tmp.ynew[d] += b[1,s]*tmp.ks[s][d] + tmp.yerr[d] += b[2,s]*tmp.ks[s][d] + end end - tmp.yerr[:] = dt*(tmp.ynew-tmp.yerr) - tmp.ynew[:] = y + dt*tmp.ynew + for d=1:dof + tmp.yerr[d] = dt*(tmp.ynew[d]-tmp.yerr[d]) + tmp.ynew[d] = y[d] + dt*tmp.ynew[d] + end end @@ -280,7 +280,7 @@ function stepsize_hw92!(tmp, tmp.yerr[d] = tmp.yerr[d]/sci # Eq 4.10 end - err = norm(tmp.yerr, 2) # Eq. 4.11 + err = norm(tmp.yerr) # Eq. 4.11 newdt = min(options.maxstep, dt*max(facmin, fac*(1/err)^(1/(ord+1)))) # Eq 4.13 modified if timeout > 0 @@ -296,7 +296,7 @@ end # this is the only part of state that can be changed here function calc_next_k!(tmp :: RKTempArrays, i :: Int, - ode :: ExplicitODEInPlace, + ode :: ExplicitODE, tableau :: Tableau, last_step :: Step, dt) diff --git a/src/types.jl b/src/types.jl index 6845e2bde..a1b8f3bda 100644 --- a/src/types.jl +++ b/src/types.jl @@ -1,46 +1,21 @@ abstract AbstractODE -type ExplicitODE <: AbstractODE - t0; y0 # initial data - F :: Function # solve y'=F(t,y) - jac :: Function # optional jacobian of F -end -type ExplicitODEInPlace <: AbstractODE +type ExplicitODE <: AbstractODE t0; y0 F! :: Function jac! :: Function end -# plug in the numerical jacobian if none is provided -ExplicitODE(t,y,F)=ExplicitODE(t,y,F,(t,y)->fdjacobian(F,t,y)) - - -function convert(::Type{ExplicitODEInPlace}, ode :: ExplicitODE) +function explicit_ineff(t0,y0,F;jac = (t,y)->fdjacobian(F,t,y)) function F!(t,y,dy) - dy[:] = ode.F(t,y) + dy[:] = F(t,y) end function jac!(t,y,J) - J[:] = ode.jac(t,y) - end - return ExplicitODEInPlace(ode.t0,ode.y0,F!,jac!) -end - - -function convert(::Type{ExplicitODE}, ode :: ExplicitODEInPlace) - function F(t,y) - dy = deepcopy(y) - ode.F!(t,y,dy) - return dy + J[:] = jac(t,y) end - function jac(t,y) - n = length(y) - J = Array(eltype(dy),n,n) - ode.jac!(t,y,J) - return J - end - return ExplicitODE(ode.t0,ode.y0,F,jac) + return ExplicitODE(t0,y0,F!,jac!) 
end @@ -49,7 +24,7 @@ abstract AbstractState # this might suffice for some solvers -type Step{T,S} <: AbstractState +type Step{T,S} t :: T y :: S dy :: S @@ -87,16 +62,19 @@ immutable Options{T} abstol = reltol, minstep = 10*eps(T), maxstep = 1/minstep, - initstep = reltol, # TODO: we need a better guess - # here, possibly overwrite it - # in the call to solve() + # TODO: we need a better guess here, possibly + # overwrite it in the call to solve() + initstep = max(min(reltol,abstol),minstep), norm = Base.norm, tspan = [tstop], points = :all, stopevent = (t,y)->false, roottol = eps(T)^(1/3), kargs...) - new(initstep,tstop,reltol,abstol,minstep,maxstep,norm,tspan,points,stopevent,roottol) + if all(points .!= [:specified,:all]) + error("Option points = $points is not supported, use :specified or :all") + end + new(initstep,tstop,reltol,abstol,minstep,maxstep,norm,sort(tspan),points,stopevent,roottol) end end @@ -119,10 +97,6 @@ function show{T}(io::IO, opts :: Options{T}) end -# default to floating point precision -Options(args...) = Options{Float64}(args...) - - # solution is a collection of an equation, an integration method # (stepper) and its options type Solution{T<:AbstractStepper} @@ -138,19 +112,13 @@ solve(ode, stepper; kargs...) = solve(ode, stepper, Options(kargs...)) # filter the wrong combinations of ode and stepper solve{T,S}(ode :: T, stepper :: S, options :: Options) = error("The $S doesn't support $T") -# TODO: this might not be necessary, in the long run we should make -# ExplicitODE the in-place one and use specific constructors - -# always convert ExplicitODE to ExplicitODEInPlace -solve(ode :: ExplicitODE, stepper, options :: Options) = solve(convert(ExplicitODEInPlace,ode), stepper, options) - # normally we return the working array, which changes at each step and # expect the user to copy it if necessary. In order for collect to # return the expected result we need to copy the output at each step. 
function collect{T}(t::Type{T}, s::Solution) - if s.options.tstop == Inf || s.options.tspan[end] == Inf - error("Trying to collect an infinite list") + if any(s.options.tspan .== Inf) + error("Attempting to collect an infinite list, use tstop or tspan with finite numbers only") end collect(t, imap(x->deepcopy(x),s)) end @@ -169,7 +137,7 @@ function fdjacobian(F, t, x::Number) return dFdx end -function fdjacobian(F, t, x) +function fdjacobian(F, t, x::Vector) ftx = F(t, x) lx = max(length(x),1) dFdx = zeros(eltype(x), lx, lx) diff --git a/test/interface-tests.jl b/test/interface-tests.jl index dbb811dcc..42bc0011e 100644 --- a/test/interface-tests.jl +++ b/test/interface-tests.jl @@ -11,7 +11,7 @@ const V0 = 1. const g0 = 0. # define custom type ... -immutable CompSol +immutable CompSol <: Number rho::Matrix{Complex128} x::Float64 p::Float64 @@ -27,11 +27,14 @@ Base.norm(y::CompSol) = norm(y::CompSol, 2.0) +(y1::CompSol, y2::CompSol) = CompSol(y1.rho+y2.rho, y1.x+y2.x, y1.p+y2.p) -(y1::CompSol, y2::CompSol) = CompSol(y1.rho-y2.rho, y1.x-y2.x, y1.p-y2.p) *(y1::CompSol, s::Real) = CompSol(y1.rho*s, y1.x*s, y1.p*s) +*(s::Bool, y1::CompSol) = false *(s::Real, y1::CompSol) = y1*s /(y1::CompSol, s::Real) = CompSol(y1.rho/s, y1.x/s, y1.p/s) ### new for PR #68 Base.abs(y::CompSol) = norm(y, 2.) # TODO not needed anymore once https://github.com/JuliaLang/julia/pull/11043 is in current stable julia +Base.abs2(y::CompSol) = norm(y, 2.) + Base.zero(::Type{CompSol}) = CompSol(complex(zeros(2,2)), 0., 0.) 
ODE.isoutofdomain(y::CompSol) = any(isnan, vcat(y.rho[:], y.x, y.p)) @@ -45,19 +48,19 @@ ODE.isoutofdomain(y::CompSol) = any(isnan, vcat(y.rho[:], y.x, y.p)) ################################################################################ - + # define RHSs of differential equations # delta, V and g are parameters function rhs(t, y, delta, V, g) H = [[-delta/2 V]; [V delta/2]] - + rho_dot = -im*H*y.rho + im*y.rho*H x_dot = y.p p_dot = -y.x - + return CompSol( rho_dot, x_dot, p_dot) end - + # inital conditons rho0 = zeros(2,2); rho0[1,1]=1.; diff --git a/test/runtests.jl b/test/runtests.jl index 9c88cd281..0485cbc6c 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,4 +1,4 @@ -using ODE +# using ODE using Base.Test tol = 1e-2 @@ -9,9 +9,9 @@ solvers = [ ODE.ode1, ODE.ode2_midpoint, ODE.ode2_heun, - ODE.ode4, - ODE.ode4ms, - ODE.ode5ms, + # ODE.ode4, + # ODE.ode4ms, + # ODE.ode5ms, # adaptive # ODE.ode21, # this fails on Travis with 0.4?! TODO revert once fixed. ODE.ode23, @@ -21,44 +21,44 @@ solvers = [ ## Stiff # fixed-step - ODE.ode4s_s, - ODE.ode4s_kr, + # ODE.ode4s_s, + # ODE.ode4s_kr, # adaptive ODE.ode23s] -for solver in solvers - println("using $solver") - # dy - # -- = 6 ==> y = 6t - # dt - t,y=solver((t,y)->6.0, 0., [0:.1:1;]) - @test maximum(abs(y-6t)) < tol +# for solver in solvers +# println("using $solver") +# # dy +# # -- = 6 ==> y = 6t +# # dt +# t,y=solver((t,y)->6.0, 0., [0:.1:1;]) +# @test maximum(abs(y-6t)) < tol - # dy - # -- = 2t ==> y = t.^2 - # dt - t,y=solver((t,y)->2t, 0., [0:.001:1;]) - @test maximum(abs(y-t.^2)) < tol +# # dy +# # -- = 2t ==> y = t.^2 +# # dt +# t,y=solver((t,y)->2t, 0., [0:.001:1;]) +# @test maximum(abs(y-t.^2)) < tol - # dy - # -- = y ==> y = y0*e.^t - # dt - t,y=solver((t,y)->y, 1., [0:.001:1;]) - @test maximum(abs(y-e.^t)) < tol +# # dy +# # -- = y ==> y = y0*e.^t +# # dt +# t,y=solver((t,y)->y, 1., [0:.001:1;]) +# @test maximum(abs(y-e.^t)) < tol - t,y=solver((t,y)->y, 1., [1:-.001:0;]) - @test 
maximum(abs(y-e.^(t-1))) < tol +# t,y=solver((t,y)->y, 1., [1:-.001:0;]) +# @test maximum(abs(y-e.^(t-1))) < tol - # dv dw - # -- = -w, -- = v ==> v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) - # dt dt - # - # y = [v, w] - t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;]) - ys = hcat(y...).' # convert Vector{Vector{Float}} to Matrix{Float} - @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol -end +# # dv dw +# # -- = -w, -- = v ==> v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) +# # dt dt +# # +# # y = [v, w] +# t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;]) +# ys = hcat(y...).' # convert Vector{Vector{Float}} to Matrix{Float} +# @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol +# end # Test negative starting times ODE.ode23s @assert length(ODE.ode23s((t,y)->[-y[2]; y[1]], [1., 2.], [-5., 0])[1]) > 1 @@ -75,12 +75,14 @@ let ydot end t = [0., 1e11] - t,y = ode23s(f, [1.0, 0.0, 0.0], t; abstol=1e-8, reltol=1e-8, - maxstep=1e11/10, minstep=1e11/1e18) + t,y = ODE.ode23s(f, [1.0, 0.0, 0.0], t; abstol=1e-8, reltol=1e-8, + maxstep=1e11/10, minstep=1e11/1e18) refsol = [0.2083340149701255e-07, 0.8333360770334713e-13, 0.9999999791665050] # reference solution at tspan[2] + println(t) + println(y) @test norm(refsol-y[end], Inf) < 2e-10 end include("interface-tests.jl") From 0aa2e92a995c85c0c93dd2476f4012a8de85b2a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Tue, 12 Apr 2016 14:06:43 +0200 Subject: [PATCH 012/113] Small fixes --- src/dense.jl | 11 +++++--- src/helpers.jl | 8 ++++-- src/interfaces.jl | 20 ++++++++++---- src/ode23s.jl | 36 ++++++++++++++++-------- src/rk.jl | 1 - src/types.jl | 13 +++++---- test/interface-tests.jl | 18 ++++++------ test/runtests.jl | 61 ++++++++++++++++++++--------------------- 8 files changed, 100 insertions(+), 68 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 0245332fa..6f646f3cb 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -19,6 +19,7 @@ 
type DenseState solver_state # used for storing the interpolation result ytmp + solver_done end @@ -33,7 +34,7 @@ function start(s :: Solution{DenseStepper}) step1 = Step(t0,deepcopy(y0),deepcopy(dy0)) solver_state = start(solver) ytmp = deepcopy(y0) - return DenseState(step0, step1, t0, true, solver_state, ytmp) + return DenseState(step0, step1, t0, true, solver_state, ytmp, false) end @@ -60,7 +61,9 @@ function next(s :: Solution{DenseStepper}, state :: DenseState) if done(solver, state.solver_state) warn("The iterator was exhausted before the dense output completed.") - break + # prevents calling done(..) twice + state.solver_done = true + return ((s0.t,s0.y[:]),state) else # at this point s0 holds the new step, "s2" if you will ((s0.t,s0.y[:]), state.solver_state) = next(solver, state.solver_state) @@ -111,8 +114,8 @@ end function done(s :: Solution{DenseStepper}, state :: DenseState) return ( - done(s.stepper.solver, state.solver_state) || - state.s1.t >= s.options.tspan[end] || + state.solver_done || + state.s1.t >= s.options.tspan[end] || s.options.stopevent(state.s1.t,state.s1.y) ) end diff --git a/src/helpers.jl b/src/helpers.jl index f815f8000..d8e43b2f6 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -1,5 +1,6 @@ isoutofdomain = isnan +#TODO make it a function on ExplicitODE and Options function hinit{T}(F, y0, t0::T, reltol, abstol; tstop = Inf, order = 1) # Returns first step size tdir = sign(tstop-t0) @@ -13,7 +14,10 @@ function hinit{T}(F, y0, t0::T, reltol, abstol; tstop = Inf, order = 1) h0 = (d0/d1)/100 end # perform Euler step - y1 = y0 + tdir*h0*f0 + y1 = similar(y0) + for d = 1:length(y1) + y1[d] = y0[d]+tdir*h0*f0[d] + end f1 = F(t0 + tdir*h0, y1) # estimate second derivative d2 = norm(f1 - f0, Inf)/(tau*h0) @@ -23,5 +27,5 @@ function hinit{T}(F, y0, t0::T, reltol, abstol; tstop = Inf, order = 1) pow = -(2 + log10(max(d1, d2)))/(order+1) h1 = 10^pow end - return min(100*h0, h1, tdir*abs(tstop-t0)) + return min(100*h0, h1, abs(tstop-t0)) 
end diff --git a/src/interfaces.jl b/src/interfaces.jl index 8dbac8793..0de55e5a3 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -16,17 +16,23 @@ const steppers = # TODO: there is a lot of useless conversions going on here + for (name,stepper,params) in steppers @eval begin - function ($name){T<:Number}(F, y0 :: Vector, t0 :: T; + function ($name){T<:Number}(F, y0 :: AbstractVector, t0 :: T; jacobian = (t,y)->fdjacobian(F, t, y), stopevent = (t,y)->false, tstop = Inf, tspan = [tstop], + # we need these options explicitly for the hinit + reltol = eps(T)^(1/3)/10, + abstol = eps(T)^(1/2)/10, + initstep = hinit(F, y0, t0, reltol, abstol; tstop=tstop, order=order($stepper)), kargs...) step = ($stepper){T}($params...) + # handle different directions of time integration if all(tspan .>= t0) # forward time integration ode = explicit_ineff(t0,y0,F,jac=jacobian) @@ -34,9 +40,10 @@ for (name,stepper,params) in steppers tstop = tstop, tspan = tspan, stopevent = stopevent, + reltol = reltol, + abstol = abstol, + initstep = initstep, kargs...) - solution = collect(dense(solve(ode,step,opts))) - n = length(solution) elseif all(tspan .<= t0) # reverse time integration F_reverse(t,y) = -F(2*t0-t,y) @@ -47,6 +54,9 @@ for (name,stepper,params) in steppers tstop = 2*t0-tstop, tspan = 2*t0.-tspan, stopevent = (t,y)->stopevent(2*t0-t,y), + reltol = reltol, + abstol = abstol, + initstep = initstep, kargs...) else # tspan stretches to the left and to the right of t0 @@ -72,14 +82,14 @@ for (name,stepper,params) in steppers return (tn,yn) end - ($name){T<:Number}(F, y0 :: Vector, t0 :: Vector{T}; kargs...) = + ($name){T<:Number}(F, y0 :: AbstractVector, t0 :: AbstractVector{T}; kargs...) = ($name)(F,y0,t0[1]; tstop = t0[end], tspan = t0, points = :specified, kargs...) - function ($name)(F, y0, t0; kargs...) + function ($name)(F, y0 :: Number, t0; kargs...) tn, yn = ($name)((t,y)->[F(t,y[1])], [y0], t0; kargs...) 
yn2 = Array(typeof(y0),length(yn)) yn2[:] = map(first,yn) diff --git a/src/ode23s.jl b/src/ode23s.jl index 575d63b3c..7be4610c0 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -19,9 +19,6 @@ immutable ModifiedRosenbrockStepper{T<:Number} <: AbstractStepper end end -# default to floating point precision -ModifiedRosenbrockStepper() = ModifiedRosenbrockStepper{Float64}() - # TODO: is this correct? order(ModifiedRosenbrockStepper) = 2 @@ -42,6 +39,7 @@ type RosenbrockState{T,S} <: AbstractState dt :: T F1 :: S; F2 :: S J # :: ? + iters :: Int end @@ -68,7 +66,8 @@ function start(s :: Solution{ModifiedRosenbrockStepper}) dt, zero(y), # F1 zero(y), # F2 - J) # J + J, # J + 0) # iters # initialize the derivative and the Jacobian s.ode.F!(t,y,step.dy) s.ode.jac!(t,y,state.J) @@ -82,7 +81,10 @@ function done(s :: Solution{ModifiedRosenbrockStepper}, if state.step.t >= s.options.tstop return true elseif state.dt < s.options.minstep - warn("minstep reached") + warn("minstep reached.") + return true + elseif state.iters >= s.options.maxiters + warn("Maximum number of iterations ($(Int(s.options.maxiters))) reached, consider setting a larger maxiter.") return true end return false @@ -107,8 +109,16 @@ function next(s :: Solution{ModifiedRosenbrockStepper}, while true + state.iters += 1 + if state.iters > s.options.maxiters + return ((step.t,step.y), state) + end + + # trim the step size to match the bounds of integration + dt = min(s.options.tstop-t,dt) + # TODO: this should go to a specialized function for type stabilty sake - # maybe make W a part of ExplicitODE? + # maybe make W a part of ExplicitODE? Same for tder below? if size(J,1) == 1 W = one(J) - dt*d*J else @@ -120,30 +130,29 @@ function next(s :: Solution{ModifiedRosenbrockStepper}, W = lufact( eye(J) - dt*d*J ) end - # TODO: same for tder? 
- # approximate time-derivative of F + # Approximate time-derivative of F, we are using F1 as a + # temporary array ode.F!(t+dt/100,y,F1) tder = 100*d*(F1-F0) # modified Rosenbrock formula # TODO: allocate some temporary space for these variables - k1 = W \ (dy + tder) + k1 = W \ (F0 + tder) ode.F!(t+dt/2, y+dt*k1/2, F1) k2 = W \ (F1 - k1) + k1 ynew = y + dt*k2 ode.F!(t+dt, ynew, F2) - k3 = W \ (F2 - e32*(k2 - F1) - 2*(k1 - dy) + tder ) + k3 = W \ (F2 - e32*(k2 - F1) - 2*(k1 - F0) + tder ) delta = max(opts.reltol*max(opts.norm(y), opts.norm(ynew)), opts.abstol) # allowable error - err = (dt/6)*opts.norm(k1 - 2*k2 + k3)/delta # error estimate # upon a failed step decrease the step size dtnew = min(opts.maxstep, - dt*0.8*max(1,err^(-1/3)) ) + dt*0.8*err^(-1/3) ) # check if the new solution is acceptable if err <= 1 @@ -156,6 +165,9 @@ function next(s :: Solution{ModifiedRosenbrockStepper}, ode.jac!(step.t,step.y,J) return ((step.t,step.y), state) + else + # continue with the decreased time step + dt = dtnew end end diff --git a/src/rk.jl b/src/rk.jl index 3829b64e5..0a88e260b 100644 --- a/src/rk.jl +++ b/src/rk.jl @@ -145,7 +145,6 @@ function next{T}(sol :: Solution{TableauStepperAdaptive{T}}, state :: TableauSta rk_trial_step!(tmp, sol.ode, step, sol.stepper.tableau, dt, timeout, sol.options) if abs(newdt) < sol.options.minstep # minimum step size reached, break - println("Warning: dt < minstep. 
Stopping.") # passing the newdt to state will result in done() state.dt = newdt break diff --git a/src/types.jl b/src/types.jl index a1b8f3bda..acfff5077 100644 --- a/src/types.jl +++ b/src/types.jl @@ -10,6 +10,7 @@ end function explicit_ineff(t0,y0,F;jac = (t,y)->fdjacobian(F,t,y)) function F!(t,y,dy) + # this is why we can't handle a scalar type any more dy[:] = F(t,y) end function jac!(t,y,J) @@ -49,6 +50,7 @@ immutable Options{T} minstep :: T maxstep :: T norm :: Function + maxiters :: T # dense output options tspan :: Vector{T} @@ -58,15 +60,16 @@ immutable Options{T} function Options(; tstop = T(Inf), - reltol = eps(T)^(1/3), - abstol = reltol, + tspan = [tstop], + reltol = eps(T)^(1/3)/10, + abstol = eps(T)^(1/2)/10, minstep = 10*eps(T), maxstep = 1/minstep, # TODO: we need a better guess here, possibly # overwrite it in the call to solve() - initstep = max(min(reltol,abstol),minstep), + initstep = max(min(reltol,abstol,maxstep),minstep), norm = Base.norm, - tspan = [tstop], + maxiters = T(Inf), points = :all, stopevent = (t,y)->false, roottol = eps(T)^(1/3), @@ -74,7 +77,7 @@ immutable Options{T} if all(points .!= [:specified,:all]) error("Option points = $points is not supported, use :specified or :all") end - new(initstep,tstop,reltol,abstol,minstep,maxstep,norm,sort(tspan),points,stopevent,roottol) + new(initstep,tstop,reltol,abstol,minstep,maxstep,norm,maxiters,sort(tspan),points,stopevent,roottol) end end diff --git a/test/interface-tests.jl b/test/interface-tests.jl index 42bc0011e..0dd9ee6de 100644 --- a/test/interface-tests.jl +++ b/test/interface-tests.jl @@ -52,13 +52,14 @@ ODE.isoutofdomain(y::CompSol) = any(isnan, vcat(y.rho[:], y.x, y.p)) # define RHSs of differential equations # delta, V and g are parameters function rhs(t, y, delta, V, g) - H = [[-delta/2 V]; [V delta/2]] + H = [[-delta/2 V]; + [V delta/2]] - rho_dot = -im*H*y.rho + im*y.rho*H - x_dot = y.p - p_dot = -y.x + rho_dot = -im*H*y.rho + im*y.rho*H + x_dot = y.p + p_dot = -y.x - 
return CompSol( rho_dot, x_dot, p_dot) + return CompSol( rho_dot, x_dot, p_dot) end # inital conditons @@ -70,13 +71,14 @@ y0 = CompSol(complex(rho0), 2., 1.) endt = 2*pi; t,y1 = ODE.ode45((t,y)->rhs(t, y, delta0, V0, g0), y0, [0., endt]) # used as reference -print("Testing interface for scalar-like state... ") +println("Testing interface for scalar-like state... ") for solver in solvers + println("Testing $solver") # these only work with some Array-like interface defined: - if solver in [ODE.ode23s, ODE.ode4s_s, ODE.ode4s_kr] + if solver in [ODE.ode23s] # , ODE.ode4s_s, ODE.ode4s_kr continue end - t,y2 = solver((t,y)->rhs(t, y, delta0, V0, g0), y0, linspace(0., endt, 500)) + t,y2 = solver((t,y)->rhs(t, y, delta0, V0, g0), y0, linspace(0., endt, 500),abstol=1e-8,reltol=1e-5,initstep=endt) @test norm(y1[end]-y2[end])<0.1 end println("ok.") diff --git a/test/runtests.jl b/test/runtests.jl index 0485cbc6c..7a73ef8a8 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -13,7 +13,7 @@ solvers = [ # ODE.ode4ms, # ODE.ode5ms, # adaptive -# ODE.ode21, # this fails on Travis with 0.4?! TODO revert once fixed. + ODE.ode21, # this fails on Travis with 0.4?! TODO revert once fixed. 
ODE.ode23, ODE.ode45_dp, ODE.ode45_fe, @@ -26,39 +26,40 @@ solvers = [ # adaptive ODE.ode23s] -# for solver in solvers -# println("using $solver") -# # dy -# # -- = 6 ==> y = 6t -# # dt -# t,y=solver((t,y)->6.0, 0., [0:.1:1;]) -# @test maximum(abs(y-6t)) < tol +for solver in solvers + println("using $solver") + # dy + # -- = 6 ==> y = 6t + # dt + # we need to fix initstep for the fixed-step methods + t,y=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1) + @test maximum(abs(y-6t)) < tol -# # dy -# # -- = 2t ==> y = t.^2 -# # dt -# t,y=solver((t,y)->2t, 0., [0:.001:1;]) -# @test maximum(abs(y-t.^2)) < tol + # dy + # -- = 2t ==> y = t.^2 + # dt + t,y=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001) + @test maximum(abs(y-t.^2)) < tol -# # dy -# # -- = y ==> y = y0*e.^t -# # dt -# t,y=solver((t,y)->y, 1., [0:.001:1;]) -# @test maximum(abs(y-e.^t)) < tol + # dy + # -- = y ==> y = y0*e.^t + # dt + t,y=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001) + @test maximum(abs(y-e.^t)) < tol -# t,y=solver((t,y)->y, 1., [1:-.001:0;]) -# @test maximum(abs(y-e.^(t-1))) < tol + t,y=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001) + @test maximum(abs(y-e.^(t-1))) < tol -# # dv dw -# # -- = -w, -- = v ==> v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) -# # dt dt -# # -# # y = [v, w] -# t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;]) -# ys = hcat(y...).' # convert Vector{Vector{Float}} to Matrix{Float} -# @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol -# end + # dv dw + # -- = -w, -- = v ==> v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) + # dt dt + # + # y = [v, w] + t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001) + ys = hcat(y...).' 
# convert Vector{Vector{Float}} to Matrix{Float} + @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol +end # Test negative starting times ODE.ode23s @assert length(ODE.ode23s((t,y)->[-y[2]; y[1]], [1., 2.], [-5., 0])[1]) > 1 @@ -81,8 +82,6 @@ let refsol = [0.2083340149701255e-07, 0.8333360770334713e-13, 0.9999999791665050] # reference solution at tspan[2] - println(t) - println(y) @test norm(refsol-y[end], Inf) < 2e-10 end include("interface-tests.jl") From b21b32676a07436aec1f9ca4be81fad1798cc3f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 13 Apr 2016 15:43:27 +0200 Subject: [PATCH 013/113] No more overshooting --- src/rk.jl | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/src/rk.jl b/src/rk.jl index 0a88e260b..a3167a474 100644 --- a/src/rk.jl +++ b/src/rk.jl @@ -99,18 +99,22 @@ end function next{T}(s :: Solution{TableauStepperFixed{T}}, state :: TableauState) step = state.step - tmp = state.tmp + tmp = state.tmp - dof = length(step.y) - b = s.stepper.tableau.b + dof = length(step.y) + b = s.stepper.tableau.b + dt = min(state.dt,s.options.tstop-step.t) - for k=1:lengthks(s.stepper.tableau) - calc_next_k!(state.tmp, k, s.ode, s.stepper.tableau, step, state.dt) + tmp.ynew[:] = step.y + + for k=1:length(b) + calc_next_k!(state.tmp, k, s.ode, s.stepper.tableau, step, dt) for d=1:dof - step.y[d] += state.dt * b[k]*tmp.ks[k][d] + tmp.ynew[d] += dt * b[k]*tmp.ks[k][d] end end - step.t += state.dt + step.t += dt + step.y[:] = tmp.ynew return ((step.t,step.y), state) end @@ -136,6 +140,9 @@ function next{T}(sol :: Solution{TableauStepperAdaptive{T}}, state :: TableauSta # leads to a small enough error or the stepsize reaches # prob.minstep + # trim the inital stepsize to avoid overshooting + dt = min(dt, sol.options.tstop-state.step.t) + while true # Do one step (assumes ks[1]==f0). 
After calling tmp.ynew @@ -144,6 +151,9 @@ function next{T}(sol :: Solution{TableauStepperAdaptive{T}}, state :: TableauSta err, newdt, timeout = rk_trial_step!(tmp, sol.ode, step, sol.stepper.tableau, dt, timeout, sol.options) + # trim again in case newdt > dt + newdt = min(newdt, sol.options.tstop-state.step.t) + if abs(newdt) < sol.options.minstep # minimum step size reached, break # passing the newdt to state will result in done() state.dt = newdt @@ -307,8 +317,7 @@ function calc_next_k!(tmp :: RKTempArrays, for d=1:dof tmp.y[d] += dt * tmp.ks[j][d] * a[i,j] end + # tmp.y[:] += dt*tmp.ks[j]*a[i,j] end ode.F!(t + c[i]*dt, tmp.y, tmp.ks[i]) - - nothing end From 7b6d6b80cfb6e5dceffd940cd62aae3b3320623b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 22 Apr 2016 10:57:23 +0200 Subject: [PATCH 014/113] This should eliminate the conversions to Float64 --- src/helpers.jl | 7 +++---- src/interfaces.jl | 12 +++++++----- src/rk.jl | 20 ++++++++++---------- src/tableaus.jl | 2 +- src/types.jl | 32 +++++++++++++++++++++++++------- 5 files changed, 46 insertions(+), 27 deletions(-) diff --git a/src/helpers.jl b/src/helpers.jl index d8e43b2f6..801c768e7 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -1,9 +1,8 @@ isoutofdomain = isnan #TODO make it a function on ExplicitODE and Options -function hinit{T}(F, y0, t0::T, reltol, abstol; tstop = Inf, order = 1) +function hinit{T}(F, y0, t0::T, reltol, abstol; tstop = T(Inf), order = 1) # Returns first step size - tdir = sign(tstop-t0) tau = max(reltol*norm(y0, Inf), abstol) d0 = norm(y0, Inf)/tau f0 = F(t0, y0) @@ -16,9 +15,9 @@ function hinit{T}(F, y0, t0::T, reltol, abstol; tstop = Inf, order = 1) # perform Euler step y1 = similar(y0) for d = 1:length(y1) - y1[d] = y0[d]+tdir*h0*f0[d] + y1[d] = y0[d]+h0*f0[d] end - f1 = F(t0 + tdir*h0, y1) + f1 = F(t0 + h0, y1) # estimate second derivative d2 = norm(f1 - f0, Inf)/(tau*h0) if max(d1, d2) <= 10*eps(T) diff --git a/src/interfaces.jl 
b/src/interfaces.jl index 0de55e5a3..46043ee2f 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -19,14 +19,15 @@ const steppers = for (name,stepper,params) in steppers @eval begin - function ($name){T<:Number}(F, y0 :: AbstractVector, t0 :: T; + function ($name){T<:Number,S<:AbstractVector}( + F, y0 :: S, t0 :: T; jacobian = (t,y)->fdjacobian(F, t, y), stopevent = (t,y)->false, - tstop = Inf, + tstop = T(Inf), tspan = [tstop], # we need these options explicitly for the hinit - reltol = eps(T)^(1/3)/10, - abstol = eps(T)^(1/2)/10, + reltol = eps(T)^T(1//3)/10, + abstol = eps(T)^T(1//2)/10, initstep = hinit(F, y0, t0, reltol, abstol; tstop=tstop, order=order($stepper)), kargs...) @@ -63,7 +64,7 @@ for (name,stepper,params) in steppers return ([t0],[y0]) end - solution = collect(dense(solve(ode,step,opts))) + solution = collect(Tuple{T,S},dense(solve(ode,step,opts))) n = length(solution) # return solution @@ -90,6 +91,7 @@ for (name,stepper,params) in steppers kargs...) function ($name)(F, y0 :: Number, t0; kargs...) + # TODO: this is slow! tn, yn = ($name)((t,y)->[F(t,y[1])], [y0], t0; kargs...) yn2 = Array(typeof(y0),length(yn)) yn2[:] = map(first,yn) diff --git a/src/rk.jl b/src/rk.jl index a3167a474..f2a7d7b8a 100644 --- a/src/rk.jl +++ b/src/rk.jl @@ -250,12 +250,12 @@ function rk_embedded_step!(tmp :: RKTempArrays, end -function stepsize_hw92!(tmp, - last_step :: Step, - tableau :: TableauRKExplicit, - dt, - timeout, - options :: Options) +function stepsize_hw92!{T}(tmp, + last_step :: Step, + tableau :: TableauRKExplicit, + dt :: T, + timeout, + options :: Options) # Estimates the error and a new step size following Hairer & # Wanner 1992, p167 (with some modifications) # @@ -270,8 +270,8 @@ function stepsize_hw92!(tmp, ord = minimum(order(tableau)) timout_after_nan = 5 - fac = [0.8, 0.9, 0.25^(1/(ord+1)), 0.38^(1/(ord+1))][1] - facmax = 5.0 # maximal step size increase. 
1.5-5 + fac = [T(8//10), T(9//10), T(1//4)^(1//(ord+1)), T(38//100)^(1//(ord+1))][1] + facmax = T(5) # maximal step size increase. 1.5-5 facmin = 1./facmax # maximal step size decrease. ? dof = length(last_step.y) @@ -280,7 +280,7 @@ function stepsize_hw92!(tmp, # if outside of domain (usually NaN) then make step size smaller by maximum if isoutofdomain(tmp.y[d]) - return 10., dt*facmin, timout_after_nan + return T(10), dt*facmin, timout_after_nan end y0 = last_step.y[d] # TODO: is this supposed to be the last successful step? @@ -290,7 +290,7 @@ function stepsize_hw92!(tmp, end err = norm(tmp.yerr) # Eq. 4.11 - newdt = min(options.maxstep, dt*max(facmin, fac*(1/err)^(1/(ord+1)))) # Eq 4.13 modified + newdt = min(options.maxstep, dt*max(facmin, fac*(1/err)^(1//(ord+1)))) # Eq 4.13 modified if timeout > 0 newdt = min(newdt, dt) diff --git a/src/tableaus.jl b/src/tableaus.jl index 0ab5fb0da..22cbf08f1 100644 --- a/src/tableaus.jl +++ b/src/tableaus.jl @@ -56,7 +56,7 @@ immutable TableauRKExplicit{Name, S, T} <: Tableau{Name, S, T} @assert istril(a) @assert S==length(c)==size(a,1)==size(a,2)==size(b,2) @assert size(b,1)==length(order) - @assert norm(sum(a,2)-c'',Inf)<1e-10 # consistency. + @assert norm(sum(a,2)-c'',Inf)fdjacobian!(F!,t,y,J)) + new(t0,y0,F!,jac!) + end end @@ -60,9 +63,9 @@ immutable Options{T} function Options(; tstop = T(Inf), - tspan = [tstop], - reltol = eps(T)^(1/3)/10, - abstol = eps(T)^(1/2)/10, + tspan = T[tstop], + reltol = eps(T)^T(1//3)/10, + abstol = eps(T)^T(1//2)/10, minstep = 10*eps(T), maxstep = 1/minstep, # TODO: we need a better guess here, possibly @@ -70,9 +73,9 @@ immutable Options{T} initstep = max(min(reltol,abstol,maxstep),minstep), norm = Base.norm, maxiters = T(Inf), - points = :all, + points = :all, stopevent = (t,y)->false, - roottol = eps(T)^(1/3), + roottol = eps(T)^T(1//3), kargs...) 
if all(points .!= [:specified,:all]) error("Option points = $points is not supported, use :specified or :all") @@ -119,8 +122,8 @@ solve{T,S}(ode :: T, stepper :: S, options :: Options) = error("The $S doesn't s # normally we return the working array, which changes at each step and # expect the user to copy it if necessary. In order for collect to # return the expected result we need to copy the output at each step. -function collect{T}(t::Type{T}, s::Solution) - if any(s.options.tspan .== Inf) +function collect{T,S}(t :: Type{Tuple{T,AbstractVector{S}}}, s::Solution) + if maximum(s.options.tspan) == T(Inf) error("Attempting to collect an infinite list, use tstop or tspan with finite numbers only") end collect(t, imap(x->deepcopy(x),s)) @@ -152,3 +155,18 @@ function fdjacobian(F, t, x::Vector) end return dFdx end + +function fdjacobian!{T}(F!, t, x::Vector{T}, J::Array{T,2}) + ftx = similar(x) + ftx2= similar(x) + dx = similar(x) + F!(t,x,ftx) + lx = max(length(x),1) + dFdx = zeros(eltype(x), lx, lx) + for j = 1:lx + # The 100 below is heuristic + dx[j] = (x[j] .+ (x[j]==0))./100 + F!(t,x+dx,ftx2) + J[:,j] = (ftx2-ftx)./dx[j] + end +end From f558b9fa4b41c58438b49d37a506a2c37a097568 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Sat, 23 Apr 2016 23:25:19 +0200 Subject: [PATCH 015/113] Review for @pwl --- src/dense.jl | 31 ++++++++++++--- src/helpers.jl | 5 +++ src/interfaces.jl | 6 +++ src/iterators.jl | 6 +++ src/rk.jl | 29 ++++++++++++-- src/tableaus.jl | 8 ++++ src/types.jl | 97 +++++++++++++++++++++++++++++++++-------------- test/runtests.jl | 7 ++-- 8 files changed, 149 insertions(+), 40 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 6f646f3cb..716f4a884 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -1,17 +1,19 @@ # A higher level stepper, defined as a wrapper around another stepper. +# m3: is this still true? The wrapper in `next` seems very substantial? 
immutable DenseStepper <: AbstractStepper - solver :: Solution + solver :: Solution # m3: a `solver` is a `Solution`? Seems a bit strange. end - +# m3: this seems a bit odd: just return a `Solution` type which is +# actually not solved yet. solve(ode :: ExplicitODE, stepper :: DenseStepper, options :: Options) = Solution(ode,stepper,options) dense(sol :: Solution) = solve(sol.ode, DenseStepper(sol), sol.options) - +# m3: does this not need type-parameters? type DenseState s0 :: Step; s1 :: Step last_tout @@ -37,9 +39,28 @@ function start(s :: Solution{DenseStepper}) return DenseState(step0, step1, t0, true, solver_state, ytmp, false) end - +# m3: I think it would be nice to factor out the dense-output and +# root-finding into its own function. That way it could be used also +# independently of the dense-output iterator. Also, it would make +# this next function more compact. function next(s :: Solution{DenseStepper}, state :: DenseState) + + # m3: I'm not 100% sure what happens here. I would implement it like so: + # Initialize in `start`: calculate next t1, y1 and also hold onto IC in t0,y0, + # set state.last_t=1 + # + # in next have this loop + # for t in tspan[state.last_t:end] + # if t>t1 + # make new t1, y1, move old t1, y1 into t0, y0 + # end + # make dense output at t + # find events + # state.last_t += 1 + # return ((t, y), state) + # end + solver = s.stepper.solver s0, s1 = state.s0, state.s1 @@ -140,7 +161,7 @@ function hermite_interp!(y,t,step0::Step,step1::Step) nothing end - +# m3: docs, move to helpers.jl function findroot(f,rng,eps) xl, xr = rng fl, fr = f(xl), f(xr) diff --git a/src/helpers.jl b/src/helpers.jl index 801c768e7..783c94296 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -1,5 +1,10 @@ +# m3: This should be a generic function so methods can be added if +# other types have something else than NaN for out-of-domain. +# Although, better make it an option. 
isoutofdomain = isnan +# m3: to be consistent: h->dt + #TODO make it a function on ExplicitODE and Options function hinit{T}(F, y0, t0::T, reltol, abstol; tstop = T(Inf), order = 1) # Returns first step size diff --git a/src/interfaces.jl b/src/interfaces.jl index 46043ee2f..e1854f9f5 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -16,6 +16,10 @@ const steppers = # TODO: there is a lot of useless conversions going on here +# m3: is meta-programming really needed here? Why not do it like it +# was done before? For instance in runge_kutta.jl we got: +# ode23(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_rk23; kwargs...) + for (name,stepper,params) in steppers @eval begin @@ -46,6 +50,7 @@ for (name,stepper,params) in steppers initstep = initstep, kargs...) elseif all(tspan .<= t0) +# m3: again, seems like a band-aid # reverse time integration F_reverse(t,y) = -F(2*t0-t,y) # TODO: is that how the jacobian changes? @@ -90,6 +95,7 @@ for (name,stepper,params) in steppers points = :specified, kargs...) +# m3: could this go into the low-level API? function ($name)(F, y0 :: Number, t0; kargs...) # TODO: this is slow! tn, yn = ($name)((t,y)->[F(t,y[1])], [y0], t0; kargs...) diff --git a/src/iterators.jl b/src/iterators.jl index 1bfab4863..988d0ef02 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -13,6 +13,12 @@ function solver(F, y0::AbstractArray, t0; dense_sol = dense(F, y0, t0, sol; tspan = tspan, kargs...) return dense_sol else + # m3: couldn't this be handled at a lower level (in the + # iterator) by adjusting the direction of the time step. This + # seems a bit like a band-aid. What do I do if I want to use + # the iterator? Maybe cleanest to add a time-comparison + # function `before(t1,t2,dt)` and use that in all comparisons? 
+ # reverse time integration F_reverse(t,y) = -F(2*t0-t,y) reverse_output(t,y)=(2*t0-t,y) diff --git a/src/rk.jl b/src/rk.jl index f2a7d7b8a..de1bc1214 100644 --- a/src/rk.jl +++ b/src/rk.jl @@ -5,8 +5,10 @@ include("tableaus.jl") # intermediate level interface +# m3: this seems a bit an odd name. Tableaus are useful not just for +# RK. So either move this to tableaus.jl or rename it. immutable TableauStepper{Step,T} <: AbstractStepper - tableau :: Tableau + tableau :: Tableau # m3: this is an abstract type. Is that ok? function TableauStepper(tab) if Step == :fixed && isadaptive(tab) error("Cannot construct a fixed step method from an adaptive step tableau") @@ -34,7 +36,9 @@ solve{S,T}(ode :: ExplicitODE, stepper :: TableauStepper{S,T}, options :: Option # explicit RK stepper -type RKTempArrays{T} +# m3: rename tmp->work (This is what these arrays are called in classic codes, + +type RKTempArrays{T} # m3: RKWorkArrays y :: T ynew :: T yerr :: T @@ -66,7 +70,7 @@ function start{S,T}(s :: Solution{TableauStepper{S,T}}) zero(y0), # ynew zero(y0), # yerr Array(typeof(y0), lk)) # ks - +# m3: above to zeros(typeof(y0), lk) and remove below loop for i = 1:lk tmp.ks[i] = zero(y0) end @@ -105,9 +109,11 @@ function next{T}(s :: Solution{TableauStepperFixed{T}}, state :: TableauState) b = s.stepper.tableau.b dt = min(state.dt,s.options.tstop-step.t) + # m3: why is it necessary to copy here and then copy back below? 
tmp.ynew[:] = step.y for k=1:length(b) + # m3: here write in tmp not state.tmp: calc_next_k!(state.tmp, k, s.ode, s.stepper.tableau, step, dt) for d=1:dof tmp.ynew[d] += dt * b[k]*tmp.ks[k][d] @@ -151,6 +157,16 @@ function next{T}(sol :: Solution{TableauStepperAdaptive{T}}, state :: TableauSta err, newdt, timeout = rk_trial_step!(tmp, sol.ode, step, sol.stepper.tableau, dt, timeout, sol.options) + # m3: I liked my setup better with: + # rk_embedded_step!(ytrial, yerr, ks, ytmp, y, fn, t, dt, dof, btab) + # # Check error and find a new step size: + # err, newdt, timeout = stepsize_hw92!(dt, tdir, y, ytrial, yerr, order, timeout, + # dof, abstol, reltol, maxstep, norm) + # + # It that way it's clearer what's done, plus the + # rk_trial_step! function is only used once. + + # trim again in case newdt > dt newdt = min(newdt, sol.options.tstop-state.step.t) @@ -193,7 +209,7 @@ end # Lower level algorithms # ########################## - +# m3: docs (or better remove) function rk_trial_step!(tmp :: RKTempArrays, ode :: ExplicitODE, last_step :: Step, @@ -217,6 +233,7 @@ function rk_embedded_step!(tmp :: RKTempArrays, tableau :: Tableau, last_step :: Step, dt) +# m3: update docs # Does one embedded R-K step updating ytrial, yerr and ks. # # Assumes that ks[:,1] is already calculated! @@ -227,6 +244,8 @@ function rk_embedded_step!(tmp :: RKTempArrays, dof = length(y) b = tableau.b + # m3: not good: this first creates an array, then copies it. Use + # fill!(A, zero(y[1])) tmp.ynew[:] = zero(y) tmp.yerr[:] = zero(y) @@ -268,6 +287,8 @@ function stepsize_hw92!{T}(tmp, # - allow component-wise reltol and abstol? # - allow other norms +# m3: shouldn't this use options.norm? 
+ ord = minimum(order(tableau)) timout_after_nan = 5 fac = [T(8//10), T(9//10), T(1//4)^(1//(ord+1)), T(38//100)^(1//(ord+1))][1] diff --git a/src/tableaus.jl b/src/tableaus.jl index 22cbf08f1..a11ae16d0 100644 --- a/src/tableaus.jl +++ b/src/tableaus.jl @@ -41,6 +41,7 @@ Base.convert{Tnew<:Real}(::Type{Tnew}, tab::Tableau) = error("Define convert met # Tableaus for explicit Runge-Kutta methods ########################################### +# m3: these tableaus should go into rk.jl as they belong to R-K methods immutable TableauRKExplicit{Name, S, T} <: Tableau{Name, S, T} order::(@compat(Tuple{Vararg{Int}})) # the order of the methods @@ -198,6 +199,13 @@ const bt_feh78 = TableauRKExplicit(:feh78, (7,8), Rational{Int64}, ) +# m3: +# - this function is not used anymore! Is there a reason for this? +# - this should probably stay in this file. +# - update type-variables to +# ET, EY, EF, CT, CY +# - I think it is fine to assume that typeof(y0)==typeof(F(t0,y0)) +# i.e. dy has same type as y. function make_consistent_types(fn, y0, tspan, btab::Tableau) # There are a few types involved in a call to a ODE solver which # somehow need to be consistent: diff --git a/src/types.jl b/src/types.jl index 486f7eb8e..e21cb98f1 100644 --- a/src/types.jl +++ b/src/types.jl @@ -1,16 +1,29 @@ abstract AbstractODE +# m3: does this need type-parameters for t0,y0: Et, Eyf? +""" +Explicitly defined ODE of form dy = F(t,y) + +Fields: + +- t0, y0: initial conditions +- F!: ODE function `F!(t,y,dy)` which modifies `dy` in-place +- jac!: TODO +""" type ExplicitODE <: AbstractODE t0; y0 F! :: Function jac! :: Function - function ExplicitODE(t0,y0,F!; jac! = (t,y,J)->fdjacobian!(F!,t,y,J)) - new(t0,y0,F!,jac!) - end end +# m3: how about you define (a bit prematurely) an ImplicitODE type too? +""" +Convert a out-of-place explicitly defined ODE function to an in-place function. + +Note, this does not help with memory allocations. 
+""" function explicit_ineff(t0,y0,F;jac = (t,y)->fdjacobian(F,t,y)) function F!(t,y,dy) # this is why we can't handle a scalar type any more @@ -22,10 +35,18 @@ function explicit_ineff(t0,y0,F;jac = (t,y)->fdjacobian(F,t,y)) return ExplicitODE(t0,y0,F!,jac!) end - +# m3: needs docs! abstract AbstractStepper abstract AbstractState +# m3: +# - docs +# - maybe use the typevars as defined in make_consistent_types for t, +# y, dy? T->Et, S->Ty +# (or something else consistent throughout, maybe nicer would be all +# uppercase: ET, EFY, TT, TY). +# - if find `Step` a bit confusing name, in particular combined with +# AbstractStepper, but not sure what's better. # this might suffice for some solvers type Step{T,S} @@ -43,8 +64,31 @@ function show(io::IO, state :: Step) end -# general purpose options -immutable Options{T} +""" +Options for ODE solvers. This type has a key-word constructor which +will fill the structure with default values. + +General: + +- initstep :: T initial step +- tstop :: T end integration time +- reltol :: T relative tolerance (m3: could this be a vector?) +- abstol :: T absolute tolerance (m3: could this be a vector?) +- minstep :: T minimal allowed step +- maxstep :: T maximal allowed step +- norm function to calculate the norm in step control +- maxiters :: T maximum number of steps + +Dense output options: + +- tspan :: Vector{T} output times +- points :: Symbol which points are returned: `:specified` only the + ones in tspan or `:all` which includes also the step-points of the solver. +- stopevent Stop integration at a zero of this function +- roottol TODO + +""" +immutable Options{T} # m3: T->Et # stepper options initstep :: T tstop :: T @@ -58,14 +102,19 @@ immutable Options{T} # dense output options tspan :: Vector{T} points :: Symbol +# m3: I think this should be an array of functions. Depending on some +# flag each one returns, the iteration stops or continues. Rename it +# to eventfns. I like matlabs interface. 
+# [value,isterminal,direction] = myEventsFcn(t,y,dy) +# The value gets stored. stopevent :: Function roottol :: T function Options(; tstop = T(Inf), - tspan = T[tstop], - reltol = eps(T)^T(1//3)/10, - abstol = eps(T)^T(1//2)/10, + tspan = [tstop], # m3 maybe [tstart, tstop] + reltol = eps(T)^(1/3)/10, + abstol = eps(T)^(1/2)/10, minstep = 10*eps(T), maxstep = 1/minstep, # TODO: we need a better guess here, possibly @@ -73,9 +122,9 @@ immutable Options{T} initstep = max(min(reltol,abstol,maxstep),minstep), norm = Base.norm, maxiters = T(Inf), - points = :all, + points = :all, stopevent = (t,y)->false, - roottol = eps(T)^T(1//3), + roottol = eps(T)^(1/3), kargs...) if all(points .!= [:specified,:all]) error("Option points = $points is not supported, use :specified or :all") @@ -87,6 +136,7 @@ end function show{T}(io::IO, opts :: Options{T}) + # m3: iterate over fields. println("") println("Options{$T}") println("tstop = $(opts.tstop)") @@ -102,6 +152,12 @@ function show{T}(io::IO, opts :: Options{T}) println("roottol = $(opts.roottol)") end +# m3: +# - I don't like the name `Solution` as this is just the problem +# specification. Maybe `Problem` or `SolutionIterator`? +# - in my idea-write-up I added `tend` to this type. I quite like +# this as it means that tspan is just about outputs and t0, tend is +# about the integration interval. But maybe this is over-tidy. # solution is a collection of an equation, an integration method # (stepper) and its options @@ -122,8 +178,8 @@ solve{T,S}(ode :: T, stepper :: S, options :: Options) = error("The $S doesn't s # normally we return the working array, which changes at each step and # expect the user to copy it if necessary. In order for collect to # return the expected result we need to copy the output at each step. 
-function collect{T,S}(t :: Type{Tuple{T,AbstractVector{S}}}, s::Solution) - if maximum(s.options.tspan) == T(Inf) +function collect{T}(t::Type{T}, s::Solution) + if any(s.options.tspan .== Inf) error("Attempting to collect an infinite list, use tstop or tspan with finite numbers only") end collect(t, imap(x->deepcopy(x),s)) @@ -155,18 +211,3 @@ function fdjacobian(F, t, x::Vector) end return dFdx end - -function fdjacobian!{T}(F!, t, x::Vector{T}, J::Array{T,2}) - ftx = similar(x) - ftx2= similar(x) - dx = similar(x) - F!(t,x,ftx) - lx = max(length(x),1) - dFdx = zeros(eltype(x), lx, lx) - for j = 1:lx - # The 100 below is heuristic - dx[j] = (x[j] .+ (x[j]==0))./100 - F!(t,x+dx,ftx2) - J[:,j] = (ftx2-ftx)./dx[j] - end -end diff --git a/test/runtests.jl b/test/runtests.jl index 7a73ef8a8..5ffc45b33 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,4 +1,5 @@ -# using ODE +include("../src/ODE.jl") +using ODE using Base.Test tol = 1e-2 @@ -9,8 +10,8 @@ solvers = [ ODE.ode1, ODE.ode2_midpoint, ODE.ode2_heun, - # ODE.ode4, - # ODE.ode4ms, + ODE.ode4, + # Ode.ode4ms, # ODE.ode5ms, # adaptive ODE.ode21, # this fails on Travis with 0.4?! TODO revert once fixed. From 408b14972f1391e03f89320ac73c9a56816b4665 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 29 Apr 2016 15:07:50 +0200 Subject: [PATCH 016/113] First round of fixes from @mauro3 comments. --- src/dense.jl | 129 ++++++++++----------- src/helpers.jl | 63 ++++++++--- src/interfaces.jl | 33 +++--- src/iterators.jl | 49 +++----- src/ode23s.jl | 40 +++---- src/rk.jl | 243 ++++++++++++++++++---------------------- src/types.jl | 189 ++++++++++++++++++------------- test/interface-tests.jl | 20 +++- 8 files changed, 397 insertions(+), 369 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 716f4a884..aab8f001b 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -1,31 +1,38 @@ # A higher level stepper, defined as a wrapper around another stepper. -# m3: is this still true? 
The wrapper in `next` seems very substantial? +#TODO: how about having an DenseStepper <: AbstractWrapper <: AbstractStepper? immutable DenseStepper <: AbstractStepper - solver :: Solution # m3: a `solver` is a `Solution`? Seems a bit strange. + solver::Solver end -# m3: this seems a bit odd: just return a `Solution` type which is -# actually not solved yet. -solve(ode :: ExplicitODE, - stepper :: DenseStepper, - options :: Options) = Solution(ode,stepper,options) +solve(ode::ExplicitODE, + stepper::DenseStepper, + options::Options) = Solver(ode,stepper,options) -dense(sol :: Solution) = solve(sol.ode, DenseStepper(sol), sol.options) +dense(sol::Solver) = solve(sol.ode, DenseStepper(sol), sol.options) -# m3: does this not need type-parameters? -type DenseState - s0 :: Step; s1 :: Step - last_tout +""" + +The state of the dense stepper + +- s0, s1: Previous steps, used to produce interpolated output +- solver_state: The state of the associated solver +- ytmp: work array + +""" +type DenseState{T,S} <: AbstractState + s0::Step{T,S} + s1::Step{T,S} + last_tout::T first_step - solver_state + solver_state::AbstractState # used for storing the interpolation result - ytmp + ytmp::S solver_done end -function start(s :: Solution{DenseStepper}) +function start(s::Solver{DenseStepper}) # extract the real solver solver = s.stepper.solver t0 = solver.ode.t0 @@ -36,14 +43,17 @@ function start(s :: Solution{DenseStepper}) step1 = Step(t0,deepcopy(y0),deepcopy(dy0)) solver_state = start(solver) ytmp = deepcopy(y0) - return DenseState(step0, step1, t0, true, solver_state, ytmp, false) + return DenseState(step0, step1, t0-1, true, solver_state, ytmp, false) end # m3: I think it would be nice to factor out the dense-output and # root-finding into its own function. That way it could be used also # independently of the dense-output iterator. Also, it would make # this next function more compact. 
-function next(s :: Solution{DenseStepper}, state :: DenseState) + +# pwl: I agree, but then the problem is that once you decouple them +# you would lose the opprotunity to detect the roots with each step. +function next(s::Solver{DenseStepper}, state::DenseState) # m3: I'm not 100% sure what happens here. I would implement it like so: @@ -61,53 +71,69 @@ function next(s :: Solution{DenseStepper}, state :: DenseState) # return ((t, y), state) # end + # pwl: @m3 this is basically what happens here:-), although I'm + # not using the index of tspan anywhere explicitly. + solver = s.stepper.solver + # these guys store the intermediate steps we make s0, s1 = state.s0, state.s1 t0, t1 = s0.t, s1.t - if state.first_step - state.first_step = false - return ((s0.t,s0.y),state) - end + # assuming the last output was done at state.last_tout set the + # t_goal to the next larger time from tspan. Strong inequality + # below is crucial, otherwise we would be selecting the same step + # every time. + tspan = s.options.tspan + t_goal = tspan[findfirst(t->(t>state.last_tout), tspan)] - # the next output time that we aim at - t_goal = s.options.tspan[findfirst(t->(t>state.last_tout), s.options.tspan)] + # Keep computing new steps (i.e. new pairs (t0,t1)) until we reach + # t0 < t_goal <= t1, then we use interpolation to get the value at + # t_goal. Unless points==:all, then we break the while loop after + # making the first step. + while t_goal > t1 - # the t0 == t1 part ensures that we make at least one step - while t1 < t_goal - - # s1 is the starting point for the new step, while the new - # step is saved in s0 + # s1 stores the last succesfull step, the new step is stored + # in s0 if done(solver, state.solver_state) warn("The iterator was exhausted before the dense output completed.") # prevents calling done(..) twice state.solver_done = true + # TODO: deepcopy? 
+ # Return whatever we got as the last step return ((s0.t,s0.y[:]),state) else - # at this point s0 holds the new step, "s2" if you will + # at this point s0 is updated with the new step, "s2" if you will ((s0.t,s0.y[:]), state.solver_state) = next(solver, state.solver_state) end # swap s0 and s1 s0, s1 = s1, s0 - # update the state - state.s0, state.s1 = s0, s1 # and times t0, t1 = s0.t, s1.t - # we made a successfull step and points == :all - if s.options.points == :all || s.options.stopevent(t1,s1.y) - t_goal = min(t_goal,t1) + # update the state accordingly + state.s0, state.s1 = s0, s1 + + # we haven't reached t_goal yet (t1= s.options.tspan[end] || + state.last_tout >= s.options.tspan[end] || s.options.stopevent(state.s1.t,state.s1.y) ) end @@ -160,28 +186,3 @@ function hermite_interp!(y,t,step0::Step,step1::Step) end nothing end - -# m3: docs, move to helpers.jl -function findroot(f,rng,eps) - xl, xr = rng - fl, fr = f(xl), f(xr) - - if fl*fr > 0 || xl > xr - error("Inconsistent bracket") - end - - while xr-xl > eps - xm = (xl+xr)/2 - fm = f(xm) - - if fm*fr > 0 - xr = xm - fr = fm - else - xl = xm - fl = fm - end - end - - return (xr+xl)/2 -end diff --git a/src/helpers.jl b/src/helpers.jl index 783c94296..739b5319e 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -1,35 +1,68 @@ -# m3: This should be a generic function so methods can be added if -# other types have something else than NaN for out-of-domain. -# Although, better make it an option. -isoutofdomain = isnan +#TODO make it a function on ExplicitODE and Options +""" -# m3: to be consistent: h->dt +Chooses an initial step-size basing on the equation, initial data, +time span and the order of the method of integration. 
-#TODO make it a function on ExplicitODE and Options -function hinit{T}(F, y0, t0::T, reltol, abstol; tstop = T(Inf), order = 1) +""" +function dtinit{T}(F, y0, t0::T, reltol, abstol; tstop = T(Inf), order = 1) # Returns first step size tau = max(reltol*norm(y0, Inf), abstol) d0 = norm(y0, Inf)/tau f0 = F(t0, y0) d1 = norm(f0, Inf)/tau if min(d0,d1) < eps(T)^(1/3) - h0 = eps(T)^(1/3)/10 + dt0 = eps(T)^(1/3)/10 else - h0 = (d0/d1)/100 + dt0 = (d0/d1)/100 end # perform Euler step y1 = similar(y0) for d = 1:length(y1) - y1[d] = y0[d]+h0*f0[d] + y1[d] = y0[d]+dt0*f0[d] end - f1 = F(t0 + h0, y1) + f1 = F(t0 + dt0, y1) # estimate second derivative - d2 = norm(f1 - f0, Inf)/(tau*h0) + d2 = norm(f1 - f0, Inf)/(tau*dt0) if max(d1, d2) <= 10*eps(T) - h1 = max(eps(T)^(1/3)/10, h0/10^3) + dt1 = max(eps(T)^(1/3)/10, dt0/10^3) else pow = -(2 + log10(max(d1, d2)))/(order+1) - h1 = 10^pow + dt1 = 10^pow end - return min(100*h0, h1, abs(tstop-t0)) + return min(100*dt0, dt1, abs(tstop-t0)) +end + + +""" + +A simple bisection algorithm for finding a root of a solution f(x)=0 +starting within the range x∈rng, the result is a point x₀ which is +located within the distance eps from the true root of f(x)=0. For +this algorithm to work we need f(rng[1]) to have a different sign then +f(rng[2]). 
+ +""" +function findroot(f,rng,eps) + xl, xr = rng + fl, fr = f(xl), f(xr) + + if fl*fr > 0 || xl > xr + error("Inconsistent bracket") + end + + while xr-xl > eps + xm = (xl+xr)/2 + fm = f(xm) + + if fm*fr > 0 + xr = xm + fr = fm + else + xl = xm + fl = fm + end + end + + return (xr+xl)/2 end diff --git a/src/interfaces.jl b/src/interfaces.jl index e1854f9f5..f30ebb7be 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -1,15 +1,15 @@ const steppers = [ ( :ode23s, :ModifiedRosenbrockStepper, []), - ( :ode1, :TableauStepperFixed, [bt_feuler]), - ( :ode2_midpoint, :TableauStepperFixed, [bt_midpoint]), - ( :ode2_heun, :TableauStepperFixed, [bt_heun]), - ( :ode4, :TableauStepperFixed, [bt_rk4]), - ( :ode21, :TableauStepperAdaptive, [bt_rk21]), - ( :ode23, :TableauStepperAdaptive, [bt_rk23]), - ( :ode45_fe, :TableauStepperAdaptive, [bt_rk45]), - ( :ode45_dp, :TableauStepperAdaptive, [bt_dopri5]), - ( :ode78, :TableauStepperAdaptive, [bt_feh78]) + ( :ode1, :RKStepperFixed, [bt_feuler]), + ( :ode2_midpoint, :RKStepperFixed, [bt_midpoint]), + ( :ode2_heun, :RKStepperFixed, [bt_heun]), + ( :ode4, :RKStepperFixed, [bt_rk4]), + ( :ode21, :RKStepperAdaptive, [bt_rk21]), + ( :ode23, :RKStepperAdaptive, [bt_rk23]), + ( :ode45_fe, :RKStepperAdaptive, [bt_rk45]), + ( :ode45_dp, :RKStepperAdaptive, [bt_dopri5]), + ( :ode78, :RKStepperAdaptive, [bt_feh78]) ] @@ -29,10 +29,10 @@ for (name,stepper,params) in steppers stopevent = (t,y)->false, tstop = T(Inf), tspan = [tstop], - # we need these options explicitly for the hinit + # we need these options explicitly for the dtinit reltol = eps(T)^T(1//3)/10, abstol = eps(T)^T(1//2)/10, - initstep = hinit(F, y0, t0, reltol, abstol; tstop=tstop, order=order($stepper)), + initstep = dtinit(F, y0, t0, reltol, abstol; tstop=tstop, order=order($stepper)), kargs...) step = ($stepper){T}($params...) @@ -50,7 +50,7 @@ for (name,stepper,params) in steppers initstep = initstep, kargs...) 
elseif all(tspan .<= t0) -# m3: again, seems like a band-aid + # m3: again, seems like a band-aid # reverse time integration F_reverse(t,y) = -F(2*t0-t,y) # TODO: is that how the jacobian changes? @@ -70,6 +70,7 @@ for (name,stepper,params) in steppers end solution = collect(Tuple{T,S},dense(solve(ode,step,opts))) + # solution = collect(Tuple{T,S},solve(ode,step,opts)) n = length(solution) # return solution @@ -88,19 +89,19 @@ for (name,stepper,params) in steppers return (tn,yn) end - ($name){T<:Number}(F, y0 :: AbstractVector, t0 :: AbstractVector{T}; kargs...) = + ($name){T<:Number}(F, y0::AbstractVector, t0::AbstractVector{T}; kargs...) = ($name)(F,y0,t0[1]; tstop = t0[end], tspan = t0, points = :specified, kargs...) -# m3: could this go into the low-level API? - function ($name)(F, y0 :: Number, t0; kargs...) + # m3: could this go into the low-level API? + function ($name)(F, y0::Number, t0; kargs...) # TODO: this is slow! tn, yn = ($name)((t,y)->[F(t,y[1])], [y0], t0; kargs...) yn2 = Array(typeof(y0),length(yn)) - yn2[:] = map(first,yn) + copy!(yn2,map(first,yn)) return (tn,yn2) end end diff --git a/src/iterators.jl b/src/iterators.jl index 988d0ef02..964b8746c 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -1,40 +1,17 @@ -# this wraps any iterator (method) returning pairs (t,y) in a dense -# output and also covers the reverse time integration -function solver(F, y0::AbstractArray, t0; - tstop = Inf, - tspan = [tstop], - method = bt_feuler, - stopevent = (t,y)->false, - kargs...) +""" +Generic done method, some steppers may implement their own versions. +""" - if tstop >= t0 - # forward time integration - sol = method(F,y0,t0; tstop = tstop, kargs...) - dense_sol = dense(F, y0, t0, sol; tspan = tspan, kargs...) - return dense_sol - else - # m3: couldn't this be handled at a lower level (in the - # iterator) by adjusting the direction of the time step. This - # seems a bit like a band-aid. What do I do if I want to use - # the iterator? 
Maybe cleanest to add a time-comparison - # function `before(t1,t2,dt)` and use that in all comparisons? - # reverse time integration - F_reverse(t,y) = -F(2*t0-t,y) - reverse_output(t,y)=(2*t0-t,y) - sol = method(F_reverse,y0,t0; - tstop = 2*t0 -tstop, - tspan = 2*t0.-tspan, - kargs...) - dense_sol = dense(F_reverse, y0, t0, sol; - tspan = 2*t0-tspan, - stopevent = (t,y)->stopevent(2*t0-t,y), - kargs...) - - return imap(x->reverse_output(x...),dense_sol) +function done(s::Solver, state::AbstractState) + if state.step.t >= s.options.tstop + return true + elseif state.dt < s.options.minstep + warn("minstep reached.") + return true + elseif state.iters >= s.options.maxiters + warn("Maximum number of iterations ($(Int(s.options.maxiters))) reached, consider setting a larger maxiter.") + return true end - + return false end - - -solver(F,y0,t0;kargs...)=solver((t,y)->[F(t,y[1])],[y0],t0;kargs...) diff --git a/src/ode23s.jl b/src/ode23s.jl index 7be4610c0..840a88de0 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -26,20 +26,27 @@ order(ModifiedRosenbrockStepper) = 2 # define the set of ODE problems with which this stepper can work solve(ode :: ExplicitODE, stepper :: ModifiedRosenbrockStepper, options :: Options) = - Solution{ModifiedRosenbrockStepper}(ode,stepper,options) + Solver{ModifiedRosenbrockStepper}(ode,stepper,options) # lower level interface (iterator) +""" +The state for the Rosenbrock stepper -# TODO: should we re-use Step or should we just put t,y,dy explicitly -# there? +- step: Last successful step +- F1,F2: Work arrays for storing the intermediate values of y' +- J: Jacobian +- iters: Number of successful steps made + +""" type RosenbrockState{T,S} <: AbstractState - step :: Step{T,S} - dt :: T - F1 :: S; F2 :: S + step ::Step{T,S} + dt ::T + F1 ::S + F2 ::S J # :: ? 
- iters :: Int + iters::Int end @@ -53,7 +60,7 @@ function show(io::IO, state :: RosenbrockState) end -function start(s :: Solution{ModifiedRosenbrockStepper}) +function start(s :: Solver{ModifiedRosenbrockStepper}) t = s.ode.t0 dt = s.options.initstep y = s.ode.y0 @@ -76,22 +83,7 @@ function start(s :: Solution{ModifiedRosenbrockStepper}) end -function done(s :: Solution{ModifiedRosenbrockStepper}, - state :: RosenbrockState) - if state.step.t >= s.options.tstop - return true - elseif state.dt < s.options.minstep - warn("minstep reached.") - return true - elseif state.iters >= s.options.maxiters - warn("Maximum number of iterations ($(Int(s.options.maxiters))) reached, consider setting a larger maxiter.") - return true - end - return false -end - - -function next(s :: Solution{ModifiedRosenbrockStepper}, +function next(s :: Solver{ModifiedRosenbrockStepper}, state :: RosenbrockState) stepper = s.stepper diff --git a/src/rk.jl b/src/rk.jl index de1bc1214..cb515943c 100644 --- a/src/rk.jl +++ b/src/rk.jl @@ -5,11 +5,19 @@ include("tableaus.jl") # intermediate level interface -# m3: this seems a bit an odd name. Tableaus are useful not just for -# RK. So either move this to tableaus.jl or rename it. -immutable TableauStepper{Step,T} <: AbstractStepper - tableau :: Tableau # m3: this is an abstract type. Is that ok? - function TableauStepper(tab) +""" + +A general Runge-Kutta stepper (it cen represent either, a fixed step +or an adaptive step algorithm). + +""" +immutable RKStepper{Step,T} <: AbstractStepper + # m3: this is an abstract type. Is that ok? 
+ + # pwl: I think this is fine, otherwise we would have to add even + # more type parameters to RKStepper + tableau::Tableau + function RKStepper(tab) if Step == :fixed && isadaptive(tab) error("Cannot construct a fixed step method from an adaptive step tableau") elseif Step == :adaptive && !isadaptive(tab) @@ -20,79 +28,78 @@ immutable TableauStepper{Step,T} <: AbstractStepper end -typealias TableauStepperFixed{T} TableauStepper{:fixed, T} -typealias TableauStepperAdaptive{T} TableauStepper{:adaptive,T} +typealias RKStepperFixed{T} RKStepper{:fixed, T} +typealias RKStepperAdaptive{T} RKStepper{:adaptive,T} -order(stepper :: TableauStepper) = minimum(order(stepper.tableau)) +order(stepper::RKStepper) = minimum(order(stepper.tableau)) # TODO: possibly handle the initial stepsize and the tableau conversion here? -solve{S,T}(ode :: ExplicitODE, stepper :: TableauStepper{S,T}, options :: Options{T}) = - Solution{TableauStepper{S,T}}(ode,stepper,options) +solve{S,T}(ode::ExplicitODE, stepper::RKStepper{S,T}, options::Options{T}) = + Solver{RKStepper{S,T}}(ode,stepper,options) # lower level interface # explicit RK stepper -# m3: rename tmp->work (This is what these arrays are called in classic codes, +""" -type RKTempArrays{T} # m3: RKWorkArrays - y :: T - ynew :: T - yerr :: T - ks :: Vector{T} +Pre allocated arrays to store temporary data. Used only by +Runge-Kutta stepper. + +""" +type RKWorkArrays{T} + y ::T + ynew::T + yerr::T + ks ::Vector{T} end -type TableauState{T,S} - step :: Step{T,S} - dt :: T - tmp :: RKTempArrays{S} - timeout :: Int +""" +State for the Runge-Kutta stepper. 
+""" +type RKState{T,S} <: AbstractState + step ::Step{T,S} + dt ::T + work ::RKWorkArrays{S} + timeout ::Int + # This is not currently incremented with each step + iters ::Int end -function show(io :: IO, state :: TableauState) +function show(io::IO, state::RKState) show(state.step) println("dt = $(state.dt)") println("timeout = $(state.timeout)") - println("tmp = $(state.tmp)") + println("work = $(state.work)") end -function start{S,T}(s :: Solution{TableauStepper{S,T}}) +function start{S,T}(s::Solver{RKStepper{S,T}}) t0, dt0, y0 = s.ode.t0, s.options.initstep, s.ode.y0 # TODO: we should do the Butcher table conversion somewhere lk = lengthks(s.stepper.tableau) - tmp = RKTempArrays(zero(y0), # y - zero(y0), # ynew - zero(y0), # yerr - Array(typeof(y0), lk)) # ks -# m3: above to zeros(typeof(y0), lk) and remove below loop + work = RKWorkArrays(zero(y0), # y + zero(y0), # ynew + zero(y0), # yerr + Array(typeof(y0), lk)) # ks + + # we have to allocate each component separately for i = 1:lk - tmp.ks[i] = zero(y0) + work.ks[i]=zero(y0) end - # pre-initialize tmp.ks[1] - s.ode.F!(t0,y0,tmp.ks[1]) + # pre-initialize work.ks[1] + s.ode.F!(t0,y0,work.ks[1]) - step = Step(t0,deepcopy(y0),deepcopy(tmp.ks[1])) + step = Step(t0,deepcopy(y0),deepcopy(work.ks[1])) timeout = 0 # for step control - return TableauState(step,dt0,tmp,timeout) -end - - -function done{S,T}(s :: Solution{TableauStepper{S,T}}, state :: TableauState) - if state.step.t >= s.options.tstop - return true - elseif state.dt < s.options.minstep - warn("minstep reached") - return true - end - return false + return RKState(step,dt0,work,timeout,0) end @@ -101,26 +108,30 @@ end ##################### -function next{T}(s :: Solution{TableauStepperFixed{T}}, state :: TableauState) +function next{T}(s::Solver{RKStepperFixed{T}}, state::RKState) step = state.step - tmp = state.tmp + work = state.work dof = length(step.y) b = s.stepper.tableau.b dt = min(state.dt,s.options.tstop-step.t) # m3: why is it necessary to copy 
here and then copy back below? - tmp.ynew[:] = step.y + + # pwl: to my understanding calc_next_k! needs the starting value + # (step.y) but work.ynew is changing in the inner loop, so we need + # two distinct arrays, both starting as step.y. + + copy!(work.ynew,step.y) for k=1:length(b) - # m3: here write in tmp not state.tmp: - calc_next_k!(state.tmp, k, s.ode, s.stepper.tableau, step, dt) + calc_next_k!(work, k, s.ode, s.stepper.tableau, step, dt) for d=1:dof - tmp.ynew[d] += dt * b[k]*tmp.ks[k][d] + work.ynew[d] += dt * b[k]*work.ks[k][d] end end step.t += dt - step.y[:] = tmp.ynew + copy!(step.y,work.ynew) return ((step.t,step.y), state) end @@ -130,17 +141,17 @@ end ######################## -function next{T}(sol :: Solution{TableauStepperAdaptive{T}}, state :: TableauState) +function next{T}(sol::Solver{RKStepperAdaptive{T}}, state::RKState) const timeout_const = 5 # the initial values dt = state.dt # dt is the previous stepisze, it is - # modified inside the loop + # modified inside the loop timeout = state.timeout - - tmp = state.tmp + work = state.work step = state.step + tableau = sol.stepper.tableau # The while loop continues until we either find a stepsize which # leads to a small enough error or the stepsize reaches @@ -151,21 +162,15 @@ function next{T}(sol :: Solution{TableauStepperAdaptive{T}}, state :: TableauSta while true - # Do one step (assumes ks[1]==f0). After calling tmp.ynew + # Do one step (assumes ks[1]==f0). After calling work.ynew # holds the new step. - # TODO: return ynew instead of passing it as tmp.ynew? - err, newdt, timeout = - rk_trial_step!(tmp, sol.ode, step, sol.stepper.tableau, dt, timeout, sol.options) + # TODO: return ynew instead of passing it as work.ynew? 
- # m3: I liked my setup better with: - # rk_embedded_step!(ytrial, yerr, ks, ytmp, y, fn, t, dt, dof, btab) - # # Check error and find a new step size: - # err, newdt, timeout = stepsize_hw92!(dt, tdir, y, ytrial, yerr, order, timeout, - # dof, abstol, reltol, maxstep, norm) - # - # It that way it's clearer what's done, plus the - # rk_trial_step! function is only used once. + # work.y and work.yerr and work.ks are updated after this step + rk_embedded_step!(work, sol.ode, tableau, step, dt) + # changes work.yerr + err, newdt, timeout = stepsize_hw92!(work, step, tableau, dt, timeout, sol.options) # trim again in case newdt > dt newdt = min(newdt, sol.options.tstop-state.step.t) @@ -185,13 +190,13 @@ function next{T}(sol :: Solution{TableauStepperAdaptive{T}}, state :: TableauSta # preload ks[1] for the next step if isFSAL(sol.stepper.tableau) - tmp.ks[1][:] = tmp.ks[end] + copy!(work.ks[1],work.ks[end]) else - sol.ode.F!(step.t+dt, tmp.ynew, tmp.ks[1]) + sol.ode.F!(step.t+dt, work.ynew, work.ks[1]) end # Swap bindings of y and ytrial, avoids one copy - step.y, tmp.ynew = tmp.ynew, step.y + step.y, work.ynew = work.ynew, step.y # Update state with the data from the step we have just # made: @@ -209,72 +214,48 @@ end # Lower level algorithms # ########################## -# m3: docs (or better remove) -function rk_trial_step!(tmp :: RKTempArrays, - ode :: ExplicitODE, - last_step :: Step, - tableau :: TableauRKExplicit, - dt, - timeout, - options :: Options) - - # tmp.y and tmp.yerr and tmp.ks are updated after this step - rk_embedded_step!(tmp, ode, tableau, last_step, dt) - - # changes tmp.yerr (via in place update) - err, newdt, timeout = stepsize_hw92!(tmp, last_step, tableau, dt, timeout, options) - - return err, newdt, timeout -end - - -function rk_embedded_step!(tmp :: RKTempArrays, - ode :: ExplicitODE, - tableau :: Tableau, - last_step :: Step, +function rk_embedded_step!(work ::RKWorkArrays, + ode ::ExplicitODE, + tableau ::Tableau, + last_step ::Step, dt) 
-# m3: update docs - # Does one embedded R-K step updating ytrial, yerr and ks. - # - # Assumes that ks[:,1] is already calculated! - # - # Modifies tmp.y, tmp.ynew and tmp.yerr only + # Does one embedded R-K step updating work.ynew, work.yerr and work.ks. + # Assumes that work.ks[:,1] is already calculated! + # Modifies work.y, work.ynew and work.yerr only y = last_step.y dof = length(y) b = tableau.b - # m3: not good: this first creates an array, then copies it. Use - # fill!(A, zero(y[1])) - tmp.ynew[:] = zero(y) - tmp.yerr[:] = zero(y) + fill!(work.ynew, zero(eltype(y))) + fill!(work.yerr, zero(eltype(y))) for s=1:lengthks(tableau) - # we skip the first step beacause we assume that tmp.ks[1] is + # we skip the first step beacause we assume that work.ks[1] is # already computed if s > 1 - calc_next_k!(tmp, s, ode, tableau, last_step, dt) + calc_next_k!(work, s, ode, tableau, last_step, dt) end for d=1:dof - tmp.ynew[d] += b[1,s]*tmp.ks[s][d] - tmp.yerr[d] += b[2,s]*tmp.ks[s][d] + work.ynew[d] += b[1,s]*work.ks[s][d] + work.yerr[d] += b[2,s]*work.ks[s][d] end end for d=1:dof - tmp.yerr[d] = dt*(tmp.ynew[d]-tmp.yerr[d]) - tmp.ynew[d] = y[d] + dt*tmp.ynew[d] + work.yerr[d] = dt*(work.ynew[d]-work.yerr[d]) + work.ynew[d] = y[d] + dt*work.ynew[d] end end -function stepsize_hw92!{T}(tmp, - last_step :: Step, - tableau :: TableauRKExplicit, - dt :: T, +function stepsize_hw92!{T}(work, + last_step ::Step, + tableau ::TableauRKExplicit, + dt ::T, timeout, - options :: Options) + options ::Options) # Estimates the error and a new step size following Hairer & # Wanner 1992, p167 (with some modifications) # @@ -287,8 +268,6 @@ function stepsize_hw92!{T}(tmp, # - allow component-wise reltol and abstol? # - allow other norms -# m3: shouldn't this use options.norm? 
- ord = minimum(order(tableau)) timout_after_nan = 5 fac = [T(8//10), T(9//10), T(1//4)^(1//(ord+1)), T(38//100)^(1//(ord+1))][1] @@ -300,17 +279,18 @@ function stepsize_hw92!{T}(tmp, for d=1:dof # if outside of domain (usually NaN) then make step size smaller by maximum - if isoutofdomain(tmp.y[d]) + if options.isoutofdomain(work.y[d]) return T(10), dt*facmin, timout_after_nan end y0 = last_step.y[d] # TODO: is this supposed to be the last successful step? - y1 = tmp.ynew[d] # the approximation to the next step - sci = (options.abstol + options.reltol*max(norm(y0),norm(y1))) - tmp.yerr[d] = tmp.yerr[d]/sci # Eq 4.10 + y1 = work.ynew[d] # the approximation to the next step + sci = (options.abstol + options.reltol*max(options.norm(y0),options.norm(y1))) + work.yerr[d] = work.yerr[d]/sci # Eq 4.10 end - err = norm(tmp.yerr) # Eq. 4.11 + # TOOD: should we use options.norm here as well? + err = norm(work.yerr) # Eq. 4.11 newdt = min(options.maxstep, dt*max(facmin, fac*(1/err)^(1//(ord+1)))) # Eq 4.13 modified if timeout > 0 @@ -322,23 +302,22 @@ function stepsize_hw92!{T}(tmp, end -# For clarity we pass the RKTempArrays part of the state separately, +# For clarity we pass the RKWorkArrays part of the state separately, # this is the only part of state that can be changed here -function calc_next_k!(tmp :: RKTempArrays, - i :: Int, - ode :: ExplicitODE, - tableau :: Tableau, - last_step :: Step, +function calc_next_k!(work ::RKWorkArrays, + i ::Int, + ode ::ExplicitODE, + tableau ::Tableau, + last_step ::Step, dt) dof = length(last_step.y) t, a, c = last_step.t, tableau.a, tableau.c - tmp.y[:] = last_step.y + copy!(work.y,last_step.y) for j=1:i-1 for d=1:dof - tmp.y[d] += dt * tmp.ks[j][d] * a[i,j] + work.y[d] += dt * work.ks[j][d] * a[i,j] end - # tmp.y[:] += dt*tmp.ks[j]*a[i,j] end - ode.F!(t + c[i]*dt, tmp.y, tmp.ks[i]) + ode.F!(t + c[i]*dt, work.y, work.ks[i]) end diff --git a/src/types.jl b/src/types.jl index e21cb98f1..c242ae3dc 100644 --- a/src/types.jl +++ 
b/src/types.jl @@ -1,9 +1,7 @@ abstract AbstractODE - -# m3: does this need type-parameters for t0,y0: Et, Eyf? """ -Explicitly defined ODE of form dy = F(t,y) +Explicitly defined ODE of form dy = F(t,y). Fields: @@ -11,32 +9,55 @@ Fields: - F!: ODE function `F!(t,y,dy)` which modifies `dy` in-place - jac!: TODO """ -type ExplicitODE <: AbstractODE - t0; y0 - F! :: Function - jac! :: Function +immutable ExplicitODE{T,S} <: AbstractODE + t0 ::T + y0 ::S + F! ::Function + jac!::Function +end + +# TODO: change (t,y,J)->fdjacobian(F!,t,y,J) to fdjacobian!(F!) +ExplicitODE{T,S}(t0::T, y0::S, F!::Function; + jac!::Function = (t,y,J)->fdjacobian!(F!,t,y,J)) = + ExplicitODE{T,S}(t0,y0,F!,jac!) + + +""" +This type is not yet implemented, but will serve as an implicitly +defined ODE (i.e. ODE of the form F(t,y,y')=0. +""" +immutable ImplicitODE{T,S} <: AbstractODE end -# m3: how about you define (a bit prematurely) an ImplicitODE type too? """ Convert a out-of-place explicitly defined ODE function to an in-place function. Note, this does not help with memory allocations. """ -function explicit_ineff(t0,y0,F;jac = (t,y)->fdjacobian(F,t,y)) +function explicit_ineff(t0, y0, F::Function; + jac = (t,y)->fdjacobian(F,t,y)) function F!(t,y,dy) # this is why we can't handle a scalar type any more - dy[:] = F(t,y) + copy!(dy,F(t,y)) end function jac!(t,y,J) - J[:] = jac(t,y) + copy!(J,jac(t,y)) end return ExplicitODE(t0,y0,F!,jac!) end -# m3: needs docs! + +""" +The abstract type of the actual algorithm to solve an ODE. +""" abstract AbstractStepper + + +""" +AbstractState keeps the temporary data (state) for the iterator +Solver{::AbstractStepper}. +""" abstract AbstractState # m3: @@ -48,16 +69,20 @@ abstract AbstractState # - if find `Step` a bit confusing name, in particular combined with # AbstractStepper, but not sure what's better. -# this might suffice for some solvers +""" + +Holds a value of a function and its derivative at time t. 
This is +usually used to store the solution of an ODE at particular times. + +""" type Step{T,S} - t :: T - y :: S - dy :: S + t ::T + y ::S + dy::S end -# for debugging -function show(io::IO, state :: Step) +function show(io::IO, state::Step) println("t =$(state.t)") println("y =$(state.y)") println("dy =$(state.dy)") @@ -70,19 +95,20 @@ will fill the structure with default values. General: -- initstep :: T initial step -- tstop :: T end integration time -- reltol :: T relative tolerance (m3: could this be a vector?) -- abstol :: T absolute tolerance (m3: could this be a vector?) -- minstep :: T minimal allowed step -- maxstep :: T maximal allowed step +- initstep ::T initial step +- tstop ::T end integration time +- reltol ::T relative tolerance (m3: could this be a vector?) +- abstol ::T absolute tolerance (m3: could this be a vector?) +- minstep ::T minimal allowed step +- maxstep ::T maximal allowed step - norm function to calculate the norm in step control -- maxiters :: T maximum number of steps +- maxiters ::T maximum number of steps +- isoutofdomain::Function checks if the solution became non-numeric (NaN or Inf) Dense output options: -- tspan :: Vector{T} output times -- points :: Symbol which points are returned: `:specified` only the +- tspan ::Vector{T} output times +- points ::Symbol which points are returned: `:specified` only the ones in tspan or `:all` which includes also the step-points of the solver. - stopevent Stop integration at a zero of this function - roottol TODO @@ -90,31 +116,34 @@ Dense output options: """ immutable Options{T} # m3: T->Et # stepper options - initstep :: T - tstop :: T - reltol :: T - abstol :: T - minstep :: T - maxstep :: T - norm :: Function - maxiters :: T + initstep ::T + tstop ::T + reltol ::T + abstol ::T + minstep ::T + maxstep ::T + norm ::Function + maxiters ::T + + isoutofdomain::Function # dense output options - tspan :: Vector{T} - points :: Symbol -# m3: I think this should be an array of functions. 
Depending on some -# flag each one returns, the iteration stops or continues. Rename it -# to eventfns. I like matlabs interface. -# [value,isterminal,direction] = myEventsFcn(t,y,dy) -# The value gets stored. - stopevent :: Function - roottol :: T + tspan ::Vector{T} + points ::Symbol + + # m3: I think this should be an array of functions. Depending on some + # flag each one returns, the iteration stops or continues. Rename it + # to eventfns. I like matlabs interface. + # [value,isterminal,direction] = myEventsFcn(t,y,dy) + # The value gets stored. + stopevent::Function + roottol ::T function Options(; tstop = T(Inf), - tspan = [tstop], # m3 maybe [tstart, tstop] - reltol = eps(T)^(1/3)/10, - abstol = eps(T)^(1/2)/10, + tspan = T[tstop], + reltol = eps(T)^T(1//3)/10, + abstol = eps(T)^T(1//2)/10, minstep = 10*eps(T), maxstep = 1/minstep, # TODO: we need a better guess here, possibly @@ -122,46 +151,37 @@ immutable Options{T} # m3: T->Et initstep = max(min(reltol,abstol,maxstep),minstep), norm = Base.norm, maxiters = T(Inf), - points = :all, + points = :all, stopevent = (t,y)->false, - roottol = eps(T)^(1/3), + roottol = eps(T)^T(1//3), + isoutofdomain = isnan, kargs...) if all(points .!= [:specified,:all]) error("Option points = $points is not supported, use :specified or :all") end - new(initstep,tstop,reltol,abstol,minstep,maxstep,norm,maxiters,sort(tspan),points,stopevent,roottol) + #TODO iterate over fields here? + new(initstep,tstop,reltol,abstol,minstep,maxstep,norm,maxiters,isoutofdomain,sort(tspan),points,stopevent,roottol) end end - function show{T}(io::IO, opts :: Options{T}) - # m3: iterate over fields. 
- println("") - println("Options{$T}") - println("tstop = $(opts.tstop)") - println("reltol = $(opts.reltol)") - println("abstol = $(opts.abstol)") - println("minstep = $(opts.minstep)") - println("maxstep = $(opts.maxstep)") - println("initstep = $(opts.initstep)") - println("norm = $(opts.norm)") - println("tspan = $(opts.tspan)") - println("points = $(opts.points)") - println("stopevent= $(opts.stopevent)") - println("roottol = $(opts.roottol)") + for name in fieldnames(opts) + @printf("%-20s = %s\n",name,getfield(opts,name)) + end end -# m3: -# - I don't like the name `Solution` as this is just the problem -# specification. Maybe `Problem` or `SolutionIterator`? -# - in my idea-write-up I added `tend` to this type. I quite like -# this as it means that tspan is just about outputs and t0, tend is -# about the integration interval. But maybe this is over-tidy. - -# solution is a collection of an equation, an integration method -# (stepper) and its options -type Solution{T<:AbstractStepper} +""" + +This is an iterable type, each call to next(...) produces a next step +of a numerical solution to an ODE. + +- ode: is the prescrived ode, along with the initial data +- stepper: the algorithm used to produce subsequent steps +- options: options passed to the stepper + +""" +type Solver{T<:AbstractStepper} ode :: AbstractODE stepper :: T options :: Options @@ -178,8 +198,8 @@ solve{T,S}(ode :: T, stepper :: S, options :: Options) = error("The $S doesn't s # normally we return the working array, which changes at each step and # expect the user to copy it if necessary. In order for collect to # return the expected result we need to copy the output at each step. 
-function collect{T}(t::Type{T}, s::Solution) - if any(s.options.tspan .== Inf) +function collect{T,S}(t::Type{Tuple{T,S}}, s::Solver) + if maximum(s.options.tspan) == T(Inf) error("Attempting to collect an infinite list, use tstop or tspan with finite numbers only") end collect(t, imap(x->deepcopy(x),s)) @@ -211,3 +231,18 @@ function fdjacobian(F, t, x::Vector) end return dFdx end + +function fdjacobian!{T}(F!, t, x::Vector{T}, J::Array{T,2}) + ftx = similar(x) + ftx2= similar(x) + dx = similar(x) + F!(t,x,ftx) + lx = max(length(x),1) + dFdx = zeros(eltype(x), lx, lx) + for j = 1:lx + # The 100 below is heuristic + dx[j] = (x[j] .+ (x[j]==0))./100 + F!(t,x+dx,ftx2) + J[:,j] = (ftx2-ftx)./dx[j] + end +end diff --git a/test/interface-tests.jl b/test/interface-tests.jl index 0dd9ee6de..a0693617a 100644 --- a/test/interface-tests.jl +++ b/test/interface-tests.jl @@ -36,7 +36,9 @@ Base.abs(y::CompSol) = norm(y, 2.) # TODO not needed anymore once https://github Base.abs2(y::CompSol) = norm(y, 2.) Base.zero(::Type{CompSol}) = CompSol(complex(zeros(2,2)), 0., 0.) -ODE.isoutofdomain(y::CompSol) = any(isnan, vcat(y.rho[:], y.x, y.p)) +# TODO: This is now an option and has to be passed to the +# solvers. Looks ugly and a kind of a pain to handle. +isoutofdomain(y::CompSol) = any(isnan, vcat(y.rho[:], y.x, y.p)) # Because the new RK solvers wrap scalars in an array and because of # https://github.com/JuliaLang/julia/issues/11053 these are also needed: @@ -46,7 +48,6 @@ ODE.isoutofdomain(y::CompSol) = any(isnan, vcat(y.rho[:], y.x, y.p)) .*(s::Real, y1::CompSol) = y1*s ./(y1::CompSol, s::Real) = CompSol(y1.rho/s, y1.x/s, y1.p/s) - ################################################################################ # define RHSs of differential equations @@ -70,15 +71,24 @@ y0 = CompSol(complex(rho0), 2., 1.) 
# solve ODEs endt = 2*pi; -t,y1 = ODE.ode45((t,y)->rhs(t, y, delta0, V0, g0), y0, [0., endt]) # used as reference +F(t,y)=rhs(t, y, delta0, V0, g0) +t,y1 = ODE.ode45(F, y0, [0., endt], + reltol=1e-8,abstol=1e-5, + isoutofdomain = isoutofdomain) # used as reference + println("Testing interface for scalar-like state... ") for solver in solvers println("Testing $solver") # these only work with some Array-like interface defined: - if solver in [ODE.ode23s] # , ODE.ode4s_s, ODE.ode4s_kr + if solver in solvers # [ODE.ode23s] # , ODE.ode4s_s, ODE.ode4s_kr continue end - t,y2 = solver((t,y)->rhs(t, y, delta0, V0, g0), y0, linspace(0., endt, 500),abstol=1e-8,reltol=1e-5,initstep=endt) + tout = collect(linspace(0., endt, 5)) + t,y2 = solver(F, y0, tout, + abstol=1e-8,reltol=1e-5, + initstep=1e-4, + isoutofdomain = isoutofdomain) + break @test norm(y1[end]-y2[end])<0.1 end println("ok.") From b6e2b555ddcff54c3d00cebbcd15caa38abb65ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sat, 30 Apr 2016 15:29:10 +0200 Subject: [PATCH 017/113] Round two of fixes --- src/dense.jl | 21 +++-- src/helpers.jl | 8 +- src/interfaces.jl | 213 +++++++++++++++++++++++----------------------- src/types.jl | 51 ++++++++--- test/runtests.jl | 1 - 5 files changed, 167 insertions(+), 127 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index aab8f001b..28c2fdb67 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -119,9 +119,9 @@ function next(s::Solver{DenseStepper}, state::DenseState) # we haven't reached t_goal yet (t1[F(t,y[1])], [y0], args...; kargs...) 
""" diff --git a/src/interfaces.jl b/src/interfaces.jl index f30ebb7be..58d341048 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -1,111 +1,112 @@ -const steppers = - [ - ( :ode23s, :ModifiedRosenbrockStepper, []), - ( :ode1, :RKStepperFixed, [bt_feuler]), - ( :ode2_midpoint, :RKStepperFixed, [bt_midpoint]), - ( :ode2_heun, :RKStepperFixed, [bt_heun]), - ( :ode4, :RKStepperFixed, [bt_rk4]), - ( :ode21, :RKStepperAdaptive, [bt_rk21]), - ( :ode23, :RKStepperAdaptive, [bt_rk23]), - ( :ode45_fe, :RKStepperAdaptive, [bt_rk45]), - ( :ode45_dp, :RKStepperAdaptive, [bt_dopri5]), - ( :ode78, :RKStepperAdaptive, [bt_feh78]) -] - - - -# TODO: there is a lot of useless conversions going on here - -# m3: is meta-programming really needed here? Why not do it like it -# was done before? For instance in runge_kutta.jl we got: -# ode23(fn, y0, tspan; kwargs...) = oderk_adapt(fn, y0, tspan, bt_rk23; kwargs...) - - -for (name,stepper,params) in steppers - @eval begin - function ($name){T<:Number,S<:AbstractVector}( - F, y0 :: S, t0 :: T; - jacobian = (t,y)->fdjacobian(F, t, y), - stopevent = (t,y)->false, - tstop = T(Inf), - tspan = [tstop], - # we need these options explicitly for the dtinit - reltol = eps(T)^T(1//3)/10, - abstol = eps(T)^T(1//2)/10, - initstep = dtinit(F, y0, t0, reltol, abstol; tstop=tstop, order=order($stepper)), - kargs...) - - step = ($stepper){T}($params...) - - # handle different directions of time integration - if all(tspan .>= t0) - # forward time integration - ode = explicit_ineff(t0,y0,F,jac=jacobian) - opts = Options{T}(; - tstop = tstop, - tspan = tspan, - stopevent = stopevent, - reltol = reltol, - abstol = abstol, - initstep = initstep, - kargs...) - elseif all(tspan .<= t0) - # m3: again, seems like a band-aid - # reverse time integration - F_reverse(t,y) = -F(2*t0-t,y) - # TODO: is that how the jacobian changes? 
- jac_reverse(t,y) = -jacobian(2*t0-t,y) - ode = explicit_ineff(t0,y0,F_reverse,jac=jac_reverse) - opts = Options{T}(; - tstop = 2*t0-tstop, - tspan = 2*t0.-tspan, - stopevent = (t,y)->stopevent(2*t0-t,y), - reltol = reltol, - abstol = abstol, - initstep = initstep, - kargs...) - else - # tspan stretches to the left and to the right of t0 - return ([t0],[y0]) - end - - solution = collect(Tuple{T,S},dense(solve(ode,step,opts))) - # solution = collect(Tuple{T,S},solve(ode,step,opts)) - n = length(solution) - - # return solution - - # convert a list of pairs to a pair of arrays - # TODO: leave it out as a list of pairs? - tn = Array(T,n) - yn = Array(typeof(y0),n) - if all(tspan .>= t0) - tn[:] = [x[1] for x in solution] - else - tn[:] = [2*t0-x[1] for x in solution] - end - yn[:] = [x[2] for x in solution] - - return (tn,yn) - end - - ($name){T<:Number}(F, y0::AbstractVector, t0::AbstractVector{T}; kargs...) = - ($name)(F,y0,t0[1]; - tstop = t0[end], - tspan = t0, - points = :specified, - kargs...) - - # m3: could this go into the low-level API? - function ($name)(F, y0::Number, t0; kargs...) - # TODO: this is slow! - tn, yn = ($name)((t,y)->[F(t,y[1])], [y0], t0; kargs...) - yn2 = Array(typeof(y0),length(yn)) - copy!(yn2,map(first,yn)) - return (tn,yn2) - end +""" + +We assume that the initial data y0 is given at tspan[1], and that +tspan[end] is the last integration time. + +""" +function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractStepper; + jacobian::Function = (t,y)->fdjacobian(F, t, y), + # we need these options explicitly for the dtinit + reltol::T = eps(T)^T(1//3)/10, + abstol::T = eps(T)^T(1//2)/10, + initstep::T = dtinit(F, y0, tspan, reltol, abstol; order=order(stepper)), + kargs...) + + # TODO: any ideas on how we could improve the interface so that we + # don't have to use the ugly call to dtinit as a default? 
+ + t0 = tspan[1] + + # construct a solver + ode = explicit_ineff(t0,y0,F,jac=jacobian) + + opts = Options{T}(; + tspan = tspan, + reltol = reltol, + abstol = abstol, + initstep = initstep, + kargs...) + solver = solve(ode,stepper,opts) + + # handle different directions of time integration + if issorted(tspan) + # do nothing, we are already set with the solver + solution = collect(dense(solver)) + elseif issorted(reverse(tspan)) + # Reverse the time direction if necessary. dense() only works + # for positive time direction. + + # TODO: still ugly but slightly less bandaid-like then the + # previous solution + solution = map(ty->(2*t0-ty[1],ty[2]),collect(dense(reverse_time(solver)))) + else + warn("Unsorted output times are not supported") + return ([t0],[y0]) end + + n = length(solution) + + # convert a list of pairs to a pair of arrays + # TODO: leave it out as a list of pairs? + tn = Array(T,n) + yn = Array(typeof(y0),n) + + for (n,(t,y)) in enumerate(solution) + tn[n] = t + yn[n] = isa(y0,Number) ? y[1] : y + end + + return (tn,yn) end +ode23s{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,ModifiedRosenbrockStepper{T}(); kargs...) +ode1{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperFixed{T}(bt_feuler); kargs...) +ode2_midpoint{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperFixed{T}(bt_midpoint); kargs...) +ode2_heun{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperFixed{T}(bt_heun); kargs...) +ode4{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperFixed{T}(bt_rk4); kargs...) +ode21{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperAdaptive{T}(bt_rk21); kargs...) +ode23{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperAdaptive{T}(bt_rk23); kargs...) +ode45_fe{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperAdaptive{T}(bt_rk45); kargs...) +ode45_dp{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperAdaptive{T}(bt_dopri5); kargs...) +ode78{T}(F,y0,t0::Vector{T};kargs...) 
= ode(F,y0,t0,RKStepperAdaptive{T}(bt_feh78); kargs...) const ode45 = ode45_dp + + +""" + +A nasty hack to convert a solver with negative time direction into a +solver with positive time direction. This is necessary as negative +time direction is not supported by steppers (including the dense +output). This only works for ExplicitODE. + +""" +function reverse_time(sol::Solver) + ode, options, stepper = sol.ode, sol.options, sol.stepper + + t0 = ode.t0 + y0 = ode.y0 + + # TODO: improve the implementation + function F_reverse!(t,y,dy) + ode.F!(2*t0-t,y,dy) + dy[:]=-dy + end + + # TODO: is that how the jacobian changes? + function jac_reverse!(t,y,J) + ode.jac!(2*t0-t,y,J) + J[:]=-J + end + + # ExplicitODE is immutable + ode_reversed = ExplicitODE(t0,y0,F_reverse!,jac_reverse!) + stopevent = options.stopevent + + # TODO: we are modifying options here, should we construct new + # options insted? + options.tstop = 2*t0-options.tstop + options.tspan = reverse(2*t0.-options.tspan) + options.stopevent = (t,y)->stopevent(2*t0-t,y) + return solve(ode_reversed,stepper,options) +end diff --git a/src/types.jl b/src/types.jl index c242ae3dc..b1f900740 100644 --- a/src/types.jl +++ b/src/types.jl @@ -17,28 +17,30 @@ immutable ExplicitODE{T,S} <: AbstractODE end # TODO: change (t,y,J)->fdjacobian(F!,t,y,J) to fdjacobian!(F!) -ExplicitODE{T,S}(t0::T, y0::S, F!::Function; - jac!::Function = (t,y,J)->fdjacobian!(F!,t,y,J)) = - ExplicitODE{T,S}(t0,y0,F!,jac!) - +ExplicitODE{T,S<:AbstractVector}(t0::T, y0::S, F!::Function; + jac!::Function = (t,y,J)->fdjacobian!(F!,t,y,J)) = + ExplicitODE{T,S}(t0,y0,F!,jac!) """ + This type is not yet implemented, but will serve as an implicitly defined ODE (i.e. ODE of the form F(t,y,y')=0. + """ immutable ImplicitODE{T,S} <: AbstractODE end """ + Convert a out-of-place explicitly defined ODE function to an in-place function. Note, this does not help with memory allocations. 
+ """ -function explicit_ineff(t0, y0, F::Function; +function explicit_ineff(t0, y0::AbstractVector, F::Function; jac = (t,y)->fdjacobian(F,t,y)) function F!(t,y,dy) - # this is why we can't handle a scalar type any more copy!(dy,F(t,y)) end function jac!(t,y,J) @@ -47,16 +49,37 @@ function explicit_ineff(t0, y0, F::Function; return ExplicitODE(t0,y0,F!,jac!) end +# A temporary solution for handling scalars, should be faster then the +# previous implementation. Should be used only at the top level +# interface. This function cheats by converting scalar functions F +# and jac to vector functions F! and jac!. Still, solving this ODE +# will result in a vector of length one result, so additional external +# conversion is necessary. +function explicit_ineff(t0, y0::Number, F::Function; + jac = (t,y)->fdjacobian(F,t,y)) + function F!(t,y,dy) + dy[1]=F(t,y[1]) + end + function jac!(t,y,J) + J[1]=jac(t,y[1]) + end + return ExplicitODE(t0,[y0],F!,jac!) +end + """ + The abstract type of the actual algorithm to solve an ODE. + """ abstract AbstractStepper """ + AbstractState keeps the temporary data (state) for the iterator Solver{::AbstractStepper}. + """ abstract AbstractState @@ -90,6 +113,7 @@ end """ + Options for ODE solvers. This type has a key-word constructor which will fill the structure with default values. @@ -114,7 +138,7 @@ Dense output options: - roottol TODO """ -immutable Options{T} # m3: T->Et +type Options{T} # m3: T->Et # stepper options initstep ::T tstop ::T @@ -140,8 +164,8 @@ immutable Options{T} # m3: T->Et roottol ::T function Options(; - tstop = T(Inf), - tspan = T[tstop], + tspan = T[Inf], + tstop = tspan[end], reltol = eps(T)^T(1//3)/10, abstol = eps(T)^T(1//2)/10, minstep = 10*eps(T), @@ -181,7 +205,7 @@ of a numerical solution to an ODE. - options: options passed to the stepper """ -type Solver{T<:AbstractStepper} +type Solver{T<:AbstractStepper} # TODO: immutable? 
ode :: AbstractODE stepper :: T options :: Options @@ -205,6 +229,13 @@ function collect{T,S}(t::Type{Tuple{T,S}}, s::Solver) collect(t, imap(x->deepcopy(x),s)) end +function collect(s::Solver) + if maximum(s.options.tspan) == Inf + error("Attempting to collect an infinite list, use tstop or tspan with finite numbers only") + end + collect(imap(x->deepcopy(x),s)) +end + # some leftovers from the previous implementation diff --git a/test/runtests.jl b/test/runtests.jl index 5ffc45b33..5e4984432 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,4 +1,3 @@ -include("../src/ODE.jl") using ODE using Base.Test From d6129538ff5fcfc141466be7af23effaae26b940 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 23 Jun 2016 22:12:25 -0400 Subject: [PATCH 018/113] Graceful treatment of integer initial conditions --- src/helpers.jl | 7 ++-- src/interfaces.jl | 74 ++++++++++++++++++++++++++++++----------- src/ode23s.jl | 5 +-- src/rk.jl | 28 ++++++++-------- src/tableaus.jl | 57 ------------------------------- test/interface-tests.jl | 4 +++ test/runtests.jl | 6 ++++ 7 files changed, 83 insertions(+), 98 deletions(-) diff --git a/src/helpers.jl b/src/helpers.jl index 04e7efc6e..cb594f832 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -6,7 +6,7 @@ Chooses an initial step-size basing on the equation, initial data, time span and the order of the method of integration. 
""" -function dtinit{T,S}(F, y0::Vector{S}, tspan::Vector{T}, reltol, abstol; order = 1) +function dtinit{T}(F, y0, tspan::Vector{T}, reltol, abstol; order = 1) t0 = abs(tspan[1]) tstop = abs(tspan[end]) tau = max(reltol*norm(y0, Inf), abstol) @@ -19,10 +19,7 @@ function dtinit{T,S}(F, y0::Vector{S}, tspan::Vector{T}, reltol, abstol; order = dt0 = (d0/d1)/100 end # perform Euler step - y1 = similar(y0) - for d = 1:length(y1) - y1[d] = y0[d]+dt0*f0[d] - end + y1 = y0+dt0*f0 f1 = F(t0 + dt0, y1) # estimate second derivative d2 = norm(f1 - f0, Inf)/(tau*dt0) diff --git a/src/interfaces.jl b/src/interfaces.jl index 58d341048..91570941b 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -4,13 +4,17 @@ We assume that the initial data y0 is given at tspan[1], and that tspan[end] is the last integration time. """ -function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractStepper; - jacobian::Function = (t,y)->fdjacobian(F, t, y), - # we need these options explicitly for the dtinit - reltol::T = eps(T)^T(1//3)/10, - abstol::T = eps(T)^T(1//2)/10, - initstep::T = dtinit(F, y0, tspan, reltol, abstol; order=order(stepper)), - kargs...) + +function ode{T}(F, y0, tspan::AbstractVector{T}, stepper::AbstractStepper; + jacobian::Function = (t,y)->fdjacobian(F, t, y), + # we need these options explicitly for the dtinit + reltol::T = eps(T)^T(1//3)/10, + abstol::T = eps(T)^T(1//2)/10, + initstep::T = dtinit(F, y0, tspan, reltol, abstol; order=order(stepper)), + kargs...) + +# function ode{T}(F, y0, tspan::AbstractVector{T}, stepper::AbstractStepper; +# jacobian::Function = (t,y)->fdjacobian(F, t, y),kargs...) # TODO: any ideas on how we could improve the interface so that we # don't have to use the ugly call to dtinit as a default? 
@@ -18,7 +22,7 @@ function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractSteppe t0 = tspan[1] # construct a solver - ode = explicit_ineff(t0,y0,F,jac=jacobian) + equation = explicit_ineff(t0,y0,F,jac=jacobian) opts = Options{T}(; tspan = tspan, @@ -26,7 +30,7 @@ function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractSteppe abstol = abstol, initstep = initstep, kargs...) - solver = solve(ode,stepper,opts) + solver = solve(equation,stepper,opts) # handle different directions of time integration if issorted(tspan) @@ -59,16 +63,24 @@ function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractSteppe return (tn,yn) end -ode23s{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,ModifiedRosenbrockStepper{T}(); kargs...) -ode1{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperFixed{T}(bt_feuler); kargs...) -ode2_midpoint{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperFixed{T}(bt_midpoint); kargs...) -ode2_heun{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperFixed{T}(bt_heun); kargs...) -ode4{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperFixed{T}(bt_rk4); kargs...) -ode21{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperAdaptive{T}(bt_rk21); kargs...) -ode23{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperAdaptive{T}(bt_rk23); kargs...) -ode45_fe{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperAdaptive{T}(bt_rk45); kargs...) -ode45_dp{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperAdaptive{T}(bt_dopri5); kargs...) -ode78{T}(F,y0,t0::Vector{T};kargs...) = ode(F,y0,t0,RKStepperAdaptive{T}(bt_feh78); kargs...) +ode23s(F,y0,t0;kargs...) = ode_conv(F,y0,t0,ModifiedRosenbrockStepper; kargs...) +ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:bt_feuler}; kargs...) +ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:bt_midpoint}; kargs...) +ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:bt_heun}; kargs...) +ode4(F,y0,t0;kargs...) 
= ode_conv(F,y0,t0,RKStepperFixed{:bt_rk4}; kargs...) +ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_rk21}; kargs...) +ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_rk23}; kargs...) +ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_rk45}; kargs...) +ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_dopri5}; kargs...) +ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_feh78}; kargs...) + +# this is bugged +# ode_conv(F,y0,t0,stepper;kargs...)=ode(F,make_consistent(y0,t0,stepper)...;kargs...) +# a temporary fix +function ode_conv(F,y0,t0,stepper;kargs...) + y1, t1, stepper1 = make_consistent(y0,t0,stepper) + ode(F,y1,t1,stepper1;kargs...) +end const ode45 = ode45_dp @@ -110,3 +122,27 @@ function reverse_time(sol::Solver) options.stopevent = (t,y)->stopevent(2*t0-t,y) return solve(ode_reversed,stepper,options) end + + + +# The the elements of tspan should basically be scalars and support +# most of the scalar operations. In particular the element type +# should be closed under the division. +function make_consistent{S<:AbstractStepper}(y0, tspan::AbstractVector, stepper::Type{S}) + t_test = 1/(tspan[end]-tspan[1]) + Tt = typeof(t_test) + t_new = convert(Vector{Tt},tspan) + + y_test = y0./t_test + Ty = typeof(y_test) + if typejoin(Ty,AbstractArray) == AbstractArray + Ey = promote_type(map(typeof,y_test)...) + y_new = copy!(similar(y0,Ey),y0) + else + y_new = convert(Ty,y0) + end + + @assert eltype(Tt)<:Real + + return y_new, t_new, stepper{Tt}() +end diff --git a/src/ode23s.jl b/src/ode23s.jl index 840a88de0..19b87667a 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -19,9 +19,10 @@ immutable ModifiedRosenbrockStepper{T<:Number} <: AbstractStepper end end - # TODO: is this correct? 
-order(ModifiedRosenbrockStepper) = 2 +order(::ModifiedRosenbrockStepper) = 2 + +name(::ModifiedRosenbrockStepper) = "Modified Rosenbrock Stepper" # define the set of ODE problems with which this stepper can work diff --git a/src/rk.jl b/src/rk.jl index cb515943c..45451ecb9 100644 --- a/src/rk.jl +++ b/src/rk.jl @@ -11,16 +11,13 @@ A general Runge-Kutta stepper (it cen represent either, a fixed step or an adaptive step algorithm). """ -immutable RKStepper{Step,T} <: AbstractStepper - # m3: this is an abstract type. Is that ok? - - # pwl: I think this is fine, otherwise we would have to add even - # more type parameters to RKStepper +immutable RKStepper{Kind,tab_name,T<:Number} <: AbstractStepper tableau::Tableau - function RKStepper(tab) - if Step == :fixed && isadaptive(tab) + function RKStepper() + tab = eval(tab_name) + if Kind == :fixed && isadaptive(tab) error("Cannot construct a fixed step method from an adaptive step tableau") - elseif Step == :adaptive && !isadaptive(tab) + elseif Kind == :adaptive && !isadaptive(tab) error("Cannot construct an adaptive step method from an fixed step tableau") end new(convert(T,tab)) @@ -28,16 +25,17 @@ immutable RKStepper{Step,T} <: AbstractStepper end -typealias RKStepperFixed{T} RKStepper{:fixed, T} -typealias RKStepperAdaptive{T} RKStepper{:adaptive,T} +typealias RKStepperFixed RKStepper{:fixed} +typealias RKStepperAdaptive RKStepper{:adaptive} order(stepper::RKStepper) = minimum(order(stepper.tableau)) +name(stepper::RKStepper) = typeof(stepper.tableau) # TODO: possibly handle the initial stepsize and the tableau conversion here? 
-solve{S,T}(ode::ExplicitODE, stepper::RKStepper{S,T}, options::Options{T}) = - Solver{RKStepper{S,T}}(ode,stepper,options) +solve{K,S,T}(ode::ExplicitODE, stepper::RKStepper{K,S,T}, options::Options{T}) = + Solver{RKStepper{K,S,T}}(ode,stepper,options) # lower level interface @@ -78,7 +76,7 @@ function show(io::IO, state::RKState) end -function start{S,T}(s::Solver{RKStepper{S,T}}) +function start{T<:RKStepper}(s::Solver{T}) t0, dt0, y0 = s.ode.t0, s.options.initstep, s.ode.y0 # TODO: we should do the Butcher table conversion somewhere @@ -108,7 +106,7 @@ end ##################### -function next{T}(s::Solver{RKStepperFixed{T}}, state::RKState) +function next{RKSF<:RKStepperFixed}(s::Solver{RKSF}, state::RKState) step = state.step work = state.work @@ -141,7 +139,7 @@ end ######################## -function next{T}(sol::Solver{RKStepperAdaptive{T}}, state::RKState) +function next{RKSA<:RKStepperAdaptive}(sol::Solver{RKSA}, state::RKState) const timeout_const = 5 diff --git a/src/tableaus.jl b/src/tableaus.jl index a11ae16d0..6d182558e 100644 --- a/src/tableaus.jl +++ b/src/tableaus.jl @@ -197,60 +197,3 @@ const bt_feh78 = TableauRKExplicit(:feh78, (7,8), Rational{Int64}, 0 0 0 0 0 34//105 9//35 9//35 9//280 9//280 0 41//840 41//840], [0, 2//27, 1//9, 1//6 , 5//12, 1//2 , 5//6 , 1//6 , 2//3 , 1//3 , 1 , 0, 1] ) - - -# m3: -# - this function is not used anymore! Is there a reason for this? -# - this should probably stay in this file. -# - update type-variables to -# ET, EY, EF, CT, CY -# - I think it is fine to assume that typeof(y0)==typeof(F(t0,y0)) -# i.e. dy has same type as y. -function make_consistent_types(fn, y0, tspan, btab::Tableau) - # There are a few types involved in a call to a ODE solver which - # somehow need to be consistent: - # - # Et = eltype(tspan) - # Ey = eltype(y0) - # Ef = eltype(Tf) - # - # There are also the types of the containers, but they are not - # needed as `similar` is used to make containers. 
- # Tt = typeof(tspan) - # Ty = typeof(y0) # note, this can be a scalar - # Tf = typeof(F(tspan(1),y0)) # note, this can be a scalar - # - # Returns - # - Et: eltype of time, needs to be a real "continuous" type, at - # the moment a AbstractFloat - # - Eyf: suitable eltype of y and f(t,y) - # --> both of these are set to typeof(y0[1]/(tspan[end]-tspan[1])) - # - Ty: container type of y0 - # - btab: tableau with entries converted to Et - - # Needed interface: - # On components: /, - - # On container: eltype, promote_type - # On time container: eltype - - Ty = typeof(y0) - Eyf = typeof(y0[1]/(tspan[end]-tspan[1])) - - Et = eltype(tspan) - @assert Et<:Real - if !(Et<:AbstractFloat) - Et = promote_type(Et, Float64) - end - - # if all are Floats, make them the same - if Et<:AbstractFloat && Eyf<:AbstractFloat - Et = promote_type(Et, Eyf) - Eyf = Et - end - - !isleaftype(Et) && warn("The eltype(tspan) is not a concrete type! Change type of tspan for better performance.") - !isleaftype(Eyf) && warn("The eltype(y0/tspan[1]) is not a concrete type! Change type of y0 and/or tspan for better performance.") - - btab_ = convert(Et, btab) - return Et, Eyf, Ty, btab_ -end diff --git a/test/interface-tests.jl b/test/interface-tests.jl index a0693617a..ac2e554ec 100644 --- a/test/interface-tests.jl +++ b/test/interface-tests.jl @@ -36,6 +36,7 @@ Base.abs(y::CompSol) = norm(y, 2.) # TODO not needed anymore once https://github Base.abs2(y::CompSol) = norm(y, 2.) Base.zero(::Type{CompSol}) = CompSol(complex(zeros(2,2)), 0., 0.) +Base.zero(::CompSol) = zero(CompSol) # TODO: This is now an option and has to be passed to the # solvers. Looks ugly and a kind of a pain to handle. 
isoutofdomain(y::CompSol) = any(isnan, vcat(y.rho[:], y.x, y.p)) @@ -90,6 +91,9 @@ for solver in solvers isoutofdomain = isoutofdomain) break @test norm(y1[end]-y2[end])<0.1 + + # test that typeof(tspan)==Vector{Int} does not throw: + t,y2 = solver((t,y)->rhs(t, y, delta0, V0, g0), y0, [0,1]) end println("ok.") diff --git a/test/runtests.jl b/test/runtests.jl index 5e4984432..739f6d9d0 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -41,6 +41,12 @@ for solver in solvers t,y=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001) @test maximum(abs(y-t.^2)) < tol + # test typeof(tspan)==Vector{Int} does not throw + t,y=solver((t,y)->2t, 0., [0,1]) + # test typeof(y0)==Vector{Int} does not throw + t,y=solver((t,y)->[2t], [0], [0,1]) + # test typeof(y0)==Int does not throw + t,y=solver((t,y)->2t, 0, [0,1]) # dy # -- = y ==> y = y0*e.^t From 53daa743941f09f37c03d00d583725484d1bf7ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 24 Jun 2016 14:38:14 -0400 Subject: [PATCH 019/113] Added back the old solvers --- src/ODE.jl | 7 ++- src/adams-bashford-moulton.jl | 52 ++++++++++++++++++++ src/helpers.jl | 2 +- src/interfaces.jl | 55 +++++++-------------- src/rosenbrock.jl | 92 +++++++++++++++++++++++++++++++++++ src/{rk.jl => runge-kutta.jl} | 0 src/types.jl | 2 +- test/runtests.jl | 12 +++-- 8 files changed, 175 insertions(+), 47 deletions(-) create mode 100644 src/adams-bashford-moulton.jl create mode 100644 src/rosenbrock.jl rename src/{rk.jl => runge-kutta.jl} (100%) diff --git a/src/ODE.jl b/src/ODE.jl index e2c14f689..80848f710 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -14,7 +14,7 @@ export ode4, ode4ms # adaptive stiff: export ode23s # non-adaptive stiff: -export ode4s +export ode4s, ode4s_s import Base.convert, Base.show import Base: start, next, done, call, collect @@ -30,8 +30,11 @@ include("dense.jl") # particular solvers include("ode23s.jl") -include("rk.jl") +include("runge-kutta.jl") # include("multistep.jl") 
+include("adams-bashford-moulton.jl") +include("rosenbrock.jl") +# include("taylor.jl") include("iterators.jl") include("interfaces.jl") diff --git a/src/adams-bashford-moulton.jl b/src/adams-bashford-moulton.jl new file mode 100644 index 000000000..5226dbd5a --- /dev/null +++ b/src/adams-bashford-moulton.jl @@ -0,0 +1,52 @@ +# ODE_MS Fixed-step, fixed-order multi-step numerical method +# with Adams-Bashforth-Moulton coefficients +function ode_ms{Ty,T}(F, x0::Ty, tspan::AbstractVector{T}, order::Integer; kargs...) + + if !isleaftype(T) + error("The output times have to be of a concrete type.") + elseif !(T <:AbstractFloat) + error("The time variable should be a floating point number.") + end + + if !isleaftype(Ty) & !isleaftype(eltype(Ty)) + error("The initial data has to be of a concrete type (or an array)") + end + + h = diff(tspan) + x = Array(typeof(x0), length(tspan)) + x[1] = x0 + + if 1 <= order <= 4 + b = ms_coefficients4 + else + b = zeros(order, order) + b[1:4, 1:4] = ms_coefficients4 + for s = 5:order + for j = 0:(s - 1) + # Assign in correct order for multiplication below + # (a factor depending on j and s) .* (an integral of a polynomial with -(0:s), except -j, as roots) + p_int = polyint(poly(diagm(-[0:j - 1; j + 1:s - 1]))) + b[s, s - j] = ((-1)^j / factorial(j) + / factorial(s - 1 - j) * polyval(p_int, 1)) + end + end + end + + # TODO: use a better data structure here (should be an order-element circ buffer) + xdot = similar(x) + for i = 1:length(tspan)-1 + # Need to run the first several steps at reduced order + steporder = min(i, order) + xdot[i] = F(tspan[i], x[i]) + + x[i+1] = x[i] + for j = 1:steporder + x[i+1] += h[i]*b[steporder, j]*xdot[i-(steporder-1) + (j-1)] + end + end + return vcat(tspan), x +end + +# Use order 4 by default +ode4ms(F, x0, tspan; kargs...) = ode_ms(F, x0, tspan, 4; kargs...) +ode5ms(F, x0, tspan; kargs...) = ODE.ode_ms(F, x0, tspan, 5; kargs...) 
diff --git a/src/helpers.jl b/src/helpers.jl index cb594f832..7270fe015 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -29,7 +29,7 @@ function dtinit{T}(F, y0, tspan::Vector{T}, reltol, abstol; order = 1) pow = -(2 + log10(max(d1, d2)))/(order+1) dt1 = 10^pow end - return min(100*dt0, dt1, abs(tstop-t0)) + return T(min(100*dt0, dt1, abs(tstop-t0))) end # a scalar version of the above diff --git a/src/interfaces.jl b/src/interfaces.jl index 91570941b..657c63a55 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -5,20 +5,14 @@ tspan[end] is the last integration time. """ -function ode{T}(F, y0, tspan::AbstractVector{T}, stepper::AbstractStepper; +function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractStepper; jacobian::Function = (t,y)->fdjacobian(F, t, y), # we need these options explicitly for the dtinit reltol::T = eps(T)^T(1//3)/10, abstol::T = eps(T)^T(1//2)/10, - initstep::T = dtinit(F, y0, tspan, reltol, abstol; order=order(stepper)), + initstep::T = dtinit(F, y0, tspan, reltol, abstol; order=order(stepper))::T, kargs...) -# function ode{T}(F, y0, tspan::AbstractVector{T}, stepper::AbstractStepper; -# jacobian::Function = (t,y)->fdjacobian(F, t, y),kargs...) - - # TODO: any ideas on how we could improve the interface so that we - # don't have to use the ugly call to dtinit as a default? - t0 = tspan[1] # construct a solver @@ -74,12 +68,21 @@ ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_rk45}; ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_dopri5}; kargs...) ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_feh78}; kargs...) -# this is bugged -# ode_conv(F,y0,t0,stepper;kargs...)=ode(F,make_consistent(y0,t0,stepper)...;kargs...) -# a temporary fix -function ode_conv(F,y0,t0,stepper;kargs...) - y1, t1, stepper1 = make_consistent(y0,t0,stepper) - ode(F,y1,t1,stepper1;kargs...) + +function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},stepper;kargs...) 
+ + if !isleaftype(T) + error("The output times have to be of a concrete type.") + elseif !(T <:AbstractFloat) + error("The time variable should be a floating point number.") + end + + if !isleaftype(Ty) & !isleaftype(eltype(Ty)) + error("The initial data has to be of a concrete type (or an array)") + end + + ode(F,y0,t0,stepper{T}();kargs...) + end const ode45 = ode45_dp @@ -122,27 +125,3 @@ function reverse_time(sol::Solver) options.stopevent = (t,y)->stopevent(2*t0-t,y) return solve(ode_reversed,stepper,options) end - - - -# The the elements of tspan should basically be scalars and support -# most of the scalar operations. In particular the element type -# should be closed under the division. -function make_consistent{S<:AbstractStepper}(y0, tspan::AbstractVector, stepper::Type{S}) - t_test = 1/(tspan[end]-tspan[1]) - Tt = typeof(t_test) - t_new = convert(Vector{Tt},tspan) - - y_test = y0./t_test - Ty = typeof(y_test) - if typejoin(Ty,AbstractArray) == AbstractArray - Ey = promote_type(map(typeof,y_test)...) - y_new = copy!(similar(y0,Ey),y0) - else - y_new = convert(Ty,y0) - end - - @assert eltype(Tt)<:Real - - return y_new, t_new, stepper{Tt}() -end diff --git a/src/rosenbrock.jl b/src/rosenbrock.jl new file mode 100644 index 000000000..4d7131b34 --- /dev/null +++ b/src/rosenbrock.jl @@ -0,0 +1,92 @@ +#ODEROSENBROCK Solve stiff differential equations, Rosenbrock method +# with provided coefficients. +function oderosenbrock{Ty,T}(F, x0::Ty, tspan::AbstractVector{T}, gamma, a, b, c; jacobian=nothing, kargs...) 
+ + if !isleaftype(T) + error("The output times have to be of a concrete type.") + elseif !(T <:AbstractFloat) + error("The time variable should be a floating point number.") + end + + if !isleaftype(Ty) & !isleaftype(eltype(Ty)) + error("The initial data has to be of a concrete type (or an array)") + end + + if typeof(jacobian) == Function + G = jacobian + else + G = (t, x)->fdjacobian(F, x, t) + end + + h = diff(tspan) + x = Array(typeof(x0), length(tspan)) + x[1] = x0 + + solstep = 1 + while solstep < length(tspan) + ts = tspan[solstep] + hs = h[solstep] + xs = x[solstep] + dFdx = G(ts, xs) + # FIXME + if size(dFdx,1) == 1 + jac = 1/gamma/hs - dFdx[1] + else + jac = eye(dFdx)/gamma/hs - dFdx + end + + g = Array(typeof(x0), size(a,1)) + g[1] = (jac \ F(ts + b[1]*hs, xs)) + x[solstep+1] = x[solstep] + b[1]*g[1] + + for i = 2:size(a,1) + dx = zero(x0) + dF = zero(x0/hs) + for j = 1:i-1 + dx += a[i,j]*g[j] + dF += c[i,j]*g[j] + end + g[i] = (jac \ (F(ts + b[i]*hs, xs + dx) + dF/hs)) + x[solstep+1] += b[i]*g[i] + end + solstep += 1 + end + return vcat(tspan), x +end + + +# Kaps-Rentrop coefficients +const kr4_coefficients = (0.231, + [0 0 0 0 + 2 0 0 0 + 4.452470820736 4.16352878860 0 0 + 4.452470820736 4.16352878860 0 0], + [3.95750374663 4.62489238836 0.617477263873 1.28261294568], + [ 0 0 0 0 + -5.07167533877 0 0 0 + 6.02015272865 0.1597500684673 0 0 + -1.856343618677 -8.50538085819 -2.08407513602 0],) + +ode4s_kr(F, x0, tspan; jacobian=nothing, kargs...) = oderosenbrock(F, x0, tspan, kr4_coefficients...; jacobian=jacobian, kargs...) + +# Shampine coefficients +const s4_coefficients = (0.5, + [ 0 0 0 0 + 2 0 0 0 + 48/25 6/25 0 0 + 48/25 6/25 0 0], + [19/9 1/2 25/108 125/108], + [ 0 0 0 0 + -8 0 0 0 + 372/25 12/5 0 0 + -112/125 -54/125 -2/5 0],) + +ode4s_s(F, x0, tspan; jacobian=nothing, kargs...) = oderosenbrock(F, x0, tspan, s4_coefficients...; jacobian=jacobian, kargs...) 
+ +# Use Shampine coefficients by default (matching Numerical Recipes) +const ode4s = ode4s_s + +const ms_coefficients4 = [ 1 0 0 0 + -1/2 3/2 0 0 + 5/12 -4/3 23/12 0 + -9/24 37/24 -59/24 55/24] diff --git a/src/rk.jl b/src/runge-kutta.jl similarity index 100% rename from src/rk.jl rename to src/runge-kutta.jl diff --git a/src/types.jl b/src/types.jl index b1f900740..76e9730f1 100644 --- a/src/types.jl +++ b/src/types.jl @@ -233,7 +233,7 @@ function collect(s::Solver) if maximum(s.options.tspan) == Inf error("Attempting to collect an infinite list, use tstop or tspan with finite numbers only") end - collect(imap(x->deepcopy(x),s)) + collect(imap(deepcopy,s)) end diff --git a/test/runtests.jl b/test/runtests.jl index 739f6d9d0..c78d8dd88 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -10,8 +10,8 @@ solvers = [ ODE.ode2_midpoint, ODE.ode2_heun, ODE.ode4, - # Ode.ode4ms, - # ODE.ode5ms, + ODE.ode4ms, + ODE.ode5ms, # adaptive ODE.ode21, # this fails on Travis with 0.4?! TODO revert once fixed. 
ODE.ode23, @@ -42,11 +42,13 @@ for solver in solvers @test maximum(abs(y-t.^2)) < tol # test typeof(tspan)==Vector{Int} does not throw - t,y=solver((t,y)->2t, 0., [0,1]) + @test_throws ErrorException t,y=solver((t,y)->2y, 0., [0,1]) # test typeof(y0)==Vector{Int} does not throw - t,y=solver((t,y)->[2t], [0], [0,1]) + @test_throws ErrorException t,y=solver((t,y)->[2y], [0], [0,1]) # test typeof(y0)==Int does not throw - t,y=solver((t,y)->2t, 0, [0,1]) + @test_throws ErrorException t,y=solver((t,y)->2y, 0, [0,1]) + # test if we can deal with a mixed case + @test_throws ErrorException t,y=solver((t,y)->2y, Number[1,1.1,BigInt(1)], Rational[0,1]) # dy # -- = y ==> y = y0*e.^t From 5891a4304b07c9dc728789067740f3eefd8f7d44 Mon Sep 17 00:00:00 2001 From: The Gitter Badger Date: Sat, 25 Jun 2016 11:57:55 +0000 Subject: [PATCH 020/113] Add Gitter badge --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 323b44c71..2a9b25ee7 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,8 @@ Various basic Ordinary Differential Equation solvers implemented in Julia. Pull requests are always highly welcome to fix bugs, add solvers, or anything else! # API discussions + +[![Join the chat at https://gitter.im/pwl/ODE.jl](https://badges.gitter.im/pwl/ODE.jl.svg)](https://gitter.im/pwl/ODE.jl?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) There are currently discussions about how the Julian API for ODE solvers should look like, and the current documentation is more like a wishlist than a documentation. The API has changed considerably since the initial v0.1 release, so be carefull when you upgrade to v0.2 or later versions. 
# Current status of the project From 136715565c167d4052267bf8828308ff101e20bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sat, 25 Jun 2016 08:29:15 -0400 Subject: [PATCH 021/113] Minor fixes for the Rosenbrock methods --- src/rosenbrock.jl | 11 +++++++---- test/runtests.jl | 4 ++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/rosenbrock.jl b/src/rosenbrock.jl index 4d7131b34..341ddef09 100644 --- a/src/rosenbrock.jl +++ b/src/rosenbrock.jl @@ -1,6 +1,9 @@ #ODEROSENBROCK Solve stiff differential equations, Rosenbrock method # with provided coefficients. -function oderosenbrock{Ty,T}(F, x0::Ty, tspan::AbstractVector{T}, gamma, a, b, c; jacobian=nothing, kargs...) +function oderosenbrock{Ty,T}(F, x0::Ty, tspan::AbstractVector{T}, + gamma, a, b, c; + jacobian = nothing, + kargs...) if !isleaftype(T) error("The output times have to be of a concrete type.") @@ -15,7 +18,7 @@ function oderosenbrock{Ty,T}(F, x0::Ty, tspan::AbstractVector{T}, gamma, a, b, c if typeof(jacobian) == Function G = jacobian else - G = (t, x)->fdjacobian(F, x, t) + G = (t, x)->fdjacobian(F, t, x) end h = diff(tspan) @@ -67,7 +70,7 @@ const kr4_coefficients = (0.231, 6.02015272865 0.1597500684673 0 0 -1.856343618677 -8.50538085819 -2.08407513602 0],) -ode4s_kr(F, x0, tspan; jacobian=nothing, kargs...) = oderosenbrock(F, x0, tspan, kr4_coefficients...; jacobian=jacobian, kargs...) +ode4s_kr(F, x0, tspan; kargs...) = oderosenbrock(F, x0, tspan, kr4_coefficients...; kargs...) # Shampine coefficients const s4_coefficients = (0.5, @@ -81,7 +84,7 @@ const s4_coefficients = (0.5, 372/25 12/5 0 0 -112/125 -54/125 -2/5 0],) -ode4s_s(F, x0, tspan; jacobian=nothing, kargs...) = oderosenbrock(F, x0, tspan, s4_coefficients...; jacobian=jacobian, kargs...) +ode4s_s(F, x0, tspan; kargs...) = oderosenbrock(F, x0, tspan, s4_coefficients...; kargs...) 
# Use Shampine coefficients by default (matching Numerical Recipes) const ode4s = ode4s_s diff --git a/test/runtests.jl b/test/runtests.jl index c78d8dd88..1cf7bf393 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -21,8 +21,8 @@ solvers = [ ## Stiff # fixed-step - # ODE.ode4s_s, - # ODE.ode4s_kr, + ODE.ode4s_s, + ODE.ode4s_kr, # adaptive ODE.ode23s] From c4fc212d08b070ab3f3e642a4b6c944cca9a6b69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sat, 25 Jun 2016 10:10:32 -0400 Subject: [PATCH 022/113] Added the ForwardDiff jacobian as the default --- REQUIRE | 4 ++- src/ODE.jl | 11 +------- src/helpers.jl | 10 +++++++ src/interfaces.jl | 14 +++++----- src/rosenbrock.jl | 10 ++----- src/runge-kutta.jl | 6 ----- src/types.jl | 66 +++++----------------------------------------- 7 files changed, 30 insertions(+), 91 deletions(-) diff --git a/REQUIRE b/REQUIRE index 8e8352dde..772acbb6e 100644 --- a/REQUIRE +++ b/REQUIRE @@ -1,3 +1,5 @@ -julia 0.3 +julia 0.4 Polynomials +Iterators +ForwardDiff Compat 0.4.1 diff --git a/src/ODE.jl b/src/ODE.jl index 80848f710..c13afe78c 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -5,16 +5,7 @@ module ODE using Polynomials using Compat using Iterators - -## minimal function export list -# adaptive non-stiff: -export ode23, ode45, ode78 -# non-adaptive non-stiff: -export ode4, ode4ms -# adaptive stiff: -export ode23s -# non-adaptive stiff: -export ode4s, ode4s_s +using ForwardDiff import Base.convert, Base.show import Base: start, next, done, call, collect diff --git a/src/helpers.jl b/src/helpers.jl index 7270fe015..35e031cd0 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -67,3 +67,13 @@ function findroot(f,rng,eps) return (xr+xl)/2 end + + +# generate a jacobian using ForwardDiff +function forward_jacobian(F,y0::AbstractArray) + (t,y)->ForwardDiff.jacobian(y->F(t,y),y) +end + +function forward_jacobian(F,y0) + (t,y)->ForwardDiff.derivative(y->F(t,y),y) +end diff --git a/src/interfaces.jl b/src/interfaces.jl 
index 657c63a55..02e34ab61 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -6,17 +6,17 @@ tspan[end] is the last integration time. """ function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractStepper; - jacobian::Function = (t,y)->fdjacobian(F, t, y), - # we need these options explicitly for the dtinit - reltol::T = eps(T)^T(1//3)/10, - abstol::T = eps(T)^T(1//2)/10, - initstep::T = dtinit(F, y0, tspan, reltol, abstol; order=order(stepper))::T, - kargs...) + jac = forward_jacobian(F,y0), + # we need these options explicitly for the dtinit + reltol::T = eps(T)^T(1//3)/10, + abstol::T = eps(T)^T(1//2)/10, + initstep::T = dtinit(F, y0, tspan, reltol, abstol; order=order(stepper))::T, + kargs...) t0 = tspan[1] # construct a solver - equation = explicit_ineff(t0,y0,F,jac=jacobian) + equation = explicit_ineff(t0,y0,F,jac) opts = Options{T}(; tspan = tspan, diff --git a/src/rosenbrock.jl b/src/rosenbrock.jl index 341ddef09..8bcba8e6e 100644 --- a/src/rosenbrock.jl +++ b/src/rosenbrock.jl @@ -2,7 +2,7 @@ # with provided coefficients. function oderosenbrock{Ty,T}(F, x0::Ty, tspan::AbstractVector{T}, gamma, a, b, c; - jacobian = nothing, + jacobian = forward_jacobian(F,x0), kargs...) 
if !isleaftype(T) @@ -15,12 +15,6 @@ function oderosenbrock{Ty,T}(F, x0::Ty, tspan::AbstractVector{T}, error("The initial data has to be of a concrete type (or an array)") end - if typeof(jacobian) == Function - G = jacobian - else - G = (t, x)->fdjacobian(F, t, x) - end - h = diff(tspan) x = Array(typeof(x0), length(tspan)) x[1] = x0 @@ -30,7 +24,7 @@ function oderosenbrock{Ty,T}(F, x0::Ty, tspan::AbstractVector{T}, ts = tspan[solstep] hs = h[solstep] xs = x[solstep] - dFdx = G(ts, xs) + dFdx = jacobian(ts, xs) # FIXME if size(dFdx,1) == 1 jac = 1/gamma/hs - dFdx[1] diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 45451ecb9..6f5759d0b 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -114,12 +114,6 @@ function next{RKSF<:RKStepperFixed}(s::Solver{RKSF}, state::RKState) b = s.stepper.tableau.b dt = min(state.dt,s.options.tstop-step.t) - # m3: why is it necessary to copy here and then copy back below? - - # pwl: to my understanding calc_next_k! needs the starting value - # (step.y) but work.ynew is changing in the inner loop, so we need - # two distinct arrays, both starting as step.y. - copy!(work.ynew,step.y) for k=1:length(b) diff --git a/src/types.jl b/src/types.jl index 76e9730f1..dc259b064 100644 --- a/src/types.jl +++ b/src/types.jl @@ -11,7 +11,7 @@ Fields: """ immutable ExplicitODE{T,S} <: AbstractODE t0 ::T - y0 ::S + y0 ::AbstractArray{S} F! ::Function jac!::Function end @@ -38,14 +38,9 @@ Convert a out-of-place explicitly defined ODE function to an in-place function. Note, this does not help with memory allocations. """ -function explicit_ineff(t0, y0::AbstractVector, F::Function; - jac = (t,y)->fdjacobian(F,t,y)) - function F!(t,y,dy) - copy!(dy,F(t,y)) - end - function jac!(t,y,J) - copy!(J,jac(t,y)) - end +function explicit_ineff(t0, y0::AbstractVector, F::Function, jac) + F!(t,y,dy) =copy!(dy,F(t,y)) + jac!(t,y,J)=copy!(J,jac(t,y)) return ExplicitODE(t0,y0,F!,jac!) end @@ -55,14 +50,9 @@ end # and jac to vector functions F! 
and jac!. Still, solving this ODE # will result in a vector of length one result, so additional external # conversion is necessary. -function explicit_ineff(t0, y0::Number, F::Function; - jac = (t,y)->fdjacobian(F,t,y)) - function F!(t,y,dy) - dy[1]=F(t,y[1]) - end - function jac!(t,y,J) - J[1]=jac(t,y[1]) - end +function explicit_ineff(t0, y0, F::Function, jac) + F!(t,y,dy) =(dy[1]=F(t,y[1])) + jac!(t,y,J)=(J[1]=jac(t,y[1])) return ExplicitODE(t0,[y0],F!,jac!) end @@ -235,45 +225,3 @@ function collect(s::Solver) end collect(imap(deepcopy,s)) end - - -# some leftovers from the previous implementation - -# FIXME: This doesn't really work if x is anything but a Vector or a scalar -function fdjacobian(F, t, x::Number) - ftx = F(t, x) - - # The 100 below is heuristic - dx = (x .+ (x==0))./100 - dFdx = (F(t,x+dx)-ftx)./dx - - return dFdx -end - -function fdjacobian(F, t, x::Vector) - ftx = F(t, x) - lx = max(length(x),1) - dFdx = zeros(eltype(x), lx, lx) - for j = 1:lx - # The 100 below is heuristic - dx = zeros(eltype(x), lx) - dx[j] = (x[j] .+ (x[j]==0))./100 - dFdx[:,j] = (F(t,x+dx)-ftx)./dx[j] - end - return dFdx -end - -function fdjacobian!{T}(F!, t, x::Vector{T}, J::Array{T,2}) - ftx = similar(x) - ftx2= similar(x) - dx = similar(x) - F!(t,x,ftx) - lx = max(length(x),1) - dFdx = zeros(eltype(x), lx, lx) - for j = 1:lx - # The 100 below is heuristic - dx[j] = (x[j] .+ (x[j]==0))./100 - F!(t,x+dx,ftx2) - J[:,j] = (ftx2-ftx)./dx[j] - end -end From c62bc0564a25233e22bab48e1bdc8fab3ed82a41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sat, 25 Jun 2016 17:10:12 -0400 Subject: [PATCH 023/113] isFSAL is now a parameter --- src/runge-kutta.jl | 21 ++++++++++----------- src/tableaus.jl | 12 +++++------- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 6f5759d0b..d962f87b9 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -181,7 +181,7 @@ function 
next{RKSA<:RKStepperAdaptive}(sol::Solver{RKSA}, state::RKState) # step is accepted # preload ks[1] for the next step - if isFSAL(sol.stepper.tableau) + if sol.stepper.tableau.isFSAL copy!(work.ks[1],work.ks[end]) else sol.ode.F!(step.t+dt, work.ynew, work.ks[1]) @@ -262,28 +262,27 @@ function stepsize_hw92!{T}(work, ord = minimum(order(tableau)) timout_after_nan = 5 - fac = [T(8//10), T(9//10), T(1//4)^(1//(ord+1)), T(38//100)^(1//(ord+1))][1] + # fac = T[0.8, 0.9, (0.25)^(1/(ord+1)), (0.38)^(1/(ord+1))][1] + fac = T(8//10) facmax = T(5) # maximal step size increase. 1.5-5 facmin = 1./facmax # maximal step size decrease. ? dof = length(last_step.y) + if findfirst(options.isoutofdomain,work.y) != 0 + return T(10), dt*facmin, timout_after_nan + end + # in-place calculate yerr./tol for d=1:dof - - # if outside of domain (usually NaN) then make step size smaller by maximum - if options.isoutofdomain(work.y[d]) - return T(10), dt*facmin, timout_after_nan - end - y0 = last_step.y[d] # TODO: is this supposed to be the last successful step? y1 = work.ynew[d] # the approximation to the next step - sci = (options.abstol + options.reltol*max(options.norm(y0),options.norm(y1))) - work.yerr[d] = work.yerr[d]/sci # Eq 4.10 + sci = (options.abstol + options.reltol*max(norm(y0),norm(y1))) + work.yerr[d] ./= sci # Eq 4.10 end # TOOD: should we use options.norm here as well? err = norm(work.yerr) # Eq. 4.11 - newdt = min(options.maxstep, dt*max(facmin, fac*(1/err)^(1//(ord+1)))) # Eq 4.13 modified + newdt = min(options.maxstep, dt*max(facmin, fac*(1/err)^(1/(ord+1)))) # Eq 4.13 modified if timeout > 0 newdt = min(newdt, dt) diff --git a/src/tableaus.jl b/src/tableaus.jl index 6d182558e..ac105fa2d 100644 --- a/src/tableaus.jl +++ b/src/tableaus.jl @@ -50,6 +50,7 @@ immutable TableauRKExplicit{Name, S, T} <: Tableau{Name, S, T} # second for error calc. 
b::Matrix{T} c::Vector{T} + isFSAL::Bool function TableauRKExplicit(order,a,b,c) @assert isa(S,Integer) @assert isa(Name,Symbol) @@ -58,7 +59,8 @@ immutable TableauRKExplicit{Name, S, T} <: Tableau{Name, S, T} @assert S==length(c)==size(a,1)==size(a,2)==size(b,2) @assert size(b,1)==length(order) @assert norm(sum(a,2)-c'',Inf) Date: Sun, 26 Jun 2016 17:44:44 -0400 Subject: [PATCH 024/113] Fixed type stability for RK & updated the tableaus --- src/dense.jl | 16 ++--- src/interfaces.jl | 20 +++--- src/ode23s.jl | 14 ++--- src/runge-kutta.jl | 44 +++++++------- src/tableaus.jl | 148 +++++++++++++++++++++++++-------------------- src/types.jl | 54 +++++++++-------- 6 files changed, 154 insertions(+), 142 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 28c2fdb67..4ca96be38 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -20,19 +20,19 @@ The state of the dense stepper - ytmp: work array """ -type DenseState{T,S} <: AbstractState - s0::Step{T,S} - s1::Step{T,S} +type DenseState{St<:AbstractState,T,Y} <: AbstractState{T,Y} + s0::Step{T,Y} + s1::Step{T,Y} last_tout::T first_step - solver_state::AbstractState + solver_state::St # used for storing the interpolation result - ytmp::S + ytmp::Y solver_done end -function start(s::Solver{DenseStepper}) +function start{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}) # extract the real solver solver = s.stepper.solver t0 = solver.ode.t0 @@ -53,7 +53,7 @@ end # pwl: I agree, but then the problem is that once you decouple them # you would lose the opprotunity to detect the roots with each step. -function next(s::Solver{DenseStepper}, state::DenseState) +function next{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}, state::DenseState) # m3: I'm not 100% sure what happens here. 
I would implement it like so: @@ -158,7 +158,7 @@ function next(s::Solver{DenseStepper}, state::DenseState) end -function done(s::Solver{DenseStepper}, state::DenseState) +function done{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}, state::DenseState) return ( state.solver_done || diff --git a/src/interfaces.jl b/src/interfaces.jl index 02e34ab61..ea211ca26 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -58,15 +58,15 @@ function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractSteppe end ode23s(F,y0,t0;kargs...) = ode_conv(F,y0,t0,ModifiedRosenbrockStepper; kargs...) -ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:bt_feuler}; kargs...) -ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:bt_midpoint}; kargs...) -ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:bt_heun}; kargs...) -ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:bt_rk4}; kargs...) -ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_rk21}; kargs...) -ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_rk23}; kargs...) -ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_rk45}; kargs...) -ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_dopri5}; kargs...) -ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:bt_feh78}; kargs...) +ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:feuler}; kargs...) +ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:midpoint}; kargs...) +ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:heun}; kargs...) +ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:rk4}; kargs...) +ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk21}; kargs...) +ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk23}; kargs...) +ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk45}; kargs...) +ode45_dp(F,y0,t0;kargs...) 
= ode_conv(F,y0,t0,RKStepperAdaptive{:dopri5}; kargs...) +ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:feh78}; kargs...) function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},stepper;kargs...) @@ -115,7 +115,7 @@ function reverse_time(sol::Solver) end # ExplicitODE is immutable - ode_reversed = ExplicitODE(t0,y0,F_reverse!,jac_reverse!) + ode_reversed = ExplicitODE(t0,y0,F_reverse!,jac! = jac_reverse!) stopevent = options.stopevent # TODO: we are modifying options here, should we construct new diff --git a/src/ode23s.jl b/src/ode23s.jl index 19b87667a..4e5e760f7 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -2,11 +2,6 @@ # (also used by MATLAB's ODE23s); see Sec. 4.1 in # # [SR97] L.F. Shampine and M.W. Reichelt: "The MATLAB ODE Suite," SIAM Journal on Scientific Computing, Vol. 18, 1997, pp. 1–22 -# -# supports keywords: points = :all | :specified (using dense output) -# jacobian = G(t,y)::Function | nothing (FD) - -# Internal immutable ModifiedRosenbrockStepper{T<:Number} <: AbstractStepper d :: T @@ -26,8 +21,8 @@ name(::ModifiedRosenbrockStepper) = "Modified Rosenbrock Stepper" # define the set of ODE problems with which this stepper can work -solve(ode :: ExplicitODE, stepper :: ModifiedRosenbrockStepper, options :: Options) = - Solver{ModifiedRosenbrockStepper}(ode,stepper,options) +solve(ode::ExplicitODE, stepper::ModifiedRosenbrockStepper, options) = + Solver(ode, stepper, options) # lower level interface (iterator) @@ -61,7 +56,7 @@ function show(io::IO, state :: RosenbrockState) end -function start(s :: Solver{ModifiedRosenbrockStepper}) +function start{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s :: Solver{O,S}) t = s.ode.t0 dt = s.options.initstep y = s.ode.y0 @@ -84,8 +79,7 @@ function start(s :: Solver{ModifiedRosenbrockStepper}) end -function next(s :: Solver{ModifiedRosenbrockStepper}, - state :: RosenbrockState) +function next{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state) stepper = s.stepper ode = s.ode 
diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index d962f87b9..2491f2d7e 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -11,20 +11,19 @@ A general Runge-Kutta stepper (it cen represent either, a fixed step or an adaptive step algorithm). """ -immutable RKStepper{Kind,tab_name,T<:Number} <: AbstractStepper - tableau::Tableau +immutable RKStepper{Kind,Name,T} <: AbstractStepper{T} + tableau::TableauRKExplicit{T} function RKStepper() - tab = eval(tab_name) + tab = convert(T,tableaus_rk_explicit[Name]) if Kind == :fixed && isadaptive(tab) error("Cannot construct a fixed step method from an adaptive step tableau") elseif Kind == :adaptive && !isadaptive(tab) error("Cannot construct an adaptive step method from an fixed step tableau") end - new(convert(T,tab)) + new(tab) end end - typealias RKStepperFixed RKStepper{:fixed} typealias RKStepperAdaptive RKStepper{:adaptive} @@ -33,9 +32,9 @@ order(stepper::RKStepper) = minimum(order(stepper.tableau)) name(stepper::RKStepper) = typeof(stepper.tableau) -# TODO: possibly handle the initial stepsize and the tableau conversion here? -solve{K,S,T}(ode::ExplicitODE, stepper::RKStepper{K,S,T}, options::Options{T}) = - Solver{RKStepper{K,S,T}}(ode,stepper,options) +# TODO: possibly handle the initial stepsize here? +solve(ode::ExplicitODE, stepper::RKStepper, options) = + Solver(ode,stepper,options) # lower level interface @@ -48,21 +47,21 @@ Pre allocated arrays to store temporary data. Used only by Runge-Kutta stepper. """ -type RKWorkArrays{T} - y ::T - ynew::T - yerr::T - ks ::Vector{T} +type RKWorkArrays{Y} + y ::Y + ynew::Y + yerr::Y + ks ::Vector{Y} end """ State for the Runge-Kutta stepper. 
""" -type RKState{T,S} <: AbstractState - step ::Step{T,S} +type RKState{T,Y} <: AbstractState{T,Y} + step ::Step{T,Y} dt ::T - work ::RKWorkArrays{S} + work ::RKWorkArrays{Y} timeout ::Int # This is not currently incremented with each step iters ::Int @@ -76,10 +75,9 @@ function show(io::IO, state::RKState) end -function start{T<:RKStepper}(s::Solver{T}) +function start{O<:ExplicitODE,S<:RKStepper}(s::Solver{O,S}) t0, dt0, y0 = s.ode.t0, s.options.initstep, s.ode.y0 - # TODO: we should do the Butcher table conversion somewhere lk = lengthks(s.stepper.tableau) work = RKWorkArrays(zero(y0), # y zero(y0), # ynew @@ -94,7 +92,7 @@ function start{T<:RKStepper}(s::Solver{T}) # pre-initialize work.ks[1] s.ode.F!(t0,y0,work.ks[1]) - step = Step(t0,deepcopy(y0),deepcopy(work.ks[1])) + step = Step(t0,copy(y0),copy(work.ks[1])) timeout = 0 # for step control return RKState(step,dt0,work,timeout,0) @@ -106,7 +104,9 @@ end ##################### -function next{RKSF<:RKStepperFixed}(s::Solver{RKSF}, state::RKState) +# function next{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state::RKState) +# function next{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state) +function next{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state) step = state.step work = state.work @@ -133,7 +133,7 @@ end ######################## -function next{RKSA<:RKStepperAdaptive}(sol::Solver{RKSA}, state::RKState) +function next{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state) const timeout_const = 5 @@ -247,7 +247,7 @@ function stepsize_hw92!{T}(work, tableau ::TableauRKExplicit, dt ::T, timeout, - options ::Options) + options ::Options{T}) # Estimates the error and a new step size following Hairer & # Wanner 1992, p167 (with some modifications) # diff --git a/src/tableaus.jl b/src/tableaus.jl index ac105fa2d..6fe52b763 100644 --- a/src/tableaus.jl +++ b/src/tableaus.jl @@ -5,7 +5,7 @@ # Butcher Tableaus, or more generally coefficient tables # see Hairer & Wanner 1992, p. 
134, 166 -abstract Tableau{Name, S, T<:Real} +abstract Tableau{T<:Real} # Name is the name of the tableau/method (a symbol) # S is the number of stages (an int) # T is the type of the coefficients @@ -30,7 +30,7 @@ abstract Tableau{Name, S, T<:Real} # | b_1 ... b_s this is the one used for stepping # | b'_1 ... b'_s this is the one used for error-checking -Base.eltype{N,S,T}(b::Tableau{N,S,T}) = T +Base.eltype{T}(b::Tableau{T}) = T order(b::Tableau) = b.order # Subtypes need to define a convert method to convert to a different # eltype with signature: @@ -43,7 +43,7 @@ Base.convert{Tnew<:Real}(::Type{Tnew}, tab::Tableau) = error("Define convert met # m3: these tableaus should go into rk.jl as they belong to R-K methods -immutable TableauRKExplicit{Name, S, T} <: Tableau{Name, S, T} +immutable TableauRKExplicit{T} <: Tableau{T} order::(@compat(Tuple{Vararg{Int}})) # the order of the methods a::Matrix{T} # one or several row vectors. First row is used for the step, @@ -51,39 +51,42 @@ immutable TableauRKExplicit{Name, S, T} <: Tableau{Name, S, T} b::Matrix{T} c::Vector{T} isFSAL::Bool - function TableauRKExplicit(order,a,b,c) - @assert isa(S,Integer) - @assert isa(Name,Symbol) + s::Int + name :: AbstractString + function TableauRKExplicit(name,order,a,b,c) + s = length(c) @assert c[1]==0 @assert istril(a) - @assert S==length(c)==size(a,1)==size(a,2)==size(b,2) + @assert s==size(a,1)==size(a,2)==size(b,2) @assert size(b,1)==length(order) @assert norm(sum(a,2)-c'',Inf)fdjacobian(F!,t,y,J) to fdjacobian!(F!) -ExplicitODE{T,S<:AbstractVector}(t0::T, y0::S, F!::Function; - jac!::Function = (t,y,J)->fdjacobian!(F!,t,y,J)) = - ExplicitODE{T,S}(t0,y0,F!,jac!) +ExplicitODE{T,Y}(t0::T, y0::Y, F!::Function; + jac!::Function = forward_jacobian!(F!,similar(y0))) = + ExplicitODE{T,Y}(t0,y0,F!,jac!) 
+ +function forward_jacobian!(F!,tmp) + (t,y,J)->ForwardDiff.jacobian!(J,(y,dy)->F!(t,y,dy),tmp,y) +end """ @@ -27,7 +33,7 @@ This type is not yet implemented, but will serve as an implicitly defined ODE (i.e. ODE of the form F(t,y,y')=0. """ -immutable ImplicitODE{T,S} <: AbstractODE +immutable ImplicitODE{T,Y} <: AbstractODE{T,Y} end @@ -38,10 +44,10 @@ Convert a out-of-place explicitly defined ODE function to an in-place function. Note, this does not help with memory allocations. """ -function explicit_ineff(t0, y0::AbstractVector, F::Function, jac) +function explicit_ineff{T,Y}(t0::T, y0::AbstractVector{Y}, F::Function, jac) F!(t,y,dy) =copy!(dy,F(t,y)) jac!(t,y,J)=copy!(J,jac(t,y)) - return ExplicitODE(t0,y0,F!,jac!) + return ExplicitODE(t0,y0,F!,jac! = jac!) end # A temporary solution for handling scalars, should be faster then the @@ -50,10 +56,10 @@ end # and jac to vector functions F! and jac!. Still, solving this ODE # will result in a vector of length one result, so additional external # conversion is necessary. -function explicit_ineff(t0, y0, F::Function, jac) +function explicit_ineff{T,Y}(t0::T, y0::Y, F::Function, jac) F!(t,y,dy) =(dy[1]=F(t,y[1])) jac!(t,y,J)=(J[1]=jac(t,y[1])) - return ExplicitODE(t0,[y0],F!,jac!) + return ExplicitODE(t0,[y0],F!,jac! = jac!) end @@ -62,7 +68,7 @@ end The abstract type of the actual algorithm to solve an ODE. """ -abstract AbstractStepper +abstract AbstractStepper{T} """ @@ -71,7 +77,7 @@ AbstractState keeps the temporary data (state) for the iterator Solver{::AbstractStepper}. """ -abstract AbstractState +abstract AbstractState{T,Y} # m3: # - docs @@ -128,7 +134,7 @@ Dense output options: - roottol TODO """ -type Options{T} # m3: T->Et +type Options{T} # stepper options initstep ::T tstop ::T @@ -142,7 +148,7 @@ type Options{T} # m3: T->Et isoutofdomain::Function # dense output options - tspan ::Vector{T} + tspan ::AbstractVector{T} points ::Symbol # m3: I think this should be an array of functions. 
Depending on some @@ -162,7 +168,7 @@ type Options{T} # m3: T->Et maxstep = 1/minstep, # TODO: we need a better guess here, possibly # overwrite it in the call to solve() - initstep = max(min(reltol,abstol,maxstep),minstep), + initstep = minstep, norm = Base.norm, maxiters = T(Inf), points = :all, @@ -195,18 +201,14 @@ of a numerical solution to an ODE. - options: options passed to the stepper """ -type Solver{T<:AbstractStepper} # TODO: immutable? - ode :: AbstractODE - stepper :: T - options :: Options +immutable Solver{O<:AbstractODE,S<:AbstractStepper,T} + ode :: O + stepper :: S + options :: Options{T} end - -# TODO: is this the right way to implement the mid level interface? -solve(ode, stepper; kargs...) = solve(ode, stepper, Options(kargs...)) - # filter the wrong combinations of ode and stepper -solve{T,S}(ode :: T, stepper :: S, options :: Options) = error("The $S doesn't support $T") +solve{T,S}(ode::T, stepper::S, options) = error("The $S doesn't support $T") # normally we return the working array, which changes at each step and From f9bb09ded495e796a83870877df4a0df77397746 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sun, 26 Jun 2016 21:52:49 -0400 Subject: [PATCH 025/113] Added new tests by @mauro and more fixes --- src/dense.jl | 8 +-- src/runge-kutta.jl | 23 +++++--- test/interface-tests.jl | 4 ++ test/iterators.jl | 124 ++++++++++++++++++++++++++++++++++++++++ test/runtests.jl | 3 +- 5 files changed, 148 insertions(+), 14 deletions(-) create mode 100644 test/iterators.jl diff --git a/src/dense.jl b/src/dense.jl index 4ca96be38..9e8af6898 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -37,12 +37,12 @@ function start{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}) solver = s.stepper.solver t0 = solver.ode.t0 y0 = solver.ode.y0 - dy0 = deepcopy(y0) + dy0 = copy(y0) solver.ode.F!(t0,y0,dy0) - step0 = Step(t0,deepcopy(y0),deepcopy(dy0)) - step1 = Step(t0,deepcopy(y0),deepcopy(dy0)) + step0 = Step(t0,copy(y0),copy(dy0)) + step1 
= Step(t0,copy(y0),copy(dy0)) solver_state = start(solver) - ytmp = deepcopy(y0) + ytmp = copy(y0) return DenseState(step0, step1, t0-1, true, solver_state, ytmp, false) end diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 2491f2d7e..27d71fc90 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -104,8 +104,6 @@ end ##################### -# function next{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state::RKState) -# function next{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state) function next{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state) step = state.step work = state.work @@ -133,9 +131,9 @@ end ######################## -function next{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state) +const timeout_const = 5 - const timeout_const = 5 +function next{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state) # the initial values dt = state.dt # dt is the previous stepisze, it is @@ -239,6 +237,7 @@ function rk_embedded_step!(work ::RKWorkArrays, work.ynew[d] = y[d] + dt*work.ynew[d] end + return nothing end @@ -246,7 +245,7 @@ function stepsize_hw92!{T}(work, last_step ::Step, tableau ::TableauRKExplicit, dt ::T, - timeout, + timeout ::Int, options ::Options{T}) # Estimates the error and a new step size following Hairer & # Wanner 1992, p167 (with some modifications) @@ -268,12 +267,17 @@ function stepsize_hw92!{T}(work, facmin = 1./facmax # maximal step size decrease. ? dof = length(last_step.y) - if findfirst(options.isoutofdomain,work.y) != 0 - return T(10), dt*facmin, timout_after_nan - end - # in-place calculate yerr./tol for d=1:dof + + # TODO: for some reason calling options.isoutofdomain + # generates a lot of allocations + + # if options.isoutofdomain(work.y[d]) + if isnan(work.y[d]) + return T(10), dt*facmin, timout_after_nan + end + y0 = last_step.y[d] # TODO: is this supposed to be the last successful step? 
y1 = work.ynew[d] # the approximation to the next step sci = (options.abstol + options.reltol*max(norm(y0),norm(y1))) @@ -311,4 +315,5 @@ function calc_next_k!(work ::RKWorkArrays, end end ode.F!(t + c[i]*dt, work.y, work.ks[i]) + return nothing end diff --git a/test/interface-tests.jl b/test/interface-tests.jl index ac2e554ec..75d174cf6 100644 --- a/test/interface-tests.jl +++ b/test/interface-tests.jl @@ -40,6 +40,10 @@ Base.zero(::CompSol) = zero(CompSol) # TODO: This is now an option and has to be passed to the # solvers. Looks ugly and a kind of a pain to handle. isoutofdomain(y::CompSol) = any(isnan, vcat(y.rho[:], y.x, y.p)) +# TODO: We should decide on which version do we pick. The isnan +# variant seems to be causing less trouble (see the allocation comment +# in runge_kutta.jl). +Base.isnan(y::CompSol) = any(isnan, vcat(y.rho[:], y.x, y.p)) # Because the new RK solvers wrap scalars in an array and because of # https://github.com/JuliaLang/julia/issues/11053 these are also needed: diff --git a/test/iterators.jl b/test/iterators.jl new file mode 100644 index 000000000..33718cc1f --- /dev/null +++ b/test/iterators.jl @@ -0,0 +1,124 @@ +# Test sets [F, y0, tspan, analytic] +testsets_scalar = Vector[ + Any[(t,y)->6.0, 0., [0:.1:1;], (t,y)->6t], + Any[(t,y)->2t, 0., [0:.001:1;], (t,y)->t.^2], + Any[(t,y)->y, 1., [0:.001:1;], (t,y)->e.^t], + Any[(t,y)->y, 1., [1:-.001:0;], (t,y)->e.^(t-1)], + Any[(t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], + (t,y)->[cos(t)-2*sin(t) 2*cos(t)+sin(t)] ] +] + +testsets_vector = Vector[ + Any[(t,y,dy)-> dy[1]=6.0, [0.], [0:.1:1;], (t,y)->6t], + Any[(t,y,dy)-> dy[1]=2t, [0.], [0:.001:1;], (t,y)->t.^2], + Any[(t,y,dy)-> dy[1]=y[1], [1.], [0:.001:1;], (t,y)->e.^t], + Any[(t,y,dy)-> dy[1]=y[1], [1.], [1:-.001:0;], (t,y)->e.^(t-1)], + Any[(t,y,dy)->(dy[1]=-y[2]; dy[2]=y[1]), [1., 2.], [0:.001:2*pi;], + (t,y)->[cos(t)-2*sin(t) 2*cos(t)+sin(t)] ] +] + + +# Testing function ode +steppers = [ODE.RKStepperFixed{:feuler}, + 
ODE.RKStepperFixed{:midpoint}, + ODE.RKStepperFixed{:heun}, + ODE.RKStepperFixed{:rk4}, + ODE.RKStepperAdaptive{:rk21}, + ODE.RKStepperAdaptive{:rk23}, + ODE.RKStepperAdaptive{:rk45}, + ODE.RKStepperAdaptive{:dopri5}, + ODE.RKStepperAdaptive{:feh78}, + ODE.ModifiedRosenbrockStepper{} + ] + +# F,y0,tspan,ana = (1,1,1,1) +rks =1 +ts =1 +println("Testing `ode`") +function test_ode() + for rks in steppers + println("Testing $rks") + for ts in testsets_scalar + F,y0,tspan,ana = ts + t,y = ODE.ode(F,y0,tspan,rks{eltype(tspan)}()) + y = hcat(y...).' + @test maximum(abs(y-ana(t,y))) < tol + end + end +end + +function test_iterator_out_place() + # Testing the lower-level iteration API + println("\nTesting iterators") + for rks in steppers + println("Testing $rks") + for ts in testsets_scalar + F,y0,tspan,ana = ts + T = eltype(tspan) + stepper = rks{T}() + jac = ODE.forward_jacobian(F,y0) + equation = ODE.explicit_ineff(tspan[1],y0,F,jac) + opts = ODE.Options{T}( + tspan = tspan, + reltol = eps(T)^T(1//3)/10, + abstol = eps(T)^T(1//2)/10, + initstep = ODE.dtinit(F, y0, tspan, eps(T)^T(1//3)/10, eps(T)^T(1//2)/10, order=ODE.order(stepper)) + ) + solver = ODE.solve(equation,stepper,opts) + solution = collect(ODE.dense(solver)) + nn = length(solution) + t = Array(T,nn) + y = Array(typeof(y0),nn) + + for (n,(tt,yy)) in enumerate(solution) + t[n] = tt + y[n] = isa(y0,Number) ? yy[1] : yy + end + y = hcat(y...).' + @test maximum(abs(y-ana(t,y))) < tol + end + end +end + +# Testing the lower-level iteration API + +# Test sets with in-place F! [F!, y0, tspan, analytic] + +function test_iterator_in_place() + println("\nTesting iterators using in-place functions:") + + for rks in steppers + println("Testing $rks") + for ts in testsets_vector + F!,y0,tspan,ana = ts + T = eltype(tspan) + stepper = rks{T}() + # jac = ODE.forward_jacobian(F!,y0) + # jac! = (t,y,J) -> copy!(J,jac(t,y)) + equation = ODE.ExplicitODE(tspan[1],y0,F!) 
+ opts = ODE.Options{T}( + tspan = tspan, + reltol = eps(T)^T(1//3)/10, + abstol = eps(T)^T(1//2)/10, + initstep = 0.001 + ) + solver = ODE.solve(equation,stepper,opts) + solution = collect(ODE.dense(solver)) + nn = length(solution) + t = Array(T,nn) + y = Array(typeof(y0),nn) + + for (n,(tt,yy)) in enumerate(solution) + t[n] = tt + y[n] = isa(y0,Number) ? yy[1] : yy + end + y = hcat(y...).' + @test maximum(abs(y-ana(t,y))) < tol + end + end +end + + +test_ode() +test_iterator_out_place() +test_iterator_in_place() diff --git a/test/runtests.jl b/test/runtests.jl index 1cf7bf393..739272ee4 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,7 +1,7 @@ using ODE using Base.Test -tol = 1e-2 +const tol = 1e-2 solvers = [ ## Non-stiff @@ -93,5 +93,6 @@ let @test norm(refsol-y[end], Inf) < 2e-10 end include("interface-tests.jl") +include("iterators.jl") println("All looks OK") From ec61459f3a96ba465dde10c6b9e9ce9a54ef8890 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sun, 26 Jun 2016 22:22:20 -0400 Subject: [PATCH 026/113] Improved type stability for ode23s --- src/ode23s.jl | 39 ++++++++++++++------------------------- src/runge-kutta.jl | 2 +- 2 files changed, 15 insertions(+), 26 deletions(-) diff --git a/src/ode23s.jl b/src/ode23s.jl index 4e5e760f7..eaf9519f0 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -4,8 +4,8 @@ # [SR97] L.F. Shampine and M.W. Reichelt: "The MATLAB ODE Suite," SIAM Journal on Scientific Computing, Vol. 18, 1997, pp. 1–22 immutable ModifiedRosenbrockStepper{T<:Number} <: AbstractStepper - d :: T - e32 :: T + d ::T + e32::T function ModifiedRosenbrockStepper() d = T(1/(2 + sqrt(2))) @@ -36,18 +36,18 @@ The state for the Rosenbrock stepper - iters: Number of successful steps made """ -type RosenbrockState{T,S} <: AbstractState - step ::Step{T,S} +type RosenbrockState{T,Y} <: AbstractState + step ::Step{T,Vector{Y}} dt ::T - F1 ::S - F2 ::S - J # :: ? 
+ F1 ::Vector{Y} + F2 ::Vector{Y} + J ::Matrix{Y} iters::Int end # for debugging -function show(io::IO, state :: RosenbrockState) +function show(io::IO, state::RosenbrockState) show(io,state.step) println("dt =$(state.dt)") println("F1 =$(state.F1)") @@ -56,7 +56,7 @@ function show(io::IO, state :: RosenbrockState) end -function start{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s :: Solver{O,S}) +function start{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}) t = s.ode.t0 dt = s.options.initstep y = s.ode.y0 @@ -64,7 +64,7 @@ function start{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s :: Solver{O,S}) J = Array(eltype(y),length(y),length(y)) - step = Step(t,deepcopy(y),deepcopy(dy)) + step = Step(t,copy(y),copy(dy)) state = RosenbrockState(step, dt, zero(y), # F1 @@ -104,18 +104,7 @@ function next{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state # trim the step size to match the bounds of integration dt = min(s.options.tstop-t,dt) - # TODO: this should go to a specialized function for type stabilty sake - # maybe make W a part of ExplicitODE? Same for tder below? - if size(J,1) == 1 - W = one(J) - dt*d*J - else - # note: if there is a mass matrix M on the lhs of the ODE, i.e., - # M * dy/dt = F(t,y) - # we can simply replace eye(J) by M in the following expression - # (see Sec. 
5 in [SR97]) - - W = lufact( eye(J) - dt*d*J ) - end + W = lufact!( eye(J) - dt*d*J ) # Approximate time-derivative of F, we are using F1 as a # temporary array @@ -131,11 +120,11 @@ function next{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state ode.F!(t+dt, ynew, F2) k3 = W \ (F2 - e32*(k2 - F1) - 2*(k1 - F0) + tder ) - delta = max(opts.reltol*max(opts.norm(y), - opts.norm(ynew)), + delta = max(opts.reltol*max(opts.norm(y)::eltype(y), + opts.norm(ynew)::eltype(y)), opts.abstol) # allowable error - err = (dt/6)*opts.norm(k1 - 2*k2 + k3)/delta # error estimate + err = (dt/6)*(opts.norm(k1 - 2*k2 + k3)::eltype(y))/delta # error estimate # upon a failed step decrease the step size dtnew = min(opts.maxstep, diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 27d71fc90..df7744ab0 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -273,7 +273,7 @@ function stepsize_hw92!{T}(work, # TODO: for some reason calling options.isoutofdomain # generates a lot of allocations - # if options.isoutofdomain(work.y[d]) + # if options.isoutofdomain(work.y[d])::Bool if isnan(work.y[d]) return T(10), dt*facmin, timout_after_nan end From 7cc07ac286247382603d166352e0553d31bd5f3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 30 Jun 2016 12:16:34 +0200 Subject: [PATCH 027/113] Tests for the jac keyword argument --- test/runtests.jl | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/runtests.jl b/test/runtests.jl index 739272ee4..190d0bb33 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -34,12 +34,16 @@ for solver in solvers # we need to fix initstep for the fixed-step methods t,y=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1) @test maximum(abs(y-6t)) < tol + t,y=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1, jac = (t,y)->0.) 
+ @test maximum(abs(y-6t)) < tol # dy # -- = 2t ==> y = t.^2 # dt t,y=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001) @test maximum(abs(y-t.^2)) < tol + t,y=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001, jac = (t,y)->0.) + @test maximum(abs(y-t.^2)) < tol # test typeof(tspan)==Vector{Int} does not throw @test_throws ErrorException t,y=solver((t,y)->2y, 0., [0,1]) @@ -55,9 +59,13 @@ for solver in solvers # dt t,y=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001) @test maximum(abs(y-e.^t)) < tol + t,y=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001, jac = (t,y)->1.) + @test maximum(abs(y-e.^t)) < tol t,y=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001) @test maximum(abs(y-e.^(t-1))) < tol + t,y=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, jac = (t,y)->1.) + @test maximum(abs(y-e.^(t-1))) < tol # dv dw # -- = -w, -- = v ==> v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) @@ -67,6 +75,9 @@ for solver in solvers t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001) ys = hcat(y...).' # convert Vector{Vector{Float}} to Matrix{Float} @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol + t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001, jac=(t,y)->Float64[[0,1] [-1,0]]) + ys = hcat(y...).' 
# convert Vector{Vector{Float}} to Matrix{Float} + @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol end # Test negative starting times ODE.ode23s From 7a082b186fd5d10c1c01056fe94a8cae04621bca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 30 Jun 2016 14:52:40 +0200 Subject: [PATCH 028/113] Better tests and fixed a jacobian bug --- src/helpers.jl | 10 --- src/interfaces.jl | 9 +- src/rosenbrock.jl | 10 +++ src/types.jl | 12 +-- test/iterators.jl | 204 ++++++++++++++++++++++++---------------------- test/runtests.jl | 46 ++++++----- 6 files changed, 157 insertions(+), 134 deletions(-) diff --git a/src/helpers.jl b/src/helpers.jl index 35e031cd0..7270fe015 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -67,13 +67,3 @@ function findroot(f,rng,eps) return (xr+xl)/2 end - - -# generate a jacobian using ForwardDiff -function forward_jacobian(F,y0::AbstractArray) - (t,y)->ForwardDiff.jacobian(y->F(t,y),y) -end - -function forward_jacobian(F,y0) - (t,y)->ForwardDiff.derivative(y->F(t,y),y) -end diff --git a/src/interfaces.jl b/src/interfaces.jl index ea211ca26..9bd987cbb 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -6,7 +6,6 @@ tspan[end] is the last integration time. """ function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractStepper; - jac = forward_jacobian(F,y0), # we need these options explicitly for the dtinit reltol::T = eps(T)^T(1//3)/10, abstol::T = eps(T)^T(1//2)/10, @@ -16,7 +15,7 @@ function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractSteppe t0 = tspan[1] # construct a solver - equation = explicit_ineff(t0,y0,F,jac) + equation = explicit_ineff(t0,y0,F;kargs...) opts = Options{T}(; tspan = tspan, @@ -57,6 +56,12 @@ function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractSteppe return (tn,yn) end +""" + ODE.odeXX(F,y0,t0;kargs...) + +Solves an ODE `y'=F(t,y)` with initial conditions `y0` and `t0`. +""" + ode23s(F,y0,t0;kargs...) 
= ode_conv(F,y0,t0,ModifiedRosenbrockStepper; kargs...) ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:feuler}; kargs...) ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:midpoint}; kargs...) diff --git a/src/rosenbrock.jl b/src/rosenbrock.jl index 8bcba8e6e..27b4d886d 100644 --- a/src/rosenbrock.jl +++ b/src/rosenbrock.jl @@ -1,3 +1,13 @@ +# generate a jacobian using ForwardDiff +function forward_jacobian(F,y0::AbstractArray) + (t,y)->ForwardDiff.jacobian(y->F(t,y),y) +end + +function forward_jacobian(F,y0) + (t,y)->ForwardDiff.derivative(y->F(t,y),y) +end + + #ODEROSENBROCK Solve stiff differential equations, Rosenbrock method # with provided coefficients. function oderosenbrock{Ty,T}(F, x0::Ty, tspan::AbstractVector{T}, diff --git a/src/types.jl b/src/types.jl index 52289566f..62fe6f432 100644 --- a/src/types.jl +++ b/src/types.jl @@ -20,11 +20,11 @@ immutable ExplicitODE{T,Y} <: AbstractODE{T,Y} end ExplicitODE{T,Y}(t0::T, y0::Y, F!::Function; - jac!::Function = forward_jacobian!(F!,similar(y0))) = + jac!::Function = forward_jacobian!(F!,similar(y0)), kargs...) = ExplicitODE{T,Y}(t0,y0,F!,jac!) function forward_jacobian!(F!,tmp) - (t,y,J)->ForwardDiff.jacobian!(J,(y,dy)->F!(t,y,dy),tmp,y) + (t,y,J)->ForwardDiff.jacobian!(J,(dy,y)->F!(t,y,dy),tmp,y) end """ @@ -44,10 +44,10 @@ Convert a out-of-place explicitly defined ODE function to an in-place function. Note, this does not help with memory allocations. """ -function explicit_ineff{T,Y}(t0::T, y0::AbstractVector{Y}, F::Function, jac) +function explicit_ineff{T,Y}(t0::T, y0::AbstractVector{Y}, F::Function; kargs...) F!(t,y,dy) =copy!(dy,F(t,y)) jac!(t,y,J)=copy!(J,jac(t,y)) - return ExplicitODE(t0,y0,F!,jac! = jac!) + return ExplicitODE(t0,y0,F!;kargs...) end # A temporary solution for handling scalars, should be faster then the @@ -56,10 +56,10 @@ end # and jac to vector functions F! and jac!. 
Still, solving this ODE # will result in a vector of length one result, so additional external # conversion is necessary. -function explicit_ineff{T,Y}(t0::T, y0::Y, F::Function, jac) +function explicit_ineff{T,Y}(t0::T, y0::Y, F::Function; kargs...) F!(t,y,dy) =(dy[1]=F(t,y[1])) jac!(t,y,J)=(J[1]=jac(t,y[1])) - return ExplicitODE(t0,[y0],F!,jac! = jac!) + return ExplicitODE(t0,[y0],F!;kargs...) end diff --git a/test/iterators.jl b/test/iterators.jl index 33718cc1f..fab084941 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -1,21 +1,50 @@ -# Test sets [F, y0, tspan, analytic] -testsets_scalar = Vector[ - Any[(t,y)->6.0, 0., [0:.1:1;], (t,y)->6t], - Any[(t,y)->2t, 0., [0:.001:1;], (t,y)->t.^2], - Any[(t,y)->y, 1., [0:.001:1;], (t,y)->e.^t], - Any[(t,y)->y, 1., [1:-.001:0;], (t,y)->e.^(t-1)], - Any[(t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], - (t,y)->[cos(t)-2*sin(t) 2*cos(t)+sin(t)] ] -] - -testsets_vector = Vector[ - Any[(t,y,dy)-> dy[1]=6.0, [0.], [0:.1:1;], (t,y)->6t], - Any[(t,y,dy)-> dy[1]=2t, [0.], [0:.001:1;], (t,y)->t.^2], - Any[(t,y,dy)-> dy[1]=y[1], [1.], [0:.001:1;], (t,y)->e.^t], - Any[(t,y,dy)-> dy[1]=y[1], [1.], [1:-.001:0;], (t,y)->e.^(t-1)], - Any[(t,y,dy)->(dy[1]=-y[2]; dy[2]=y[1]), [1., 2.], [0:.001:2*pi;], - (t,y)->[cos(t)-2*sin(t) 2*cos(t)+sin(t)] ] -] +testsets = [ + Dict( + :F! => (t,y,dy)->dy[1]=6.0, + :y0 => [0.], + :tspan => [0:0.1:1;], + :jac => (t,y,dy)->dy[1]=0.0, + :sol => t->[6t], + :isscalar => true, + :name => "y'=6t", + :initstep => 0.1), + Dict( + :F! => (t,y,dy)->dy[1]=2t, + :y0 => [0.], + :tspan => [0:0.001:1;], + :jac => (t,y,dy)->dy[1]=0.0, + :sol => t->[t^2], + :isscalar => true, + :name => "y'=2t", + :initstep => 0.001), + Dict( + :F! => (t,y,dy)->dy[1]=y[1], + :y0 => [1.0], + :tspan => [0:0.001:1;], + :jac => (t,y,dy)->dy[1]=1.0, + :sol => t->[exp(t)], + :isscalar => true, + :name => "y'=y", + :initstep => 0.001), + Dict( + :F! 
=> (t,y,dy)->dy[1]=y[1], + :y0 => [1.0], + :tspan => [1:-0.001:0;], + :jac => (t,y,dy)->dy[1]=1.0, + :sol => t->[exp(t-1)], + :isscalar => true, + :name => "y'=y backwards", + :initstep => 0.001), + Dict( + :F! => (t,y,dy)->(dy[1]=-y[2];dy[2]=y[1]), + :y0 => [1.0,2.0], + :tspan => [0:.1:1;], + :jac => (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]]), + :sol => t->[cos(t)-2*sin(t) 2*cos(t)+sin(t)], + :isscalar => false, + :name => "pendulum", + :initstep => 0.001) + ] # Testing function ode @@ -29,96 +58,79 @@ steppers = [ODE.RKStepperFixed{:feuler}, ODE.RKStepperAdaptive{:dopri5}, ODE.RKStepperAdaptive{:feh78}, ODE.ModifiedRosenbrockStepper{} - ] +] -# F,y0,tspan,ana = (1,1,1,1) -rks =1 -ts =1 -println("Testing `ode`") function test_ode() - for rks in steppers - println("Testing $rks") - for ts in testsets_scalar - F,y0,tspan,ana = ts - t,y = ODE.ode(F,y0,tspan,rks{eltype(tspan)}()) - y = hcat(y...).' - @test maximum(abs(y-ana(t,y))) < tol - end - end -end + tol = 0.002 -function test_iterator_out_place() - # Testing the lower-level iteration API - println("\nTesting iterators") for rks in steppers println("Testing $rks") - for ts in testsets_scalar - F,y0,tspan,ana = ts - T = eltype(tspan) - stepper = rks{T}() - jac = ODE.forward_jacobian(F,y0) - equation = ODE.explicit_ineff(tspan[1],y0,F,jac) - opts = ODE.Options{T}( - tspan = tspan, - reltol = eps(T)^T(1//3)/10, - abstol = eps(T)^T(1//2)/10, - initstep = ODE.dtinit(F, y0, tspan, eps(T)^T(1//3)/10, eps(T)^T(1//2)/10, order=ODE.order(stepper)) - ) - solver = ODE.solve(equation,stepper,opts) - solution = collect(ODE.dense(solver)) - nn = length(solution) - t = Array(T,nn) - y = Array(typeof(y0),nn) - - for (n,(tt,yy)) in enumerate(solution) - t[n] = tt - y[n] = isa(y0,Number) ? yy[1] : yy - end - y = hcat(y...).' 
- @test maximum(abs(y-ana(t,y))) < tol - end - end -end + for ts in testsets + println("Testing problem $(ts[:name])") -# Testing the lower-level iteration API + tspan, h0, stepper = ts[:tspan], ts[:initstep], rks{eltype(ts[:tspan])}() -# Test sets with in-place F! [F!, y0, tspan, analytic] + y0, F!, jac!, sol = ts[:y0], ts[:F!], ts[:jac], ts[:sol] -function test_iterator_in_place() - println("\nTesting iterators using in-place functions:") + F(t,y) = (dy = similar(y); F!(t,y,dy); return dy) - for rks in steppers - println("Testing $rks") - for ts in testsets_vector - F!,y0,tspan,ana = ts - T = eltype(tspan) - stepper = rks{T}() - # jac = ODE.forward_jacobian(F!,y0) - # jac! = (t,y,J) -> copy!(J,jac(t,y)) - equation = ODE.ExplicitODE(tspan[1],y0,F!) - opts = ODE.Options{T}( - tspan = tspan, - reltol = eps(T)^T(1//3)/10, - abstol = eps(T)^T(1//2)/10, - initstep = 0.001 - ) - solver = ODE.solve(equation,stepper,opts) - solution = collect(ODE.dense(solver)) - nn = length(solution) - t = Array(T,nn) - y = Array(typeof(y0),nn) - - for (n,(tt,yy)) in enumerate(solution) - t[n] = tt - y[n] = isa(y0,Number) ? yy[1] : yy + for points = [:specified, :all] + if ts[:isscalar] + # test the ODE.odeXX scalar interface (if the equation is scalar) + Fscal = (t,y)->F(t,[y])[1] + y0scal = y0[1] + # with jacobian + tj,yj = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0,jac! = jac!) + @test_approx_eq_eps yj map(x->sol(x)[1],tj) tol + # without jacobian + t,y = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0) + @test_approx_eq_eps y map(x->sol(x)[1],tj) tol + + # results with and without jacobian should be exactly the same + @test_approx_eq yj y + + if points == :specified + # test if we covered the whole timespan + @test length(tspan) == length(t) == length(tj) + @test_approx_eq tspan t + @test_approx_eq tspan tj + end + end + + # ODE.odeXX vector interface + # with jacobian + tj,yj = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0,jac! = jac!) 
+ @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol + # without jacobian + t,y = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0) + @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol + + @test_approx_eq hcat(yj...) hcat(y...) + + if points == :specified + # test if we covered the whole timespan + @test length(tspan) == length(t) == length(tj) + @test_approx_eq tspan t + @test_approx_eq tspan tj + end + + # test the iterator interface (they only support forward time integration) + if issorted(tspan) + equation = ODE.ExplicitODE(tspan[1],y0,F!) + opts = ODE.Options{eltype(tspan)}(tspan = tspan,initstep = h0,points = points) + solver = ODE.solve(equation,stepper,opts) + + for (t,y) in solver + @test_approx_eq_eps y sol(t) tol + end + + for (t,y) in ODE.dense(solver) + @test_approx_eq_eps y sol(t) tol + end + end end - y = hcat(y...).' - @test maximum(abs(y-ana(t,y))) < tol end end end - test_ode() -test_iterator_out_place() -test_iterator_in_place() diff --git a/test/runtests.jl b/test/runtests.jl index 190d0bb33..740da6fea 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -28,44 +28,40 @@ solvers = [ for solver in solvers println("using $solver") + # dy # -- = 6 ==> y = 6t # dt # we need to fix initstep for the fixed-step methods t,y=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1) @test maximum(abs(y-6t)) < tol - t,y=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1, jac = (t,y)->0.) - @test maximum(abs(y-6t)) < tol + tj,yj=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1, jac! = (t,y,dy)->dy[1]=0.0) + @test maximum(abs(yj-6tj)) < tol + @test norm(map(norm,yj-y,Inf)) y = t.^2 # dt t,y=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001) @test maximum(abs(y-t.^2)) < tol - t,y=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001, jac = (t,y)->0.) 
- @test maximum(abs(y-t.^2)) < tol - - # test typeof(tspan)==Vector{Int} does not throw - @test_throws ErrorException t,y=solver((t,y)->2y, 0., [0,1]) - # test typeof(y0)==Vector{Int} does not throw - @test_throws ErrorException t,y=solver((t,y)->[2y], [0], [0,1]) - # test typeof(y0)==Int does not throw - @test_throws ErrorException t,y=solver((t,y)->2y, 0, [0,1]) - # test if we can deal with a mixed case - @test_throws ErrorException t,y=solver((t,y)->2y, Number[1,1.1,BigInt(1)], Rational[0,1]) + tj,yj=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001, jac! = (t,y,dy)->dy[1]=0.0) + @test maximum(abs(yj-tj.^2)) < tol + @test norm(map(norm,yj-y,Inf)) y = y0*e.^t # dt t,y=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001) @test maximum(abs(y-e.^t)) < tol - t,y=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001, jac = (t,y)->1.) - @test maximum(abs(y-e.^t)) < tol + tj,yj=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001, jac! = (t,y,dy)->dy[1]=1.0) + @test maximum(abs(yj-e.^tj)) < tol + @test norm(map(norm,yj-y,Inf))y, 1., [1:-.001:0;], initstep=0.001) @test maximum(abs(y-e.^(t-1))) < tol - t,y=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, jac = (t,y)->1.) - @test maximum(abs(y-e.^(t-1))) < tol + tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, jac! = (t,y,dy)->dy[1]=1.0) + @test maximum(abs(yj-e.^(tj-1))) < tol + @test norm(map(norm,yj-y,Inf)) v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) @@ -75,9 +71,19 @@ for solver in solvers t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001) ys = hcat(y...).' # convert Vector{Vector{Float}} to Matrix{Float} @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol - t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001, jac=(t,y)->Float64[[0,1] [-1,0]]) - ys = hcat(y...).' 
# convert Vector{Vector{Float}} to Matrix{Float} - @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol + tj,yj=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001, jac! = (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]])) + ysj = hcat(yj...).' # convert Vector{Vector{Float}} to Matrix{Float} + @test maximum(abs(ysj-[cos(tj)-2*sin(tj) 2*cos(tj)+sin(tj)])) < tol + @test norm(map(norm,yj-y,Inf))2y, 0., [0,1]) + # test typeof(y0)==Vector{Int} does not throw + @test_throws ErrorException t,y=solver((t,y)->[2y], [0], [0,1]) + # test typeof(y0)==Int does not throw + @test_throws ErrorException t,y=solver((t,y)->2y, 0, [0,1]) + # test if we can deal with a mixed case + @test_throws ErrorException t,y=solver((t,y)->2y, Number[1,1.1,BigInt(1)], Rational[0,1]) end # Test negative starting times ODE.ode23s From 2a52053c3cb407d57f12939227a8e682bc77d2e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 1 Jul 2016 12:53:57 +0200 Subject: [PATCH 029/113] Improved convert for RKTableau --- src/runge-kutta.jl | 2 +- src/tableaus.jl | 27 +++++---------------------- 2 files changed, 6 insertions(+), 23 deletions(-) diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index df7744ab0..64ae0401f 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -14,7 +14,7 @@ or an adaptive step algorithm). 
immutable RKStepper{Kind,Name,T} <: AbstractStepper{T} tableau::TableauRKExplicit{T} function RKStepper() - tab = convert(T,tableaus_rk_explicit[Name]) + tab = convert(TableauRKExplicit{T},tableaus_rk_explicit[Name]) if Kind == :fixed && isadaptive(tab) error("Cannot construct a fixed step method from an adaptive step tableau") elseif Kind == :adaptive && !isadaptive(tab) diff --git a/src/tableaus.jl b/src/tableaus.jl index 6fe52b763..a8045f4b1 100644 --- a/src/tableaus.jl +++ b/src/tableaus.jl @@ -10,8 +10,6 @@ abstract Tableau{T<:Real} # S is the number of stages (an int) # T is the type of the coefficients # -# TODO: have a type parameter which specifies adaptive vs non-adaptive -# # For all types of tableaus it assumes fields: # order::(Int...) # order of the method(s) # @@ -78,28 +76,13 @@ function TableauRKExplicit(name::AbstractString, order::(@compat(Tuple{Vararg{In convert(Matrix{T},b), convert(Vector{T},c) ) end - -# TODO: remove conv_field -conv_field{T,N}(D,a::Array{T,N}) = convert(Array{D,N}, a) - - lengthks(tab::TableauRKExplicit) = length(tab.c) -# TODO: there should be a better way to do it -function Base.convert{Tnew<:Real,T}(::Type{Tnew}, tab::TableauRKExplicit{T}) - # Converts the tableau coefficients to the new type Tnew - newflds = () - @compat for n in [:a,:b,:c] - fld = getfield(tab,n) - if eltype(fld)==T - newflds = tuple(newflds..., conv_field(Tnew, fld)) - else - newflds = tuple(newflds..., fld) - end - end - TableauRKExplicit{Tnew}(tab.name, tab.order, newflds...) # TODO: could this be done more generically in a type-stable way? 
-end - +Base.convert{Tnew<:Real,T}(::Type{TableauRKExplicit{Tnew}}, tab::TableauRKExplicit{T}) = + TableauRKExplicit{Tnew}(tab.name, tab.order, + convert(Matrix{Tnew},tab.a), + convert(Matrix{Tnew},tab.b), + convert(Vector{Tnew},tab.c)) isexplicit(b::TableauRKExplicit) = istril(b.a) # Test whether it's an explicit method isadaptive(b::TableauRKExplicit) = size(b.b, 1)==2 From 6f057b784262d2f79997d30bfb98250f22dab708 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 13 Jul 2016 15:04:35 +0200 Subject: [PATCH 030/113] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2a9b25ee7..c7c04982e 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ Various basic Ordinary Differential Equation solvers implemented in Julia. +[![Join the chat at https://gitter.im/pwl/ODE.jl](https://badges.gitter.im/pwl/ODE.jl.svg)](https://gitter.im/pwl/ODE.jl?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Build Status](https://travis-ci.org/JuliaLang/ODE.jl.svg?branch=master)](https://travis-ci.org/JuliaLang/ODE.jl) [![Coverage Status](https://img.shields.io/coveralls/JuliaLang/ODE.jl.svg)](https://coveralls.io/r/JuliaLang/ODE.jl) [![ODE](http://pkg.julialang.org/badges/ODE_0.3.svg)](http://pkg.julialang.org/?pkg=ODE&ver=0.3) @@ -9,7 +10,6 @@ Pull requests are always highly welcome to fix bugs, add solvers, or anything el # API discussions -[![Join the chat at https://gitter.im/pwl/ODE.jl](https://badges.gitter.im/pwl/ODE.jl.svg)](https://gitter.im/pwl/ODE.jl?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) There are currently discussions about how the Julian API for ODE solvers should look like, and the current documentation is more like a wishlist than a documentation. The API has changed considerably since the initial v0.1 release, so be carefull when you upgrade to v0.2 or later versions. 
# Current status of the project From 210a81e1d3dda0a0af7f67b833cfa3f8e25daea7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sun, 17 Jul 2016 13:55:06 +0200 Subject: [PATCH 031/113] Minor fixes --- src/helpers.jl | 2 +- src/runge-kutta.jl | 2 +- src/types.jl | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/helpers.jl b/src/helpers.jl index 7270fe015..870cc85b2 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -6,7 +6,7 @@ Chooses an initial step-size basing on the equation, initial data, time span and the order of the method of integration. """ -function dtinit{T}(F, y0, tspan::Vector{T}, reltol, abstol; order = 1) +function dtinit{T}(F, y0, tspan::AbstractVector{T}, reltol, abstol; order = 1) t0 = abs(tspan[1]) tstop = abs(tspan[end]) tau = max(reltol*norm(y0, Inf), abstol) diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 64ae0401f..f239c4fe1 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -285,7 +285,7 @@ function stepsize_hw92!{T}(work, end # TOOD: should we use options.norm here as well? - err = norm(work.yerr) # Eq. 4.11 + err = options.norm(work.yerr) # Eq. 4.11 newdt = min(options.maxstep, dt*max(facmin, fac*(1/err)^(1/(ord+1)))) # Eq 4.13 modified if timeout > 0 diff --git a/src/types.jl b/src/types.jl index 62fe6f432..fd4d3d84f 100644 --- a/src/types.jl +++ b/src/types.jl @@ -24,7 +24,8 @@ ExplicitODE{T,Y}(t0::T, y0::Y, F!::Function; ExplicitODE{T,Y}(t0,y0,F!,jac!) function forward_jacobian!(F!,tmp) - (t,y,J)->ForwardDiff.jacobian!(J,(dy,y)->F!(t,y,dy),tmp,y) + jac!(t,y,J)=ForwardDiff.jacobian!(J,(dy,y)->F!(t,y,dy),tmp,y) + return jac! 
end """ From af2dc465c8d43b546201fb08015a428ae7727ce9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sun, 17 Jul 2016 15:20:04 +0200 Subject: [PATCH 032/113] New ODE types --- src/helpers.jl | 18 +++++++++ src/interfaces.jl | 33 ++++++++++++++-- src/ode23s.jl | 6 +-- src/rosenbrock.jl | 10 ----- src/types.jl | 98 +++++++++++++++++++++++++---------------------- test/iterators.jl | 4 +- test/runtests.jl | 10 ++--- 7 files changed, 111 insertions(+), 68 deletions(-) diff --git a/src/helpers.jl b/src/helpers.jl index 870cc85b2..31419fafd 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -67,3 +67,21 @@ function findroot(f,rng,eps) return (xr+xl)/2 end + +# generate a jacobian using ForwardDiff +function forward_jacobian(F,y0::AbstractArray) + (t,y)->ForwardDiff.jacobian(y->F(t,y),y) +end + +function forward_jacobian(F,y0) + (t,y)->ForwardDiff.derivative(y->F(t,y),y) +end + +function forward_jacobian!(F!,tmp) + jac!(t,y,J)=ForwardDiff.jacobian!(J,(dy,y)->F!(t,y,dy),tmp,y) + return jac! +end + +function forward_jacobian_implicit!(F!,tmp) + error("Not implemented yet") +end diff --git a/src/interfaces.jl b/src/interfaces.jl index 9bd987cbb..16031cfcc 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -10,12 +10,13 @@ function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractSteppe reltol::T = eps(T)^T(1//3)/10, abstol::T = eps(T)^T(1//2)/10, initstep::T = dtinit(F, y0, tspan, reltol, abstol; order=order(stepper))::T, + jac = forward_jacobian(F,copy(y0)), kargs...) t0 = tspan[1] # construct a solver - equation = explicit_ineff(t0,y0,F;kargs...) + equation = explicit_ineff(t0,y0,F,jac) opts = Options{T}(; tspan = tspan, @@ -115,12 +116,12 @@ function reverse_time(sol::Solver) # TODO: is that how the jacobian changes? function jac_reverse!(t,y,J) - ode.jac!(2*t0-t,y,J) + ode.J!(2*t0-t,y,J) J[:]=-J end # ExplicitODE is immutable - ode_reversed = ExplicitODE(t0,y0,F_reverse!,jac! = jac_reverse!) 
+ ode_reversed = ExplicitODE(t0,y0,F_reverse!,J! = jac_reverse!) stopevent = options.stopevent # TODO: we are modifying options here, should we construct new @@ -130,3 +131,29 @@ function reverse_time(sol::Solver) options.stopevent = (t,y)->stopevent(2*t0-t,y) return solve(ode_reversed,stepper,options) end + + +""" + +Convert a out-of-place explicitly defined ODE function to +ExplicitODE. As the name suggests, the result is not going to be very +efficient. + +""" +function explicit_ineff{T,Y}(t0::T, y0::AbstractVector{Y}, F::Function, jac::Function) + F!(t,y,dy) =copy!(dy,F(t,y)) + jac!(t,y,J)=copy!(J,jac(t,y)) + return ExplicitODE(t0,y0,F!; J! = jac!) +end + +# A temporary solution for handling scalars, should be faster then the +# previous implementation. Should be used only at the top level +# interface. This function cheats by converting scalar functions F +# and jac to vector functions F! and jac!. Still, solving this ODE +# will result in a vector of length one result, so additional external +# conversion is necessary. +function explicit_ineff{T,Y}(t0::T, y0::Y, F::Function, jac) + F!(t,y,dy) =(dy[1]=F(t,y[1])) + jac!(t,y,J)=(J[1]=jac(t,y[1])) + return ExplicitODE(t0,[y0],F!; J! = jac!) +end diff --git a/src/ode23s.jl b/src/ode23s.jl index eaf9519f0..82b3df7a7 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -73,7 +73,7 @@ function start{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}) 0) # iters # initialize the derivative and the Jacobian s.ode.F!(t,y,step.dy) - s.ode.jac!(t,y,state.J) + s.ode.J!(t,y,state.J) return state end @@ -89,7 +89,7 @@ function next{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state F1, F2, J = state.F1, state.F2, state.J t, dt, y, dy = step.t, state.dt, step.y, step.dy - # F!, jac! = ode.F!, ode.jac! + # F!, J! = ode.F!, ode.J! 
d, e32 = stepper.d, stepper.e32 F0 = dy @@ -138,7 +138,7 @@ function next{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state state.dt = dtnew step.y[:] = ynew step.dy[:] = F2 - ode.jac!(step.t,step.y,J) + ode.J!(step.t,step.y,J) return ((step.t,step.y), state) else diff --git a/src/rosenbrock.jl b/src/rosenbrock.jl index 27b4d886d..8bcba8e6e 100644 --- a/src/rosenbrock.jl +++ b/src/rosenbrock.jl @@ -1,13 +1,3 @@ -# generate a jacobian using ForwardDiff -function forward_jacobian(F,y0::AbstractArray) - (t,y)->ForwardDiff.jacobian(y->F(t,y),y) -end - -function forward_jacobian(F,y0) - (t,y)->ForwardDiff.derivative(y->F(t,y),y) -end - - #ODEROSENBROCK Solve stiff differential equations, Rosenbrock method # with provided coefficients. function oderosenbrock{Ty,T}(F, x0::Ty, tspan::AbstractVector{T}, diff --git a/src/types.jl b/src/types.jl index fd4d3d84f..c6ed367de 100644 --- a/src/types.jl +++ b/src/types.jl @@ -1,68 +1,75 @@ -abstract AbstractODE{T,Y} +abstract AbstractIVP{T,Y} """ -Explicitly defined ODE of form dy = F(t,y). -Fields: +Defines the mathematical part of an IVP (initial value problem) +specified in the general form: + +`F(t, y) = G(t, y, dy)` with `y(t0)= y0` + +Depending on the combination of the parameters this type can represent +a wide range of problems, including ODE, DAE and IMEX. Nevertheless +not all solvers will support any combinations of `F` and `G`. Note +that not specifying `G` amounts to `G=dy/dt`. + + +- `tspan` -- tuple `(start_t,end_t)` +- `y0` -- initial condition +- `F!` -- in-place `F` function `F!(t,y,res)`. If `F=0` set to `nothing`. +- `G!` -- in-place `G` function `G!(t,y,dy,res)`. If `G=dy/dt` then + set to `nothing` (or `dy` if the solver supports this). Can + also be a mass matrix for a RHS `M dy/dt` +- `J!` -- in-place Jacobian function `J!(t,y,dy,res)`. + +TODO: how to fit the sparsity pattern in J? 
-- t0, y0: initial conditions -- F!: ODE function `F!(t,y,dy)` which modifies `dy` in-place -- jac!: TODO """ -immutable ExplicitODE{T,Y} <: AbstractODE{T,Y} +type IVP{T,Y,F,G,J} <: AbstractIVP{T,Y} t0 ::T y0 ::Y - F! ::Function - jac!::Function - function ExplicitODE(t0::T, y0::Y, F!::Function, jac!::Function) - new(t0,y0,F!,jac!) - end + dy0 ::Y + F! ::F + G! ::G + J! ::J end -ExplicitODE{T,Y}(t0::T, y0::Y, F!::Function; - jac!::Function = forward_jacobian!(F!,similar(y0)), kargs...) = - ExplicitODE{T,Y}(t0,y0,F!,jac!) +""" -function forward_jacobian!(F!,tmp) - jac!(t,y,J)=ForwardDiff.jacobian!(J,(dy,y)->F!(t,y,dy),tmp,y) - return jac! -end +Explicit ODE representing the problem -""" +`dy = F(t,y)` with `y(t0)=y0` -This type is not yet implemented, but will serve as an implicitly -defined ODE (i.e. ODE of the form F(t,y,y')=0. +- t0, y0: initial conditions +- F!: in place version of `F` called by `F!(t,y,dy)` +- J!: (optional) computes `J=dF/dy` in place, called with `J!(t,y,J)` """ -immutable ImplicitODE{T,Y} <: AbstractODE{T,Y} -end +typealias ExplicitODE{T,Y} IVP{T,Y,Function,Void,Function} +@compat (::Type{ExplicitODE}){T,Y}(t0::T, + y0::Y, + F!::Function; + J!::Function = forward_jacobian!(F!,similar(y0))) = + ExplicitODE{T,Y}(t0,y0,similar(y0),F!,nothing,J!) """ -Convert a out-of-place explicitly defined ODE function to an in-place function. - -Note, this does not help with memory allocations. +Implicit ODE representing the problem -""" -function explicit_ineff{T,Y}(t0::T, y0::AbstractVector{Y}, F::Function; kargs...) - F!(t,y,dy) =copy!(dy,F(t,y)) - jac!(t,y,J)=copy!(J,jac(t,y)) - return ExplicitODE(t0,y0,F!;kargs...) -end +`F(t,y,dy)=0` with `y(t0)=y0` and optionally `y'(t0)=dy0` -# A temporary solution for handling scalars, should be faster then the -# previous implementation. Should be used only at the top level -# interface. This function cheats by converting scalar functions F -# and jac to vector functions F! and jac!. 
Still, solving this ODE -# will result in a vector of length one result, so additional external -# conversion is necessary. -function explicit_ineff{T,Y}(t0::T, y0::Y, F::Function; kargs...) - F!(t,y,dy) =(dy[1]=F(t,y[1])) - jac!(t,y,J)=(J[1]=jac(t,y[1])) - return ExplicitODE(t0,[y0],F!;kargs...) -end +- t0, y0: initial conditions +- F!: in place version of `F` called by `F!(t,y,dy)` +- J!: (optional) computes `J=dF/dy+a*dF/dy'` for prescribed `a`, called with `J!(t,y,dy,a)` +""" +typealias ImplicitODE{T,Y} IVP{T,Y,Void,Function,Function} +@compat (::Type{ImplicitODE}){T,Y}(t0::T, + y0::Y, + G!::Function; + J!::Function = forward_jacobian_implicit!(F!,similar(y0)), + dy0::Y = zero(y0)) = + ImplicitODE{T,Y}(t0,y0,dy0,nothing,G!,J!) """ @@ -192,6 +199,7 @@ function show{T}(io::IO, opts :: Options{T}) end end + """ This is an iterable type, each call to next(...) produces a next step @@ -202,7 +210,7 @@ of a numerical solution to an ODE. - options: options passed to the stepper """ -immutable Solver{O<:AbstractODE,S<:AbstractStepper,T} +immutable Solver{O<:AbstractIVP,S<:AbstractStepper,T} ode :: O stepper :: S options :: Options{T} diff --git a/test/iterators.jl b/test/iterators.jl index fab084941..5660f5433 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -80,7 +80,7 @@ function test_ode() Fscal = (t,y)->F(t,[y])[1] y0scal = y0[1] # with jacobian - tj,yj = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0,jac! = jac!) + tj,yj = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0,J! = jac!) @test_approx_eq_eps yj map(x->sol(x)[1],tj) tol # without jacobian t,y = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0) @@ -99,7 +99,7 @@ function test_ode() # ODE.odeXX vector interface # with jacobian - tj,yj = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0,jac! = jac!) + tj,yj = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0,J! = jac!) @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) 
tol # without jacobian t,y = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0) diff --git a/test/runtests.jl b/test/runtests.jl index 740da6fea..ee4991d6a 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -35,7 +35,7 @@ for solver in solvers # we need to fix initstep for the fixed-step methods t,y=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1) @test maximum(abs(y-6t)) < tol - tj,yj=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1, jac! = (t,y,dy)->dy[1]=0.0) + tj,yj=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1, J! = (t,y,dy)->dy[1]=0.0) @test maximum(abs(yj-6tj)) < tol @test norm(map(norm,yj-y,Inf))2t, 0., [0:.001:1;], initstep=0.001) @test maximum(abs(y-t.^2)) < tol - tj,yj=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001, jac! = (t,y,dy)->dy[1]=0.0) + tj,yj=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001, J! = (t,y,dy)->dy[1]=0.0) @test maximum(abs(yj-tj.^2)) < tol @test norm(map(norm,yj-y,Inf))y, 1., [0:.001:1;], initstep=0.001) @test maximum(abs(y-e.^t)) < tol - tj,yj=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001, jac! = (t,y,dy)->dy[1]=1.0) + tj,yj=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) @test maximum(abs(yj-e.^tj)) < tol @test norm(map(norm,yj-y,Inf))y, 1., [1:-.001:0;], initstep=0.001) @test maximum(abs(y-e.^(t-1))) < tol - tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, jac! = (t,y,dy)->dy[1]=1.0) + tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) @test maximum(abs(yj-e.^(tj-1))) < tol @test norm(map(norm,yj-y,Inf))[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001) ys = hcat(y...).' # convert Vector{Vector{Float}} to Matrix{Float} @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol - tj,yj=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001, jac! = (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]])) + tj,yj=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001, J! 
= (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]])) ysj = hcat(yj...).' # convert Vector{Vector{Float}} to Matrix{Float} @test maximum(abs(ysj-[cos(tj)-2*sin(tj) 2*cos(tj)+sin(tj)])) < tol @test norm(map(norm,yj-y,Inf)) Date: Wed, 20 Jul 2016 11:42:51 +0200 Subject: [PATCH 033/113] Decoupling options and solver --- examples/test.jl | 46 ++++++++++++++++ src/ODE.jl | 4 +- src/dense.jl | 70 +++++++++++++++++++----- src/helpers.jl | 62 ++++++++++----------- src/interfaces.jl | 43 +++++++-------- src/iterators.jl | 8 +-- src/options.jl | 84 ++++++++++++++++++++++++++++ src/runge-kutta.jl | 47 +++++++++------- src/types.jl | 133 +++++++++++---------------------------------- 9 files changed, 302 insertions(+), 195 deletions(-) create mode 100644 examples/test.jl create mode 100644 src/options.jl diff --git a/examples/test.jl b/examples/test.jl new file mode 100644 index 000000000..bd7ef081d --- /dev/null +++ b/examples/test.jl @@ -0,0 +1,46 @@ +include("ODE.jl") + +module Test + +using ODE + +T = Float64 +Y = Vector{T} +t0 = zero(T) +y0 = T[one(T)] + +st = ODE.RKStepperAdaptive{:rk45} +ode = ODE.ExplicitODE(t0,y0,(t,y,dy)->dy[1]=y[1]) +opts = Dict(:initstep=>0.1, + :tstop=>1., + :tspan=>[0.,1.], + :points=>:specified, + :reltol=>1e-5, + :abstol=>1e-5) + +stepper = st{T}(ode) +sol = ODE.Solver(ode,stepper) +println(sol) + + +sol = ODE.solve(ode,st;opts...) +den = ODE.dense(sol;opts...) 
+println(sol.stepper.options) + +println("Raw iterator") +for (t,y) in sol + println((t,y)) +end + +println("Dense output") +for (t,y) in den + println((t,y)) +end + +println(collect(sol)) +println(collect(den)) + +println(collect(sol')) +println(collect(den')) + +end diff --git a/src/ODE.jl b/src/ODE.jl index c13afe78c..25dbd8ffc 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -6,14 +6,14 @@ using Polynomials using Compat using Iterators using ForwardDiff +using Parameters import Base.convert, Base.show import Base: start, next, done, call, collect -## complete function export list: see runtests.jl - # basic type definitions include("types.jl") +include("options.jl") include("helpers.jl") # dense output wrapper diff --git a/src/dense.jl b/src/dense.jl index 9e8af6898..16363e672 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -1,15 +1,54 @@ # A higher level stepper, defined as a wrapper around another stepper. +""" + +Dense output options: + +- tspan ::Vector{T} output times +- points ::Symbol which points are returned: `:specified` only the + ones in tspan or `:all` which includes also the step-points of the solver. +- stopevent Stop integration at a zero of this function +- roottol TODO + +""" + +immutable DenseOptions{T<:Number,S<:Function} <: Options{T} + tspan ::Vector{T} + tstop ::T + points ::Symbol + stopevent::S + roottol ::T +end + +@compat function (::Type{DenseOptions{T}}){T,S}(; + tstop = T(Inf), + tspan::Vector = T[tstop], + points::Symbol= :all, + stopevent::S = (t,y)->false, + roottol = eps(T)^T(1//3), + kargs...) + DenseOptions{T,S}(tspan,tstop,points,stopevent,roottol) +end + + #TODO: how about having an DenseStepper <: AbstractWrapper <: AbstractStepper? -immutable DenseStepper <: AbstractStepper - solver::Solver +immutable DenseStepper{S<:Solver,O<:DenseOptions} <: AbstractStepper + solver::S + options::O +end + + +@compat function (::Type{DenseStepper{T}}){T,S<:Solver}(solver::S; + options...) 
+ DenseStepper(solver,DenseOptions{T}(;options...)) end -solve(ode::ExplicitODE, - stepper::DenseStepper, - options::Options) = Solver(ode,stepper,options) -dense(sol::Solver) = solve(sol.ode, DenseStepper(sol), sol.options) +function dense{O<:ExplicitODE,S,T,Y}(sol::Solver{O,S,T,Y}; options...) + opt = DenseOptions{T}(;options...) + den = DenseStepper(sol,opt) + Solver{O,typeof(den),T,Y}(sol.ode, den) +end """ @@ -75,6 +114,7 @@ function next{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}, state::DenseState) # not using the index of tspan anywhere explicitly. solver = s.stepper.solver + options = s.stepper.options # these guys store the intermediate steps we make s0, s1 = state.s0, state.s1 @@ -84,7 +124,7 @@ function next{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}, state::DenseState) # t_goal to the next larger time from tspan. Strong inequality # below is crucial, otherwise we would be selecting the same step # every time. - tspan = s.options.tspan + tspan = options.tspan t_goal = tspan[findfirst(t->(t>state.last_tout), tspan)] # Keep computing new steps (i.e. new pairs (t0,t1)) until we reach @@ -119,12 +159,12 @@ function next{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}, state::DenseState) # we haven't reached t_goal yet (t1= s.options.tspan[end] || - s.options.stopevent(state.s1.t,state.s1.y) + state.last_tout >= options.tspan[end] || + options.stopevent(state.s1.t,state.s1.y) ) end diff --git a/src/helpers.jl b/src/helpers.jl index 31419fafd..b36bbba0b 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -1,39 +1,39 @@ #TODO make it a function on ExplicitODE and Options -""" +# """ -Chooses an initial step-size basing on the equation, initial data, -time span and the order of the method of integration. +# Chooses an initial step-size basing on the equation, initial data, +# time span and the order of the method of integration. 
-""" -function dtinit{T}(F, y0, tspan::AbstractVector{T}, reltol, abstol; order = 1) - t0 = abs(tspan[1]) - tstop = abs(tspan[end]) - tau = max(reltol*norm(y0, Inf), abstol) - d0 = norm(y0, Inf)/tau - f0 = F(t0, y0) - d1 = norm(f0, Inf)/tau - if min(d0,d1) < eps(T)^(1/3) - dt0 = eps(T)^(1/3)/10 - else - dt0 = (d0/d1)/100 - end - # perform Euler step - y1 = y0+dt0*f0 - f1 = F(t0 + dt0, y1) - # estimate second derivative - d2 = norm(f1 - f0, Inf)/(tau*dt0) - if max(d1, d2) <= 10*eps(T) - dt1 = max(eps(T)^(1/3)/10, dt0/10^3) - else - pow = -(2 + log10(max(d1, d2)))/(order+1) - dt1 = 10^pow - end - return T(min(100*dt0, dt1, abs(tstop-t0))) -end +# """ +# function dtinit{T}(F::Function, y0, tspan::AbstractVector{T}, reltol, abstol; order = 1) +# t0 = abs(tspan[1]) +# tstop = abs(tspan[end]) +# tau = max(reltol*norm(y0, Inf), abstol) +# d0 = norm(y0, Inf)/tau +# f0 = F(t0, y0) +# d1 = norm(f0, Inf)/tau +# if min(d0,d1) < eps(T)^(1/3) +# dt0 = eps(T)^(1/3)/10 +# else +# dt0 = (d0/d1)/100 +# end +# # perform Euler step +# y1 = y0+dt0*f0 +# f1 = F(t0 + dt0, y1) +# # estimate second derivative +# d2 = norm(f1 - f0, Inf)/(tau*dt0) +# if max(d1, d2) <= 10*eps(T) +# dt1 = max(eps(T)^(1/3)/10, dt0/10^3) +# else +# pow = -(2 + log10(max(d1, d2)))/(order+1) +# dt1 = 10^pow +# end +# return T(min(100*dt0, dt1, abs(tstop-t0))) +# end -# a scalar version of the above -dtinit(F, y0::Number, args...; kargs...) = dtinit((t,y)->[F(t,y[1])], [y0], args...; kargs...) +# # a scalar version of the above +# dtinit(F, y0::Number, args...; kargs...) = dtinit((t,y)->[F(t,y[1])], [y0], args...; kargs...) """ diff --git a/src/interfaces.jl b/src/interfaces.jl index 16031cfcc..fb09eb430 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -5,38 +5,30 @@ tspan[end] is the last integration time. 
""" -function ode{T<:Number}(F, y0, tspan::AbstractVector{T}, stepper::AbstractStepper; - # we need these options explicitly for the dtinit - reltol::T = eps(T)^T(1//3)/10, - abstol::T = eps(T)^T(1//2)/10, - initstep::T = dtinit(F, y0, tspan, reltol, abstol; order=order(stepper))::T, - jac = forward_jacobian(F,copy(y0)), - kargs...) +function ode{T<:Number,S<:AbstractStepper}(F, y0, + tspan::AbstractVector{T}, + stepper::Type{S}; + jac = forward_jacobian(F,copy(y0)), + kargs...) t0 = tspan[1] # construct a solver equation = explicit_ineff(t0,y0,F,jac) - opts = Options{T}(; - tspan = tspan, - reltol = reltol, - abstol = abstol, - initstep = initstep, - kargs...) - solver = solve(equation,stepper,opts) + solver = solve(equation,stepper;kargs...) # handle different directions of time integration if issorted(tspan) # do nothing, we are already set with the solver - solution = collect(dense(solver)) + solution = collect(dense(solver;kargs...)) elseif issorted(reverse(tspan)) # Reverse the time direction if necessary. dense() only works # for positive time direction. # TODO: still ugly but slightly less bandaid-like then the # previous solution - solution = map(ty->(2*t0-ty[1],ty[2]),collect(dense(reverse_time(solver)))) + solution = map(ty->(2*t0-ty[1],ty[2]),collect(dense(reverse_time(solver; kargs...);kargs...))) else warn("Unsorted output times are not supported") return ([t0],[y0]) @@ -87,7 +79,7 @@ function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},stepper;kargs...) error("The initial data has to be of a concrete type (or an array)") end - ode(F,y0,t0,stepper{T}();kargs...) + ode(F,y0,t0,stepper;kargs...) end @@ -102,8 +94,12 @@ time direction is not supported by steppers (including the dense output). This only works for ExplicitODE. """ -function reverse_time(sol::Solver) - ode, options, stepper = sol.ode, sol.options, sol.stepper +function reverse_time(sol::Solver; + tstop = tstop, + tspan = tspan, + stopevent=stopevent, + kargs...) 
+ ode, stepper = sol.ode, sol.stepper t0 = ode.t0 y0 = ode.y0 @@ -122,14 +118,13 @@ function reverse_time(sol::Solver) # ExplicitODE is immutable ode_reversed = ExplicitODE(t0,y0,F_reverse!,J! = jac_reverse!) - stopevent = options.stopevent # TODO: we are modifying options here, should we construct new # options insted? - options.tstop = 2*t0-options.tstop - options.tspan = reverse(2*t0.-options.tspan) - options.stopevent = (t,y)->stopevent(2*t0-t,y) - return solve(ode_reversed,stepper,options) + tstop = 2*t0-options.tstop + tspan = reverse(2*t0.-options.tspan) + stopevent = (t,y)->stopevent(2*t0-t,y) + return solve(ode_reversed,stepper; tstop = tstop, tspan = tspan, stopevent = stopevent, kargs...) end diff --git a/src/iterators.jl b/src/iterators.jl index 964b8746c..57fe431b6 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -2,14 +2,14 @@ Generic done method, some steppers may implement their own versions. """ - function done(s::Solver, state::AbstractState) - if state.step.t >= s.options.tstop + st = s.stepper + if state.step.t >= st.options.tstop return true - elseif state.dt < s.options.minstep + elseif state.dt < st.options.minstep warn("minstep reached.") return true - elseif state.iters >= s.options.maxiters + elseif state.iters >= st.options.maxiters warn("Maximum number of iterations ($(Int(s.options.maxiters))) reached, consider setting a larger maxiter.") return true end diff --git a/src/options.jl b/src/options.jl new file mode 100644 index 000000000..c1859131c --- /dev/null +++ b/src/options.jl @@ -0,0 +1,84 @@ +abstract Options{T} + +""" + +Options for ODE solvers. This type has a key-word constructor which +will fill the structure with default values. + +General: + +- initstep ::T initial step +- tstop ::T end integration time +- reltol ::T relative tolerance (m3: could this be a vector?) +- abstol ::T absolute tolerance (m3: could this be a vector?) 
+- minstep ::T minimal allowed step +- maxstep ::T maximal allowed step +- norm function to calculate the norm in step control +- maxiters ::T maximum number of steps +- isoutofdomain::Function checks if the solution became non-numeric (NaN or Inf) + +""" +immutable StepperOptions{T<:Number,N<:Function,O<:Function} <: Options{T} + tstop::T + reltol::T + abstol::T + minstep::T + maxstep::T + initstep::T + norm::N + maxiters::T + isoutofdomain::O +end + +@compat function (::Type{StepperOptions{T}}){T,N,O}(ode::ExplicitODE, + order::Int; + tspan::Vector = T[Inf], + tstop = tspan[end], + reltol = eps(T)^T(1//3)/10, + abstol = eps(T)^T(1//2)/10, + minstep = 10*eps(T), + maxstep = 1/minstep, + initstep = dtinit(ode,order,reltol,abstol,tstop), + norm::N = Base.norm, + maxiters = T(Inf), + isoutofdomain::O = Base.isnan, + kargs...) + + StepperOptions{T,N,O}(tstop,reltol,abstol,minstep,maxstep,initstep,norm,maxiters,isoutofdomain) +end + +function show{T}(io::IO, opts :: Options{T}) + for name in fieldnames(opts) + @printf("%-20s = %s\n",name,getfield(opts,name)) + end +end + + +function dtinit{T}(ode::ExplicitODE{T},order::Int,reltol::T,abstol::T,tstop::T) + t0 = ode.t0 + y0 = ode.y0 + + f0 = similar(y0) + tau = max(reltol*norm(y0, Inf), abstol) + d0 = norm(y0, Inf)/tau + ode.F!(t0, y0, f0) + d1 = norm(f0, Inf)/tau + if min(d0,d1) < eps(T)^(1/3) + dt0 = eps(T)^(1/3)/10 + else + dt0 = (d0/d1)/100 + end + # perform Euler step + y1 = y0+dt0*f0 + f1 = similar(f0) + ode.F!(t0 + dt0, y1, f1) + # estimate second derivative + d2 = norm(f1 - f0, Inf)/(tau*dt0) + if max(d1, d2) <= 10*eps(T) + dt1 = max(eps(T)^(1/3)/10, dt0/10^3) + else + pow = -(2 + log10(max(d1, d2)))/(order+1) + dt1 = 10^pow + end + return T(min(100*dt0, dt1, abs(tstop-t0))) +end diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index f239c4fe1..d0cb228d6 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -11,19 +11,25 @@ A general Runge-Kutta stepper (it cen represent either, a fixed step or an 
adaptive step algorithm). """ -immutable RKStepper{Kind,Name,T} <: AbstractStepper{T} +immutable RKStepper{Kind,Name,T,O<:StepperOptions} <: AbstractStepper{T} tableau::TableauRKExplicit{T} - function RKStepper() - tab = convert(TableauRKExplicit{T},tableaus_rk_explicit[Name]) - if Kind == :fixed && isadaptive(tab) - error("Cannot construct a fixed step method from an adaptive step tableau") - elseif Kind == :adaptive && !isadaptive(tab) - error("Cannot construct an adaptive step method from an fixed step tableau") - end - new(tab) + options::O +end + + +@compat function (::Type{RKStepper{Kind,Name,T}}){Kind,Name,T}(ode;options...) + tab = convert(TableauRKExplicit{T},tableaus_rk_explicit[Name]) + if Kind == :fixed && isadaptive(tab) + error("Cannot construct a fixed step method from an adaptive step tableau") + elseif Kind == :adaptive && !isadaptive(tab) + error("Cannot construct an adaptive step method from an fixed step tableau") end + ord = minimum(order(tab)) + options = StepperOptions{T}(ode,ord;options...) + RKStepper{Kind,Name,T,typeof(options)}(tab,options) end + typealias RKStepperFixed RKStepper{:fixed} typealias RKStepperAdaptive RKStepper{:adaptive} @@ -32,10 +38,8 @@ order(stepper::RKStepper) = minimum(order(stepper.tableau)) name(stepper::RKStepper) = typeof(stepper.tableau) -# TODO: possibly handle the initial stepsize here? -solve(ode::ExplicitODE, stepper::RKStepper, options) = - Solver(ode,stepper,options) - +solve{T,S<:RKStepper}(ode::ExplicitODE{T}, stepper::Type{S}; options...) 
= + Solver(ode,stepper{T}(ode;options...)) # lower level interface @@ -76,7 +80,8 @@ end function start{O<:ExplicitODE,S<:RKStepper}(s::Solver{O,S}) - t0, dt0, y0 = s.ode.t0, s.options.initstep, s.ode.y0 + stepper = s.stepper + t0, dt0, y0 = s.ode.t0, stepper.options.initstep, s.ode.y0 lk = lengthks(s.stepper.tableau) work = RKWorkArrays(zero(y0), # y @@ -110,7 +115,7 @@ function next{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state) dof = length(step.y) b = s.stepper.tableau.b - dt = min(state.dt,s.options.tstop-step.t) + dt = min(state.dt,s.stepper.options.tstop-step.t) copy!(work.ynew,step.y) @@ -141,14 +146,16 @@ function next{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state) timeout = state.timeout work = state.work step = state.step - tableau = sol.stepper.tableau + stepper = sol.stepper + tableau = stepper.tableau + options = stepper.options # The while loop continues until we either find a stepsize which # leads to a small enough error or the stepsize reaches # prob.minstep # trim the inital stepsize to avoid overshooting - dt = min(dt, sol.options.tstop-state.step.t) + dt = min(dt, options.tstop-state.step.t) while true @@ -160,12 +167,12 @@ function next{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state) rk_embedded_step!(work, sol.ode, tableau, step, dt) # changes work.yerr - err, newdt, timeout = stepsize_hw92!(work, step, tableau, dt, timeout, sol.options) + err, newdt, timeout = stepsize_hw92!(work, step, tableau, dt, timeout, options) # trim again in case newdt > dt - newdt = min(newdt, sol.options.tstop-state.step.t) + newdt = min(newdt, options.tstop-state.step.t) - if abs(newdt) < sol.options.minstep # minimum step size reached, break + if abs(newdt) < options.minstep # minimum step size reached, break # passing the newdt to state will result in done() state.dt = newdt break diff --git a/src/types.jl b/src/types.jl index c6ed367de..e70d78a05 100644 --- a/src/types.jl +++ b/src/types.jl @@ -116,123 +116,56 @@ function 
show(io::IO, state::Step) end -""" - -Options for ODE solvers. This type has a key-word constructor which -will fill the structure with default values. - -General: - -- initstep ::T initial step -- tstop ::T end integration time -- reltol ::T relative tolerance (m3: could this be a vector?) -- abstol ::T absolute tolerance (m3: could this be a vector?) -- minstep ::T minimal allowed step -- maxstep ::T maximal allowed step -- norm function to calculate the norm in step control -- maxiters ::T maximum number of steps -- isoutofdomain::Function checks if the solution became non-numeric (NaN or Inf) - -Dense output options: - -- tspan ::Vector{T} output times -- points ::Symbol which points are returned: `:specified` only the - ones in tspan or `:all` which includes also the step-points of the solver. -- stopevent Stop integration at a zero of this function -- roottol TODO - -""" -type Options{T} - # stepper options - initstep ::T - tstop ::T - reltol ::T - abstol ::T - minstep ::T - maxstep ::T - norm ::Function - maxiters ::T - - isoutofdomain::Function - - # dense output options - tspan ::AbstractVector{T} - points ::Symbol - - # m3: I think this should be an array of functions. Depending on some - # flag each one returns, the iteration stops or continues. Rename it - # to eventfns. I like matlabs interface. - # [value,isterminal,direction] = myEventsFcn(t,y,dy) - # The value gets stored. - stopevent::Function - roottol ::T - - function Options(; - tspan = T[Inf], - tstop = tspan[end], - reltol = eps(T)^T(1//3)/10, - abstol = eps(T)^T(1//2)/10, - minstep = 10*eps(T), - maxstep = 1/minstep, - # TODO: we need a better guess here, possibly - # overwrite it in the call to solve() - initstep = minstep, - norm = Base.norm, - maxiters = T(Inf), - points = :all, - stopevent = (t,y)->false, - roottol = eps(T)^T(1//3), - isoutofdomain = isnan, - kargs...) 
- if all(points .!= [:specified,:all]) - error("Option points = $points is not supported, use :specified or :all") - end - #TODO iterate over fields here? - new(initstep,tstop,reltol,abstol,minstep,maxstep,norm,maxiters,isoutofdomain,sort(tspan),points,stopevent,roottol) - end - -end - -function show{T}(io::IO, opts :: Options{T}) - for name in fieldnames(opts) - @printf("%-20s = %s\n",name,getfield(opts,name)) - end -end - +abstract AbstractSolver{T,Y} """ This is an iterable type, each call to next(...) produces a next step -of a numerical solution to an ODE. +of a numerical solution to an ODE. Types `T` and `Y` determine the +output type (`Tuple{T,Y}`) for iteration. - ode: is the prescrived ode, along with the initial data - stepper: the algorithm used to produce subsequent steps -- options: options passed to the stepper """ -immutable Solver{O<:AbstractIVP,S<:AbstractStepper,T} +immutable Solver{O<:AbstractIVP,S<:AbstractStepper,T,Y} <: AbstractSolver{T,Y} ode :: O stepper :: S - options :: Options{T} end -# filter the wrong combinations of ode and stepper -solve{T,S}(ode::T, stepper::S, options) = error("The $S doesn't support $T") +Base.eltype{T,Y}(::Type{AbstractSolver{T,Y}}) = Tuple{T,Y} +Solver{T,Y}(ode::AbstractIVP{T,Y},stepper::AbstractStepper{T}) = + Solver{typeof(ode),typeof(stepper),T,Y}(ode,stepper) -# normally we return the working array, which changes at each step and -# expect the user to copy it if necessary. In order for collect to -# return the expected result we need to copy the output at each step. -function collect{T,S}(t::Type{Tuple{T,S}}, s::Solver) - if maximum(s.options.tspan) == T(Inf) - error("Attempting to collect an infinite list, use tstop or tspan with finite numbers only") +# filter the wrong combinations of ode and stepper +solve{T,S}(ode::T, stepper::Type{S}, options...) 
= + error("The $S doesn't support $T") + +# In Julia 0.5 the collect needs length to be defined, we cannot do +# that for a solver +function collect{T,Y}(s::AbstractSolver{T,Y}) + pairs = Array(Tuple{T,Y},0) + for (t,y) in s + push!(pairs,(t,copy(y))) end - collect(t, imap(x->deepcopy(x),s)) + return pairs end -function collect(s::Solver) - if maximum(s.options.tspan) == Inf - error("Attempting to collect an infinite list, use tstop or tspan with finite numbers only") +# Transpose a solver to get a (Vector(t),Vector(yout)) style output +type SolverT{T,Y} <: AbstractSolver{T,Y} + solver::AbstractSolver{T,Y} +end + +Base.transpose(s::AbstractSolver) = SolverT(s) +Base.transpose(s::SolverT) = s.solver + +function collect{T,Y}(s::SolverT{T,Y}) + tout = Array(T,0) + yout = Array(Y,0) + for (t,y) in s.solver + push!(tout,t) + push!(yout,y) end - collect(imap(deepcopy,s)) + return (tout,yout) end From 22e15cd48a83f5d1b6712e68c5e553c59635d471 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 20 Jul 2016 11:56:08 +0200 Subject: [PATCH 034/113] No more Iterators --- src/ODE.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/ODE.jl b/src/ODE.jl index 25dbd8ffc..943af7130 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -4,9 +4,7 @@ module ODE using Polynomials using Compat -using Iterators using ForwardDiff -using Parameters import Base.convert, Base.show import Base: start, next, done, call, collect From 4c085e6c9a98a04aa1343cd5dcd64923d89d0932 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 20 Jul 2016 12:06:42 +0200 Subject: [PATCH 035/113] Removed the unused dtinit --- src/helpers.jl | 37 ------------------------------------- 1 file changed, 37 deletions(-) diff --git a/src/helpers.jl b/src/helpers.jl index b36bbba0b..d21548cf8 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -1,40 +1,3 @@ -#TODO make it a function on ExplicitODE and Options - -# """ - -# Chooses an initial step-size basing on the equation, initial 
data, -# time span and the order of the method of integration. - -# """ -# function dtinit{T}(F::Function, y0, tspan::AbstractVector{T}, reltol, abstol; order = 1) -# t0 = abs(tspan[1]) -# tstop = abs(tspan[end]) -# tau = max(reltol*norm(y0, Inf), abstol) -# d0 = norm(y0, Inf)/tau -# f0 = F(t0, y0) -# d1 = norm(f0, Inf)/tau -# if min(d0,d1) < eps(T)^(1/3) -# dt0 = eps(T)^(1/3)/10 -# else -# dt0 = (d0/d1)/100 -# end -# # perform Euler step -# y1 = y0+dt0*f0 -# f1 = F(t0 + dt0, y1) -# # estimate second derivative -# d2 = norm(f1 - f0, Inf)/(tau*dt0) -# if max(d1, d2) <= 10*eps(T) -# dt1 = max(eps(T)^(1/3)/10, dt0/10^3) -# else -# pow = -(2 + log10(max(d1, d2)))/(order+1) -# dt1 = 10^pow -# end -# return T(min(100*dt0, dt1, abs(tstop-t0))) -# end - -# # a scalar version of the above -# dtinit(F, y0::Number, args...; kargs...) = dtinit((t,y)->[F(t,y[1])], [y0], args...; kargs...) - """ A simple bisection algorithm for finding a root of a solution f(x)=0 From 0fe7ec4dc127b439b3b22aecde5acec3870cd91c Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Wed, 20 Jul 2016 13:52:23 +0200 Subject: [PATCH 036/113] a few comments --- src/options.jl | 3 ++- src/runge-kutta.jl | 2 +- src/types.jl | 47 +++++++++++++++++++++++++++++++--------------- 3 files changed, 35 insertions(+), 17 deletions(-) diff --git a/src/options.jl b/src/options.jl index c1859131c..15b6278a4 100644 --- a/src/options.jl +++ b/src/options.jl @@ -1,5 +1,6 @@ abstract Options{T} +#m3: these are default options for adaptive stepper """ Options for ODE solvers. 
This type has a key-word constructor which @@ -15,7 +16,7 @@ General: - maxstep ::T maximal allowed step - norm function to calculate the norm in step control - maxiters ::T maximum number of steps -- isoutofdomain::Function checks if the solution became non-numeric (NaN or Inf) +- isoutofdomain::Function checks if the solution is outside of the allowed domain """ immutable StepperOptions{T<:Number,N<:Function,O<:Function} <: Options{T} diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index d0cb228d6..9d55fa9af 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -7,7 +7,7 @@ include("tableaus.jl") """ -A general Runge-Kutta stepper (it cen represent either, a fixed step +A general Runge-Kutta stepper (it can represent either, a fixed step or an adaptive step algorithm). """ diff --git a/src/types.jl b/src/types.jl index e70d78a05..15f60950f 100644 --- a/src/types.jl +++ b/src/types.jl @@ -1,3 +1,12 @@ +# The main types: +# - IVP -- holds the mathematical aspects of a IVP +# - AbstractStepper -- an integrator/solver +# - AbstractSolver -- holds IVP + Stepper +# - AbstractState -- holds the iterator state +# - Step -- holds the state at one time +# - + + abstract AbstractIVP{T,Y} """ @@ -45,31 +54,34 @@ Explicit ODE representing the problem """ typealias ExplicitODE{T,Y} IVP{T,Y,Function,Void,Function} -@compat (::Type{ExplicitODE}){T,Y}(t0::T, - y0::Y, - F!::Function; - J!::Function = forward_jacobian!(F!,similar(y0))) = - ExplicitODE{T,Y}(t0,y0,similar(y0),F!,nothing,J!) - +function ExplicitODE{T,Y}(t0::T, + y0::Y, + F!::Function; + J!::Function = forward_jacobian!(F!,similar(y0))) + ExplicitODE{T,Y}(t0,y0,similar(y0),F!,nothing,J!) 
+end """ Implicit ODE representing the problem -`F(t,y,dy)=0` with `y(t0)=y0` and optionally `y'(t0)=dy0` +`G(t,y,dy)=0` with `y(t0)=y0` and optionally `y'(t0)=dy0` - t0, y0: initial conditions -- F!: in place version of `F` called by `F!(t,y,dy)` -- J!: (optional) computes `J=dF/dy+a*dF/dy'` for prescribed `a`, called with `J!(t,y,dy,a)` +- G!: in place version of `G` called by `G!(res,t,y,dy)`, + returns residual in-place in `res`. +- J!: (optional) computes `J=dF/dy+a*dF/dy'` for prescribed `a`, called with `J!(out,t,y,dy,a)`. + Returns Jacobian in-place in `out`. """ typealias ImplicitODE{T,Y} IVP{T,Y,Void,Function,Function} -@compat (::Type{ImplicitODE}){T,Y}(t0::T, - y0::Y, - G!::Function; - J!::Function = forward_jacobian_implicit!(F!,similar(y0)), - dy0::Y = zero(y0)) = - ImplicitODE{T,Y}(t0,y0,dy0,nothing,G!,J!) +function ImplicitODE{T,Y}(t0::T, + y0::Y, + G!::Function; + J!::Function = forward_jacobian_implicit!(G!,similar(y0)), + dy0::Y = zero(y0)) + ImplicitODE{T,Y}(t0,y0,dy0,nothing,G!,J!) +end """ @@ -127,11 +139,15 @@ output type (`Tuple{T,Y}`) for iteration. - ode: is the prescrived ode, along with the initial data - stepper: the algorithm used to produce subsequent steps + """ immutable Solver{O<:AbstractIVP,S<:AbstractStepper,T,Y} <: AbstractSolver{T,Y} ode :: O stepper :: S end +#m3: +# - above O also holds the T and Y type information -> thus remove T,Y. +# - calling this `Solver` still trips me up Base.eltype{T,Y}(::Type{AbstractSolver{T,Y}}) = Tuple{T,Y} @@ -152,6 +168,7 @@ function collect{T,Y}(s::AbstractSolver{T,Y}) return pairs end +#m3: Is this necessary? 
# Transpose a solver to get a (Vector(t),Vector(yout)) style output type SolverT{T,Y} <: AbstractSolver{T,Y} solver::AbstractSolver{T,Y} From f979d94a5701d067eb355e0f629e4294e5051c43 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Wed, 20 Jul 2016 15:01:09 +0200 Subject: [PATCH 037/113] Dropped {T,Y} from Solver --- src/options.jl | 2 +- src/types.jl | 70 +++++++++++++++++++++++++------------------------- 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/src/options.jl b/src/options.jl index 15b6278a4..7c5358d70 100644 --- a/src/options.jl +++ b/src/options.jl @@ -54,7 +54,7 @@ function show{T}(io::IO, opts :: Options{T}) end end - +#pwl: should this be dropped? function dtinit{T}(ode::ExplicitODE{T},order::Int,reltol::T,abstol::T,tstop::T) t0 = ode.t0 y0 = ode.y0 diff --git a/src/types.jl b/src/types.jl index 15f60950f..081040980 100644 --- a/src/types.jl +++ b/src/types.jl @@ -1,13 +1,14 @@ # The main types: # - IVP -- holds the mathematical aspects of a IVP -# - AbstractStepper -- an integrator/solver -# - AbstractSolver -- holds IVP + Stepper +# - AbstractStepper -- an integrator/solver (maybe AbstractIntegrator?) +# - Solver -- holds IVP + Stepper (maybe ProblemSpec, Problem, Spec?) # - AbstractState -- holds the iterator state # - Step -- holds the state at one time # - abstract AbstractIVP{T,Y} +Base.eltype{T,Y}(::Type{AbstractIVP{T,Y}}) = T,Y """ @@ -41,6 +42,9 @@ type IVP{T,Y,F,G,J} <: AbstractIVP{T,Y} G! ::G J! ::J end +@compat Base.eltype(t::Type{IVP}) = eltype(supertype(t)) +Base.eltype(t::IVP) = eltype(typeof(t)) + """ @@ -55,9 +59,9 @@ Explicit ODE representing the problem """ typealias ExplicitODE{T,Y} IVP{T,Y,Function,Void,Function} function ExplicitODE{T,Y}(t0::T, - y0::Y, - F!::Function; - J!::Function = forward_jacobian!(F!,similar(y0))) + y0::Y, + F!::Function; + J!::Function = forward_jacobian!(F!,similar(y0))) ExplicitODE{T,Y}(t0,y0,similar(y0),F!,nothing,J!) 
end @@ -114,10 +118,10 @@ Holds a value of a function and its derivative at time t. This is usually used to store the solution of an ODE at particular times. """ -type Step{T,S} +type Step{T,Y} t ::T - y ::S - dy::S + y ::Y + dy::Y end @@ -128,8 +132,6 @@ function show(io::IO, state::Step) end -abstract AbstractSolver{T,Y} - """ This is an iterable type, each call to next(...) produces a next step @@ -141,18 +143,15 @@ output type (`Tuple{T,Y}`) for iteration. """ -immutable Solver{O<:AbstractIVP,S<:AbstractStepper,T,Y} <: AbstractSolver{T,Y} +immutable Solver{O<:AbstractIVP,S<:AbstractStepper} ode :: O stepper :: S end #m3: -# - above O also holds the T and Y type information -> thus remove T,Y. # - calling this `Solver` still trips me up -Base.eltype{T,Y}(::Type{AbstractSolver{T,Y}}) = Tuple{T,Y} - -Solver{T,Y}(ode::AbstractIVP{T,Y},stepper::AbstractStepper{T}) = - Solver{typeof(ode),typeof(stepper),T,Y}(ode,stepper) +Base.eltype{O}(::Type{Solver{O}}) = eltype(O) +Base.eltype{O}(::Solver{O}) = eltype(O) # filter the wrong combinations of ode and stepper solve{T,S}(ode::T, stepper::Type{S}, options...) = @@ -160,7 +159,8 @@ solve{T,S}(ode::T, stepper::Type{S}, options...) = # In Julia 0.5 the collect needs length to be defined, we cannot do # that for a solver -function collect{T,Y}(s::AbstractSolver{T,Y}) +function collect(s::Solver) + T,Y = eltype(s) pairs = Array(Tuple{T,Y},0) for (t,y) in s push!(pairs,(t,copy(y))) @@ -168,21 +168,21 @@ function collect{T,Y}(s::AbstractSolver{T,Y}) return pairs end -#m3: Is this necessary? -# Transpose a solver to get a (Vector(t),Vector(yout)) style output -type SolverT{T,Y} <: AbstractSolver{T,Y} - solver::AbstractSolver{T,Y} -end - -Base.transpose(s::AbstractSolver) = SolverT(s) -Base.transpose(s::SolverT) = s.solver - -function collect{T,Y}(s::SolverT{T,Y}) - tout = Array(T,0) - yout = Array(Y,0) - for (t,y) in s.solver - push!(tout,t) - push!(yout,y) - end - return (tout,yout) -end +#m3: Is this necessary? 
-> this would need the AbstractSolver so I commented it. +# # Transpose a solver to get a (Vector(t),Vector(yout)) style output +# type SolverT{O,S} <: Solver{O,S} +# solver::Solver{T,Y} +# end + +# Base.transpose(s::Solver) = SolverT(s) +# Base.transpose(s::SolverT) = s.solver + +# function collect{T,Y}(s::SolverT{T,Y}) +# tout = Array(T,0) +# yout = Array(Y,0) +# for (t,y) in s.solver +# push!(tout,t) +# push!(yout,y) +# end +# return (tout,yout) +# end From 86ec3a06b3291a85a0c837ab8ef02ac0bd68dbd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 20 Jul 2016 17:35:59 +0200 Subject: [PATCH 038/113] Minor fixes after @mauro3 comments --- examples/test.jl | 12 ++--------- src/dense.jl | 8 ++++---- src/options.jl | 39 ++++-------------------------------- src/runge-kutta.jl | 18 ++++++++--------- src/types.jl | 50 ++++++++++++++-------------------------------- 5 files changed, 33 insertions(+), 94 deletions(-) diff --git a/examples/test.jl b/examples/test.jl index bd7ef081d..f018027ab 100644 --- a/examples/test.jl +++ b/examples/test.jl @@ -1,4 +1,4 @@ -include("ODE.jl") +include("../src/ODE.jl") module Test @@ -13,16 +13,11 @@ st = ODE.RKStepperAdaptive{:rk45} ode = ODE.ExplicitODE(t0,y0,(t,y,dy)->dy[1]=y[1]) opts = Dict(:initstep=>0.1, :tstop=>1., - :tspan=>[0.,1.], + # :tspan=>[0.,1.], :points=>:specified, :reltol=>1e-5, :abstol=>1e-5) -stepper = st{T}(ode) -sol = ODE.Solver(ode,stepper) -println(sol) - - sol = ODE.solve(ode,st;opts...) den = ODE.dense(sol;opts...) println(sol.stepper.options) @@ -40,7 +35,4 @@ end println(collect(sol)) println(collect(den)) -println(collect(sol')) -println(collect(den')) - end diff --git a/src/dense.jl b/src/dense.jl index 16363e672..8e757d747 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -38,16 +38,16 @@ immutable DenseStepper{S<:Solver,O<:DenseOptions} <: AbstractStepper end -@compat function (::Type{DenseStepper{T}}){T,S<:Solver}(solver::S; - options...) 
+@compat function (::Type{DenseStepper{T}}){T,S<:Solver}(solver::S;options...) DenseStepper(solver,DenseOptions{T}(;options...)) end -function dense{O<:ExplicitODE,S,T,Y}(sol::Solver{O,S,T,Y}; options...) +function dense{O<:ExplicitODE}(sol::Solver{O}; options...) + T,_ = eltype(sol) opt = DenseOptions{T}(;options...) den = DenseStepper(sol,opt) - Solver{O,typeof(den),T,Y}(sol.ode, den) + Solver(sol.ode, den) end """ diff --git a/src/options.jl b/src/options.jl index 7c5358d70..8d1a9001a 100644 --- a/src/options.jl +++ b/src/options.jl @@ -31,16 +31,15 @@ immutable StepperOptions{T<:Number,N<:Function,O<:Function} <: Options{T} isoutofdomain::O end -@compat function (::Type{StepperOptions{T}}){T,N,O}(ode::ExplicitODE, - order::Int; - tspan::Vector = T[Inf], - tstop = tspan[end], +@compat function (::Type{StepperOptions{T}}){T,N,O}(; + tstop = T(Inf), reltol = eps(T)^T(1//3)/10, abstol = eps(T)^T(1//2)/10, minstep = 10*eps(T), maxstep = 1/minstep, - initstep = dtinit(ode,order,reltol,abstol,tstop), + initstep = minstep, norm::N = Base.norm, + steps = repeated(initstep), maxiters = T(Inf), isoutofdomain::O = Base.isnan, kargs...) @@ -53,33 +52,3 @@ function show{T}(io::IO, opts :: Options{T}) @printf("%-20s = %s\n",name,getfield(opts,name)) end end - -#pwl: should this be dropped? 
-function dtinit{T}(ode::ExplicitODE{T},order::Int,reltol::T,abstol::T,tstop::T) - t0 = ode.t0 - y0 = ode.y0 - - f0 = similar(y0) - tau = max(reltol*norm(y0, Inf), abstol) - d0 = norm(y0, Inf)/tau - ode.F!(t0, y0, f0) - d1 = norm(f0, Inf)/tau - if min(d0,d1) < eps(T)^(1/3) - dt0 = eps(T)^(1/3)/10 - else - dt0 = (d0/d1)/100 - end - # perform Euler step - y1 = y0+dt0*f0 - f1 = similar(f0) - ode.F!(t0 + dt0, y1, f1) - # estimate second derivative - d2 = norm(f1 - f0, Inf)/(tau*dt0) - if max(d1, d2) <= 10*eps(T) - dt1 = max(eps(T)^(1/3)/10, dt0/10^3) - else - pow = -(2 + log10(max(d1, d2)))/(order+1) - dt1 = 10^pow - end - return T(min(100*dt0, dt1, abs(tstop-t0))) -end diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 9d55fa9af..74a7cec5a 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -16,30 +16,28 @@ immutable RKStepper{Kind,Name,T,O<:StepperOptions} <: AbstractStepper{T} options::O end +typealias RKStepperFixed RKStepper{:fixed} +typealias RKStepperAdaptive RKStepper{:adaptive} -@compat function (::Type{RKStepper{Kind,Name,T}}){Kind,Name,T}(ode;options...) + +@compat function (::Type{RKStepper{Kind,Name,T}}){Kind,Name,T}(;options...) tab = convert(TableauRKExplicit{T},tableaus_rk_explicit[Name]) if Kind == :fixed && isadaptive(tab) error("Cannot construct a fixed step method from an adaptive step tableau") elseif Kind == :adaptive && !isadaptive(tab) error("Cannot construct an adaptive step method from an fixed step tableau") end - ord = minimum(order(tab)) - options = StepperOptions{T}(ode,ord;options...) - RKStepper{Kind,Name,T,typeof(options)}(tab,options) + opts = StepperOptions{T}(;options...) 
+ RKStepper{Kind,Name,T,typeof(opts)}(tab,opts) end -typealias RKStepperFixed RKStepper{:fixed} -typealias RKStepperAdaptive RKStepper{:adaptive} - - order(stepper::RKStepper) = minimum(order(stepper.tableau)) -name(stepper::RKStepper) = typeof(stepper.tableau) +name(stepper::RKStepper) = stepper.tableau.name solve{T,S<:RKStepper}(ode::ExplicitODE{T}, stepper::Type{S}; options...) = - Solver(ode,stepper{T}(ode;options...)) + Solver(ode,stepper{T}(;options...)) # lower level interface diff --git a/src/types.jl b/src/types.jl index 081040980..53c4c2314 100644 --- a/src/types.jl +++ b/src/types.jl @@ -58,10 +58,10 @@ Explicit ODE representing the problem """ typealias ExplicitODE{T,Y} IVP{T,Y,Function,Void,Function} -function ExplicitODE{T,Y}(t0::T, - y0::Y, - F!::Function; - J!::Function = forward_jacobian!(F!,similar(y0))) +@compat function (::Type{ExplicitODE}){T,Y}(t0::T, + y0::Y, + F!::Function; + J!::Function = forward_jacobian!(F!,similar(y0))) ExplicitODE{T,Y}(t0,y0,similar(y0),F!,nothing,J!) end @@ -79,11 +79,11 @@ Implicit ODE representing the problem """ typealias ImplicitODE{T,Y} IVP{T,Y,Void,Function,Function} -function ImplicitODE{T,Y}(t0::T, - y0::Y, - G!::Function; - J!::Function = forward_jacobian_implicit!(G!,similar(y0)), - dy0::Y = zero(y0)) +@compat function (::Type{ImplicitODE}){T,Y}(t0::T, + y0::Y, + G!::Function; + J!::Function = forward_jacobian_implicit!(G!,similar(y0)), + dy0::Y = zero(y0)) ImplicitODE{T,Y}(t0,y0,dy0,nothing,G!,J!) end @@ -135,8 +135,7 @@ end """ This is an iterable type, each call to next(...) produces a next step -of a numerical solution to an ODE. Types `T` and `Y` determine the -output type (`Tuple{T,Y}`) for iteration. +of a numerical solution to an ODE. - ode: is the prescrived ode, along with the initial data - stepper: the algorithm used to produce subsequent steps @@ -144,8 +143,8 @@ output type (`Tuple{T,Y}`) for iteration. 
""" immutable Solver{O<:AbstractIVP,S<:AbstractStepper} - ode :: O - stepper :: S + ode ::O + stepper ::S end #m3: # - calling this `Solver` still trips me up @@ -154,11 +153,11 @@ Base.eltype{O}(::Type{Solver{O}}) = eltype(O) Base.eltype{O}(::Solver{O}) = eltype(O) # filter the wrong combinations of ode and stepper -solve{T,S}(ode::T, stepper::Type{S}, options...) = - error("The $S doesn't support $T") +solve{O,S}(ode::O, stepper::Type{S}, options...) = + error("The $S doesn't support $O") # In Julia 0.5 the collect needs length to be defined, we cannot do -# that for a solver +# that for a solver but we can implement our own collect function collect(s::Solver) T,Y = eltype(s) pairs = Array(Tuple{T,Y},0) @@ -167,22 +166,3 @@ function collect(s::Solver) end return pairs end - -#m3: Is this necessary? -> this would need the AbstractSolver so I commented it. -# # Transpose a solver to get a (Vector(t),Vector(yout)) style output -# type SolverT{O,S} <: Solver{O,S} -# solver::Solver{T,Y} -# end - -# Base.transpose(s::Solver) = SolverT(s) -# Base.transpose(s::SolverT) = s.solver - -# function collect{T,Y}(s::SolverT{T,Y}) -# tout = Array(T,0) -# yout = Array(Y,0) -# for (t,y) in s.solver -# push!(tout,t) -# push!(yout,y) -# end -# return (tout,yout) -# end From b4706118b70d6e59d11231f4b72cf7c10b7d3850 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 20 Jul 2016 17:53:03 +0200 Subject: [PATCH 039/113] Adaptive and Fixed Options --- examples/test.jl | 53 +++++++++++++++++++++++------------------- src/iterators.jl | 19 ++++++++++----- src/options.jl | 58 ++++++++++++++++++++++++++++++++-------------- src/runge-kutta.jl | 22 +++++++++++++----- 4 files changed, 98 insertions(+), 54 deletions(-) diff --git a/examples/test.jl b/examples/test.jl index f018027ab..61f0c8df2 100644 --- a/examples/test.jl +++ b/examples/test.jl @@ -9,30 +9,35 @@ Y = Vector{T} t0 = zero(T) y0 = T[one(T)] -st = ODE.RKStepperAdaptive{:rk45} -ode = 
ODE.ExplicitODE(t0,y0,(t,y,dy)->dy[1]=y[1]) -opts = Dict(:initstep=>0.1, - :tstop=>1., - # :tspan=>[0.,1.], - :points=>:specified, - :reltol=>1e-5, - :abstol=>1e-5) - -sol = ODE.solve(ode,st;opts...) -den = ODE.dense(sol;opts...) -println(sol.stepper.options) - -println("Raw iterator") -for (t,y) in sol - println((t,y)) +steppers = [ODE.RKStepperAdaptive{:rk45}, + ODE.RKStepperFixed{:feuler}] + +for st in steppers + ode = ODE.ExplicitODE(t0,y0,(t,y,dy)->dy[1]=y[1]) + opts = Dict(:initstep=>0.1, + :tstop=>1., + # :tspan=>[0.,1.], + :points=>:specified, + :reltol=>1e-5, + :abstol=>1e-5) + + sol = ODE.solve(ode,st;opts...) + den = ODE.dense(sol;opts...) + println(typeof(sol.stepper.options)) + println(sol.stepper.options) + + println("Raw iterator") + for (t,y) in sol + println((t,y)) + end + + println("Dense output") + for (t,y) in den + println((t,y)) + end + + println(collect(sol)) + println(collect(den)) end -println("Dense output") -for (t,y) in den - println((t,y)) -end - -println(collect(sol)) -println(collect(den)) - end diff --git a/src/iterators.jl b/src/iterators.jl index 57fe431b6..9eff999d8 100644 --- a/src/iterators.jl +++ b/src/iterators.jl @@ -4,14 +4,21 @@ Generic done method, some steppers may implement their own versions. 
function done(s::Solver, state::AbstractState) st = s.stepper + if state.step.t >= st.options.tstop return true - elseif state.dt < st.options.minstep - warn("minstep reached.") - return true - elseif state.iters >= st.options.maxiters - warn("Maximum number of iterations ($(Int(s.options.maxiters))) reached, consider setting a larger maxiter.") - return true end + + # specific for adaptive stepper + if isadaptive(st) + if state.dt < st.options.minstep + warn("Minstep reached.") + return true + elseif state.iters >= st.options.maxiters + warn("Maximum number of iterations ($(Int(s.options.maxiters))) reached, consider setting a larger maxiter.") + return true + end + end + return false end diff --git a/src/options.jl b/src/options.jl index 8d1a9001a..1985bf4de 100644 --- a/src/options.jl +++ b/src/options.jl @@ -1,10 +1,9 @@ abstract Options{T} -#m3: these are default options for adaptive stepper """ -Options for ODE solvers. This type has a key-word constructor which -will fill the structure with default values. +Generic options for adaptive ODE solvers. This type has a key-word +constructor which will fill the structure with default values. General: @@ -19,7 +18,7 @@ General: - isoutofdomain::Function checks if the solution is outside of the allowed domain """ -immutable StepperOptions{T<:Number,N<:Function,O<:Function} <: Options{T} +immutable AdaptiveOptions{T,N<:Function,O<:Function} <: Options{T} tstop::T reltol::T abstol::T @@ -31,20 +30,43 @@ immutable StepperOptions{T<:Number,N<:Function,O<:Function} <: Options{T} isoutofdomain::O end -@compat function (::Type{StepperOptions{T}}){T,N,O}(; - tstop = T(Inf), - reltol = eps(T)^T(1//3)/10, - abstol = eps(T)^T(1//2)/10, - minstep = 10*eps(T), - maxstep = 1/minstep, - initstep = minstep, - norm::N = Base.norm, - steps = repeated(initstep), - maxiters = T(Inf), - isoutofdomain::O = Base.isnan, - kargs...) 
- - StepperOptions{T,N,O}(tstop,reltol,abstol,minstep,maxstep,initstep,norm,maxiters,isoutofdomain) +@compat function (::Type{AdaptiveOptions{T}}){T,N,O}(; + tstop = T(Inf), + reltol = eps(T)^T(1//3)/10, + abstol = eps(T)^T(1//2)/10, + minstep = 10*eps(T), + maxstep = 1/minstep, + initstep = minstep, + norm::N = Base.norm, + maxiters = T(Inf), + isoutofdomain::O = Base.isnan, + kargs...) + + AdaptiveOptions{T,N,O}(tstop,reltol,abstol,minstep,maxstep,initstep,norm,maxiters,isoutofdomain) +end + +""" + +Generic options for fixed step ODE solvers. This type has a key-word +constructor which will fill the structure with default values. + +General: + +- initstep ::T initial step +- tstop ::T end integration time + +""" +immutable FixedOptions{T} <: Options{T} + tstop::T + initstep::T +end + +@compat function (::Type{FixedOptions{T}}){T}(; + tstop = T(Inf), + initstep = minstep, + kargs...) + + FixedOptions{T}(tstop,initstep) end function show{T}(io::IO, opts :: Options{T}) diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 74a7cec5a..bec8de2f4 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -5,29 +5,36 @@ include("tableaus.jl") # intermediate level interface + """ A general Runge-Kutta stepper (it can represent either, a fixed step or an adaptive step algorithm). """ -immutable RKStepper{Kind,Name,T,O<:StepperOptions} <: AbstractStepper{T} +immutable RKStepper{Kind,Name,T,O<:Options} <: AbstractStepper{T} tableau::TableauRKExplicit{T} options::O end + typealias RKStepperFixed RKStepper{:fixed} typealias RKStepperAdaptive RKStepper{:adaptive} @compat function (::Type{RKStepper{Kind,Name,T}}){Kind,Name,T}(;options...) 
tab = convert(TableauRKExplicit{T},tableaus_rk_explicit[Name]) - if Kind == :fixed && isadaptive(tab) - error("Cannot construct a fixed step method from an adaptive step tableau") - elseif Kind == :adaptive && !isadaptive(tab) - error("Cannot construct an adaptive step method from an fixed step tableau") + if Kind == :fixed + opts = FixedOptions{T}(;options...) + if isadaptive(tab) + error("Cannot construct a fixed step method from an adaptive step tableau") + end + elseif Kind == :adaptive + opts = AdaptiveOptions{T}(;options...) + if !isadaptive(tab) + error("Cannot construct an adaptive step method from an fixed step tableau") + end end - opts = StepperOptions{T}(;options...) RKStepper{Kind,Name,T,typeof(opts)}(tab,opts) end @@ -36,6 +43,9 @@ order(stepper::RKStepper) = minimum(order(stepper.tableau)) name(stepper::RKStepper) = stepper.tableau.name +isadaptive(::RKStepper{:adaptive}) = true +isadaptive(::RKStepper{:fixed}) = false + solve{T,S<:RKStepper}(ode::ExplicitODE{T}, stepper::Type{S}; options...) 
= Solver(ode,stepper{T}(;options...)) From 843c26de01437d70e3f0c58c568ddcab2eca7bf6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 20 Jul 2016 19:49:40 +0200 Subject: [PATCH 040/113] Tests for RK pass, removed backward integration --- examples/test.jl | 2 +- src/interfaces.jl | 110 ++++++++++------------------------------------ src/options.jl | 8 ++-- test/runtests.jl | 21 ++++----- 4 files changed, 40 insertions(+), 101 deletions(-) diff --git a/examples/test.jl b/examples/test.jl index 61f0c8df2..4129bf7cd 100644 --- a/examples/test.jl +++ b/examples/test.jl @@ -16,7 +16,7 @@ for st in steppers ode = ODE.ExplicitODE(t0,y0,(t,y,dy)->dy[1]=y[1]) opts = Dict(:initstep=>0.1, :tstop=>1., - # :tspan=>[0.,1.], + :tspan=>[0.0,0.5,1.0], :points=>:specified, :reltol=>1e-5, :abstol=>1e-5) diff --git a/src/interfaces.jl b/src/interfaces.jl index fb09eb430..24e4479ef 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -5,48 +5,29 @@ tspan[end] is the last integration time. """ -function ode{T<:Number,S<:AbstractStepper}(F, y0, - tspan::AbstractVector{T}, - stepper::Type{S}; - jac = forward_jacobian(F,copy(y0)), - kargs...) +function ode{T,Y,S<:AbstractStepper}(F, y0::Y, + tspan::AbstractVector{T}, + stepper::Type{S}; + kargs...) t0 = tspan[1] # construct a solver - equation = explicit_ineff(t0,y0,F,jac) - - solver = solve(equation,stepper;kargs...) - - # handle different directions of time integration - if issorted(tspan) - # do nothing, we are already set with the solver - solution = collect(dense(solver;kargs...)) - elseif issorted(reverse(tspan)) - # Reverse the time direction if necessary. dense() only works - # for positive time direction. 
- - # TODO: still ugly but slightly less bandaid-like then the - # previous solution - solution = map(ty->(2*t0-ty[1],ty[2]),collect(dense(reverse_time(solver; kargs...);kargs...))) - else - warn("Unsorted output times are not supported") - return ([t0],[y0]) + equation = explicit_ineff(t0,y0,F) + solver = solve(equation, stepper; tspan = tspan, kargs...) + dsolver = dense(solver; tspan = tspan, kargs...) + + # determine if we have to unpack y + extract = Y <: Number + + tout = Array(T,0) + yout = Array(Y,0) + for (t,y) in dsolver + push!(tout,t) + push!(yout, extract ? y[1] : copy(y)) end - n = length(solution) - - # convert a list of pairs to a pair of arrays - # TODO: leave it out as a list of pairs? - tn = Array(T,n) - yn = Array(typeof(y0),n) - - for (n,(t,y)) in enumerate(solution) - tn[n] = t - yn[n] = isa(y0,Number) ? y[1] : y - end - - return (tn,yn) + return (tout,yout) end """ @@ -85,49 +66,6 @@ end const ode45 = ode45_dp - -""" - -A nasty hack to convert a solver with negative time direction into a -solver with positive time direction. This is necessary as negative -time direction is not supported by steppers (including the dense -output). This only works for ExplicitODE. - -""" -function reverse_time(sol::Solver; - tstop = tstop, - tspan = tspan, - stopevent=stopevent, - kargs...) - ode, stepper = sol.ode, sol.stepper - - t0 = ode.t0 - y0 = ode.y0 - - # TODO: improve the implementation - function F_reverse!(t,y,dy) - ode.F!(2*t0-t,y,dy) - dy[:]=-dy - end - - # TODO: is that how the jacobian changes? - function jac_reverse!(t,y,J) - ode.J!(2*t0-t,y,J) - J[:]=-J - end - - # ExplicitODE is immutable - ode_reversed = ExplicitODE(t0,y0,F_reverse!,J! = jac_reverse!) - - # TODO: we are modifying options here, should we construct new - # options insted? - tstop = 2*t0-options.tstop - tspan = reverse(2*t0.-options.tspan) - stopevent = (t,y)->stopevent(2*t0-t,y) - return solve(ode_reversed,stepper; tstop = tstop, tspan = tspan, stopevent = stopevent, kargs...) 
-end - - """ Convert a out-of-place explicitly defined ODE function to @@ -135,10 +73,9 @@ ExplicitODE. As the name suggests, the result is not going to be very efficient. """ -function explicit_ineff{T,Y}(t0::T, y0::AbstractVector{Y}, F::Function, jac::Function) - F!(t,y,dy) =copy!(dy,F(t,y)) - jac!(t,y,J)=copy!(J,jac(t,y)) - return ExplicitODE(t0,y0,F!; J! = jac!) +function explicit_ineff{T,Y}(t0::T, y0::AbstractVector{Y}, F::Function) + F!(t,y,dy) = copy!(dy,F(t,y)) + return ExplicitODE(t0,y0,F!) end # A temporary solution for handling scalars, should be faster then the @@ -147,8 +84,7 @@ end # and jac to vector functions F! and jac!. Still, solving this ODE # will result in a vector of length one result, so additional external # conversion is necessary. -function explicit_ineff{T,Y}(t0::T, y0::Y, F::Function, jac) - F!(t,y,dy) =(dy[1]=F(t,y[1])) - jac!(t,y,J)=(J[1]=jac(t,y[1])) - return ExplicitODE(t0,[y0],F!; J! = jac!) +function explicit_ineff{T,Y}(t0::T, y0::Y, F::Function) + F!(t,y,dy) = (dy[1]=F(t,y[1])) + return ExplicitODE(t0,[y0],F!) end diff --git a/src/options.jl b/src/options.jl index 1985bf4de..20d8097ae 100644 --- a/src/options.jl +++ b/src/options.jl @@ -31,7 +31,8 @@ immutable AdaptiveOptions{T,N<:Function,O<:Function} <: Options{T} end @compat function (::Type{AdaptiveOptions{T}}){T,N,O}(; - tstop = T(Inf), + tspan = T[Inf], + tstop = tspan[end], reltol = eps(T)^T(1//3)/10, abstol = eps(T)^T(1//2)/10, minstep = 10*eps(T), @@ -62,8 +63,9 @@ immutable FixedOptions{T} <: Options{T} end @compat function (::Type{FixedOptions{T}}){T}(; - tstop = T(Inf), - initstep = minstep, + tspan = T[Inf], + tstop = tspan[end], + initstep = 10*eps(T), kargs...) FixedOptions{T}(tstop,initstep) diff --git a/test/runtests.jl b/test/runtests.jl index ee4991d6a..dee887f8e 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -37,16 +37,16 @@ for solver in solvers @test maximum(abs(y-6t)) < tol tj,yj=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1, J! 
= (t,y,dy)->dy[1]=0.0) @test maximum(abs(yj-6tj)) < tol - @test norm(map(norm,yj-y,Inf)) y = t.^2 # dt - t,y=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001) + t,y =solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001) @test maximum(abs(y-t.^2)) < tol tj,yj=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001, J! = (t,y,dy)->dy[1]=0.0) @test maximum(abs(yj-tj.^2)) < tol - @test norm(map(norm,yj-y,Inf)) y = y0*e.^t @@ -55,13 +55,14 @@ for solver in solvers @test maximum(abs(y-e.^t)) < tol tj,yj=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) @test maximum(abs(yj-e.^tj)) < tol - @test norm(map(norm,yj-y,Inf))y, 1., [1:-.001:0;], initstep=0.001) - @test maximum(abs(y-e.^(t-1))) < tol - tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) - @test maximum(abs(yj-e.^(tj-1))) < tol - @test norm(map(norm,yj-y,Inf))y, 1., [1:-.001:0;], initstep=0.001) + # @test maximum(abs(y-e.^(t-1))) < tol + # tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) + # @test maximum(abs(yj-e.^(tj-1))) < tol + # @test norm(yj-y,Inf) v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) @@ -74,7 +75,7 @@ for solver in solvers tj,yj=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001, J! = (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]])) ysj = hcat(yj...).' # convert Vector{Vector{Float}} to Matrix{Float} @test maximum(abs(ysj-[cos(tj)-2*sin(tj) 2*cos(tj)+sin(tj)])) < tol - @test norm(map(norm,yj-y,Inf))2y, 0., [0,1]) From a2c15275f50fcd0c09e43c86fa2b1b6f1b13365f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 20 Jul 2016 19:57:34 +0200 Subject: [PATCH 041/113] Fixed ode23s --- src/ode23s.jl | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/src/ode23s.jl b/src/ode23s.jl index 82b3df7a7..eba85e059 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -3,26 +3,30 @@ # # [SR97] L.F. Shampine and M.W. 
Reichelt: "The MATLAB ODE Suite," SIAM Journal on Scientific Computing, Vol. 18, 1997, pp. 1–22 -immutable ModifiedRosenbrockStepper{T<:Number} <: AbstractStepper +immutable ModifiedRosenbrockStepper{T<:Number,O<:Options} <: AbstractStepper d ::T e32::T + options::O +end - function ModifiedRosenbrockStepper() - d = T(1/(2 + sqrt(2))) - e32 = T(6 + sqrt(2)) - new(d,e32) - end +@compat function (::Type{ModifiedRosenbrockStepper{T}}){T}(;options...) + d = T(1/(2 + sqrt(2))) + e32 = T(6 + sqrt(2)) + opt = AdaptiveOptions{T}(;options...) + ModifiedRosenbrockStepper(d,e32,opt) end + # TODO: is this correct? order(::ModifiedRosenbrockStepper) = 2 name(::ModifiedRosenbrockStepper) = "Modified Rosenbrock Stepper" +isadaptive(::ModifiedRosenbrockStepper) = true # define the set of ODE problems with which this stepper can work -solve(ode::ExplicitODE, stepper::ModifiedRosenbrockStepper, options) = - Solver(ode, stepper, options) +solve{T,S<:ModifiedRosenbrockStepper}(ode::ExplicitODE{T}, stepper::Type{S}; options...) 
= + Solver(ode,stepper{T}(;options...)) # lower level interface (iterator) @@ -58,7 +62,7 @@ end function start{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}) t = s.ode.t0 - dt = s.options.initstep + dt = s.stepper.options.initstep y = s.ode.y0 dy = zero(y) @@ -84,7 +88,7 @@ function next{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state stepper = s.stepper ode = s.ode step = state.step - opts = s.options + opts = s.stepper.options F1, F2, J = state.F1, state.F2, state.J @@ -97,12 +101,12 @@ function next{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state while true state.iters += 1 - if state.iters > s.options.maxiters + if state.iters > opts.maxiters return ((step.t,step.y), state) end # trim the step size to match the bounds of integration - dt = min(s.options.tstop-t,dt) + dt = min(opts.tstop-t,dt) W = lufact!( eye(J) - dt*d*J ) From 7b8b4490ed63ef52c6c906546f121ff36c1a1dee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 20 Jul 2016 20:29:23 +0200 Subject: [PATCH 042/113] Fixed the tests for the iterator interface --- src/dense.jl | 5 ----- test/iterators.jl | 27 +++++++++++++++------------ 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 8e757d747..f8ab9cb23 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -38,11 +38,6 @@ immutable DenseStepper{S<:Solver,O<:DenseOptions} <: AbstractStepper end -@compat function (::Type{DenseStepper{T}}){T,S<:Solver}(solver::S;options...) - DenseStepper(solver,DenseOptions{T}(;options...)) -end - - function dense{O<:ExplicitODE}(sol::Solver{O}; options...) T,_ = eltype(sol) opt = DenseOptions{T}(;options...) diff --git a/test/iterators.jl b/test/iterators.jl index 5660f5433..9f751516d 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -26,15 +26,15 @@ testsets = [ :isscalar => true, :name => "y'=y", :initstep => 0.001), - Dict( - :F! 
=> (t,y,dy)->dy[1]=y[1], - :y0 => [1.0], - :tspan => [1:-0.001:0;], - :jac => (t,y,dy)->dy[1]=1.0, - :sol => t->[exp(t-1)], - :isscalar => true, - :name => "y'=y backwards", - :initstep => 0.001), + # Dict( + # :F! => (t,y,dy)->dy[1]=y[1], + # :y0 => [1.0], + # :tspan => [1:-0.001:0;], + # :jac => (t,y,dy)->dy[1]=1.0, + # :sol => t->[exp(t-1)], + # :isscalar => true, + # :name => "y'=y backwards", + # :initstep => 0.001), Dict( :F! => (t,y,dy)->(dy[1]=-y[2];dy[2]=y[1]), :y0 => [1.0,2.0], @@ -68,7 +68,7 @@ function test_ode() for ts in testsets println("Testing problem $(ts[:name])") - tspan, h0, stepper = ts[:tspan], ts[:initstep], rks{eltype(ts[:tspan])}() + tspan, h0, stepper = ts[:tspan], ts[:initstep], rks y0, F!, jac!, sol = ts[:y0], ts[:F!], ts[:jac], ts[:sol] @@ -117,8 +117,11 @@ function test_ode() # test the iterator interface (they only support forward time integration) if issorted(tspan) equation = ODE.ExplicitODE(tspan[1],y0,F!) - opts = ODE.Options{eltype(tspan)}(tspan = tspan,initstep = h0,points = points) - solver = ODE.solve(equation,stepper,opts) + opts = Dict(:tspan => tspan, + :initstep => h0, + :points => points) + + solver = ODE.solve(equation,stepper;opts...) for (t,y) in solver @test_approx_eq_eps y sol(t) tol From fbb68e22c6fb2bcdaf36c298a035f1e7a7163af5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 20 Jul 2016 20:38:35 +0200 Subject: [PATCH 043/113] Brought back the Jacobian --- src/interfaces.jl | 14 +++++++------- src/types.jl | 6 ++++-- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/interfaces.jl b/src/interfaces.jl index 24e4479ef..8eabe6a09 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -13,7 +13,7 @@ function ode{T,Y,S<:AbstractStepper}(F, y0::Y, t0 = tspan[1] # construct a solver - equation = explicit_ineff(t0,y0,F) + equation = explicit_ineff(t0,y0,F;kargs...) solver = solve(equation, stepper; tspan = tspan, kargs...) dsolver = dense(solver; tspan = tspan, kargs...) 
@@ -73,9 +73,9 @@ ExplicitODE. As the name suggests, the result is not going to be very efficient. """ -function explicit_ineff{T,Y}(t0::T, y0::AbstractVector{Y}, F::Function) - F!(t,y,dy) = copy!(dy,F(t,y)) - return ExplicitODE(t0,y0,F!) +function explicit_ineff{T,Y}(t0::T, y0::AbstractVector{Y}, F::Function; kargs...) + F!(t,y,dy) =copy!(dy,F(t,y)) + return ExplicitODE(t0,y0,F!; kargs...) end # A temporary solution for handling scalars, should be faster then the @@ -84,7 +84,7 @@ end # and jac to vector functions F! and jac!. Still, solving this ODE # will result in a vector of length one result, so additional external # conversion is necessary. -function explicit_ineff{T,Y}(t0::T, y0::Y, F::Function) - F!(t,y,dy) = (dy[1]=F(t,y[1])) - return ExplicitODE(t0,[y0],F!) +function explicit_ineff{T,Y}(t0::T, y0::Y, F::Function; kargs...) + F!(t,y,dy) =(dy[1]=F(t,y[1])) + return ExplicitODE(t0,[y0],F!; kargs...) end diff --git a/src/types.jl b/src/types.jl index 53c4c2314..8fb622324 100644 --- a/src/types.jl +++ b/src/types.jl @@ -61,7 +61,8 @@ typealias ExplicitODE{T,Y} IVP{T,Y,Function,Void,Function} @compat function (::Type{ExplicitODE}){T,Y}(t0::T, y0::Y, F!::Function; - J!::Function = forward_jacobian!(F!,similar(y0))) + J!::Function = forward_jacobian!(F!,similar(y0)), + kargs...) ExplicitODE{T,Y}(t0,y0,similar(y0),F!,nothing,J!) end @@ -83,7 +84,8 @@ typealias ImplicitODE{T,Y} IVP{T,Y,Void,Function,Function} y0::Y, G!::Function; J!::Function = forward_jacobian_implicit!(G!,similar(y0)), - dy0::Y = zero(y0)) + dy0::Y = zero(y0), + kargs...) ImplicitODE{T,Y}(t0,y0,dy0,nothing,G!,J!) 
end From 21e35068f919b940868bd2fe367231740735a197 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 21 Jul 2016 16:55:53 +0200 Subject: [PATCH 044/113] New dense interface --- examples/test.jl | 15 +++------------ src/dense.jl | 13 ++++++++----- src/interfaces.jl | 6 ++++-- test/iterators.jl | 3 ++- 4 files changed, 17 insertions(+), 20 deletions(-) diff --git a/examples/test.jl b/examples/test.jl index 4129bf7cd..3f5067124 100644 --- a/examples/test.jl +++ b/examples/test.jl @@ -10,34 +10,25 @@ t0 = zero(T) y0 = T[one(T)] steppers = [ODE.RKStepperAdaptive{:rk45}, - ODE.RKStepperFixed{:feuler}] + ODE.RKStepperFixed{:feuler}, + ODE.DenseStepper] for st in steppers ode = ODE.ExplicitODE(t0,y0,(t,y,dy)->dy[1]=y[1]) opts = Dict(:initstep=>0.1, - :tstop=>1., - :tspan=>[0.0,0.5,1.0], + :tspan=>[0.,0.5,1.], :points=>:specified, :reltol=>1e-5, :abstol=>1e-5) sol = ODE.solve(ode,st;opts...) - den = ODE.dense(sol;opts...) - println(typeof(sol.stepper.options)) - println(sol.stepper.options) println("Raw iterator") for (t,y) in sol println((t,y)) end - println("Dense output") - for (t,y) in den - println((t,y)) - end - println(collect(sol)) - println(collect(den)) end end diff --git a/src/dense.jl b/src/dense.jl index f8ab9cb23..847d99195 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -38,11 +38,14 @@ immutable DenseStepper{S<:Solver,O<:DenseOptions} <: AbstractStepper end -function dense{O<:ExplicitODE}(sol::Solver{O}; options...) - T,_ = eltype(sol) - opt = DenseOptions{T}(;options...) - den = DenseStepper(sol,opt) - Solver(sol.ode, den) +function solve{T,S<:DenseStepper}(ode::ExplicitODE{T}, + ::Type{S}; + method = RKStepperAdaptive{:rk45}, + options...) + sol_orig = Solver(ode,method{T}(; options...)) + dense_options = DenseOptions{T}(; options...) 
+ dense_stepper = S(sol_orig,dense_options) + return Solver(ode,dense_stepper) end """ diff --git a/src/interfaces.jl b/src/interfaces.jl index 8eabe6a09..f567a5b70 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -14,8 +14,10 @@ function ode{T,Y,S<:AbstractStepper}(F, y0::Y, # construct a solver equation = explicit_ineff(t0,y0,F;kargs...) - solver = solve(equation, stepper; tspan = tspan, kargs...) - dsolver = dense(solver; tspan = tspan, kargs...) + dsolver = solve(equation, DenseStepper; + mehtod = stepper, + tspan = tspan, + kargs...) # determine if we have to unpack y extract = Y <: Number diff --git a/test/iterators.jl b/test/iterators.jl index 9f751516d..7a5844552 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -127,7 +127,8 @@ function test_ode() @test_approx_eq_eps y sol(t) tol end - for (t,y) in ODE.dense(solver) + for (t,y) in ODE.solve(equation,ODE.DenseStepper; + method = stepper, opts...) @test_approx_eq_eps y sol(t) tol end end From 7e64beb3c6227e9c18a387b6abea02a0e45512cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sun, 24 Jul 2016 11:24:34 +0200 Subject: [PATCH 045/113] New backend and Runge-Kutta implementaion --- src/ODE.jl | 1 - src/iterators.jl | 24 -------- src/runge-kutta.jl | 137 ++++++++++++++++++++++++++------------------- src/types.jl | 116 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 194 insertions(+), 84 deletions(-) delete mode 100644 src/iterators.jl diff --git a/src/ODE.jl b/src/ODE.jl index 943af7130..a79023b4d 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -25,7 +25,6 @@ include("adams-bashford-moulton.jl") include("rosenbrock.jl") # include("taylor.jl") -include("iterators.jl") include("interfaces.jl") end # module ODE diff --git a/src/iterators.jl b/src/iterators.jl deleted file mode 100644 index 9eff999d8..000000000 --- a/src/iterators.jl +++ /dev/null @@ -1,24 +0,0 @@ -""" -Generic done method, some steppers may implement their own versions. 
-""" - -function done(s::Solver, state::AbstractState) - st = s.stepper - - if state.step.t >= st.options.tstop - return true - end - - # specific for adaptive stepper - if isadaptive(st) - if state.dt < st.options.minstep - warn("Minstep reached.") - return true - elseif state.iters >= st.options.maxiters - warn("Maximum number of iterations ($(Int(s.options.maxiters))) reached, consider setting a larger maxiter.") - return true - end - end - - return false -end diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index bec8de2f4..e7a53620d 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -43,9 +43,6 @@ order(stepper::RKStepper) = minimum(order(stepper.tableau)) name(stepper::RKStepper) = stepper.tableau.name -isadaptive(::RKStepper{:adaptive}) = true -isadaptive(::RKStepper{:fixed}) = false - solve{T,S<:RKStepper}(ode::ExplicitODE{T}, stepper::Type{S}; options...) = Solver(ode,stepper{T}(;options...)) @@ -73,12 +70,17 @@ State for the Runge-Kutta stepper. type RKState{T,Y} <: AbstractState{T,Y} step ::Step{T,Y} dt ::T + newdt ::T work ::RKWorkArrays{Y} timeout ::Int # This is not currently incremented with each step iters ::Int end + +output(st::RKState) = st.step.t, st.step.y + + function show(io::IO, state::RKState) show(state.step) println("dt = $(state.dt)") @@ -87,10 +89,13 @@ function show(io::IO, state::RKState) end -function start{O<:ExplicitODE,S<:RKStepper}(s::Solver{O,S}) +function init{O<:ExplicitODE,S<:RKStepper}(s::Solver{O,S}) stepper = s.stepper t0, dt0, y0 = s.ode.t0, stepper.options.initstep, s.ode.y0 + # clip the dt0 if t0+dt0 exceeds tstop + dt0 = min(dt0,stepper.options.tstop-t0) + lk = lengthks(s.stepper.tableau) work = RKWorkArrays(zero(y0), # y zero(y0), # ynew @@ -108,7 +113,7 @@ function start{O<:ExplicitODE,S<:RKStepper}(s::Solver{O,S}) step = Step(t0,copy(y0),copy(work.ks[1])) timeout = 0 # for step control - return RKState(step,dt0,work,timeout,0) + return RKState(step,dt0,dt0,work,timeout,0) end @@ -117,10 +122,15 @@ end 
##################### -function next{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state) +function onestep!{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state::RKState) step = state.step work = state.work + if step.t >= s.stepper.options.tstop + # nothing left to integrate + return StatusFinished + end + dof = length(step.y) b = s.stepper.tableau.b dt = min(state.dt,s.stepper.options.tstop-step.t) @@ -135,7 +145,7 @@ function next{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state) end step.t += dt copy!(step.y,work.ynew) - return ((step.t,step.y), state) + return StatusContinue end @@ -146,72 +156,81 @@ end const timeout_const = 5 -function next{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state) - - # the initial values - dt = state.dt # dt is the previous stepisze, it is - # modified inside the loop - timeout = state.timeout +# `trialstep!` ends with a step computed for the stepsize `state.dt` +# and stores it in `work.y`, so `work.y` contains a candidate for +# `y(t+dt)` with `dt=state.dt`. +function trialstep!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state::RKState) work = state.work step = state.step stepper = sol.stepper tableau = stepper.tableau options = stepper.options - # The while loop continues until we either find a stepsize which - # leads to a small enough error or the stepsize reaches - # prob.minstep + # use the proposed step size to perform the computations + state.dt = state.newdt + dt = state.dt - # trim the inital stepsize to avoid overshooting - dt = min(dt, options.tstop-state.step.t) + if step.t >= options.tstop + # nothing left to integrate + return StatusFinished + end - while true + if dt < options.minstep + # minimum step size reached + return StatusFailed + end + + # work.y and work.yerr and work.ks are updated after this step + rk_embedded_step!(work, sol.ode, tableau, step, dt) + + return StatusContinue +end - # Do one step (assumes ks[1]==f0). After calling work.ynew - # holds the new step. 
- # TODO: return ynew instead of passing it as work.ynew? +# computes the error for the candidate solution `y(t+dt)` with +# `dt=state.dt` and proposes a new time step +function errorcontrol!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state::RKState) + work = state.work + step = state.step + stepper = sol.stepper + tableau = stepper.tableau + timeout = state.timeout + options = stepper.options + err, state.newdt, state.timeout = + stepsize_hw92!(work, step, tableau, state.dt, state.timeout, options) - # work.y and work.yerr and work.ks are updated after this step - rk_embedded_step!(work, sol.ode, tableau, step, dt) + # trim in case newdt > dt + state.newdt = min(state.newdt, options.tstop-state.step.t) - # changes work.yerr - err, newdt, timeout = stepsize_hw92!(work, step, tableau, dt, timeout, options) + if err > 1 + # The error is too large, the step will be rejected. We reset + # the timeout and set the new stepsize + state.timeout = timeout_const + end - # trim again in case newdt > dt - newdt = min(newdt, options.tstop-state.step.t) + return err, StatusContinue +end - if abs(newdt) < options.minstep # minimum step size reached, break - # passing the newdt to state will result in done() - state.dt = newdt - break - end +# Here we assume that trialstep! and errorcontrol! have already been +# called, that is `work.y` holds `y(t+dt)` with `dt=state.dt`, and +# error was small enough for us to keep `y(t+dt)` as the next step. 
+function accept!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state::RKState) + work = state.work + step = state.step + tableau = sol.stepper.tableau - if err > 1 # error is too large, repeat the step with smaller dt - # redo step with smaller dt and reset the timeout - dt = newdt - timeout = timeout_const - else - # step is accepted - - # preload ks[1] for the next step - if sol.stepper.tableau.isFSAL - copy!(work.ks[1],work.ks[end]) - else - sol.ode.F!(step.t+dt, work.ynew, work.ks[1]) - end - - # Swap bindings of y and ytrial, avoids one copy - step.y, work.ynew = work.ynew, step.y - - # Update state with the data from the step we have just - # made: - step.t += dt - state.dt = newdt - state.timeout = timeout - break - end + # preload ks[1] for the next step + if tableau.isFSAL + copy!(work.ks[1],work.ks[end]) + else + sol.ode.F!(step.t+state.dt, work.ynew, work.ks[1]) end - return ((step.t,step.y),state) + + # Swap bindings of y and ytrial, avoids one copy + step.y, work.ynew = work.ynew, step.y + # state.dt holds the size of the last successful step + step.t += state.dt + + return StatusContinue end @@ -301,7 +320,7 @@ function stepsize_hw92!{T}(work, # TOOD: should we use options.norm here as well? err = options.norm(work.yerr) # Eq. 4.11 - newdt = min(options.maxstep, dt*max(facmin, fac*(1/err)^(1/(ord+1)))) # Eq 4.13 modified + newdt = min(options.maxstep, dt*clamp(fac*(1/err)^(1/(ord+1)),facmin,facmax)) # Eq 4.13 modified if timeout > 0 newdt = min(newdt, dt) diff --git a/src/types.jl b/src/types.jl index 8fb622324..e98cf538d 100644 --- a/src/types.jl +++ b/src/types.jl @@ -168,3 +168,119 @@ function collect(s::Solver) end return pairs end + + +# Iteration: take one step on a ODE/DAE `Problem` +# +# Define: +# start(iter) -> state +# next(iter, state) -> output(state), state +# done(iter, state) -> bool +# +# Perhaps unintuitively, the next step is computed in `done`. 
Such +# implementation allows to decide if the iterator is exhausted in case +# when the next step was computed but it was deemed incorrect. In +# such situation `done` returns `false` after computing the step and +# the failed step is never sees the light of the day (by being +# returned by `next`). +# +# TODO: this implementation fails to return the zeroth step (t0,y0) +# +# TODO: store the current Step outside of the actual state +# Base.start(sol::Solver) = (init(sol), Step(ode.sol)) + +Base.start(sol::Solver) = init(sol) + +function Base.done(s::Solver, st) + # Determine whether the next step can be made + status = onestep!(s, st) + return !successful(status) +end + +function Base.next(sol::Solver, st) + # Output the step (we know that `done` allowed it, so we are safe + # to do it) + return output(st), st +end + + +""" +TODO: Holds the solver status after onestep. +""" +type Status{T} end +successful(status::Status) = status == StatusContinue +const StatusContinue = Status{:cont}() +const StatusFailed = Status{:failed}() +const StatusFinished = Status{:finished}() + +##### +# Interface to implement by solvers to hook into iteration +##### +# +# See runge_kutta.jl and rosenbrock.jl for example implementations. + +# A stepper has to implement +# - init +# - output +# and either +# - onestep! +# - trialstep!, errorcontrol! and accept! + +""" + +Determines wheter we can take one step. This is the core function to +be implemented by a solver. Note that adaptive solvers may want to +implement only some of the substeps. + +""" + +# onestep! 
tries to find out if the next step can be made +function onestep!(sol::Solver, state::AbstractState) + opt = sol.stepper.options + while true + status = trialstep!(sol, state) + + if !successful(status) + return status + else + err, statuserr = errorcontrol!(sol, state) + if !successful(statuserr) + return statuserr + elseif err <= 1 + statusaccept = accept!(sol, state) + if !successful(statusaccept) + return statusaccept + else + return status + end + end + end + + end +end + + +""" +Advances the solution to new state by a given time step. Updates +state in-place such that it reflects the new state. +Returns the stats for this step (TODO). +""" +trialstep!{O,S}(::Solver{O,S}, ::AbstractState) = + error("Function `trialstep!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") + +""" +Accepts (in-place) the computed step state back to the previous state after a failed +trial step. The reverting needn't be 100% as long as a new trial step +can be calculated from it. +Returns nothing. +""" +accept!{O,S}(::Solver{O,S}, ::AbstractState) = + error("Function `accept!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") + + +""" +Estimates the error (such that a step is accepted if err<=1), a new dt +and a new order. Updates state with new dt and order (as appropriate). +Returns err & stats (TODO). 
+""" +errorcontrol!{T}(::Solver, ::AbstractState{T}) = zero(T), Status() From 7d6edcd432aa04c09f2d460e605b49d209c786e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sun, 24 Jul 2016 11:36:50 +0200 Subject: [PATCH 046/113] Updated docs --- src/types.jl | 40 ++++++++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/src/types.jl b/src/types.jl index e98cf538d..9621783ff 100644 --- a/src/types.jl +++ b/src/types.jl @@ -260,27 +260,47 @@ function onestep!(sol::Solver, state::AbstractState) end +# TODO: the docs here are still confusing, I would rather have a +# separate type to store the `accepted` step (perhaps `Step`?) and +# call `trialstep!(solver,state,step)` to fill the `state` with the +# newly made step, then `accept!(solver,state,step)` would use the +# data in `state` to fill the `step` with new step. This way we could +# also implement a standard `output` function that would work on +# `step` instead of `state`. The step would contain the current state +# of the solution: `(t,y)` at minimum, but it could also be +# `(t,y,dy,dt)`. Thoughts? """ -Advances the solution to new state by a given time step. Updates -state in-place such that it reflects the new state. -Returns the stats for this step (TODO). + +Advances the solution by trying to compute a single step. The new +step is kept in the `state` in work arrays so that `statuserr!` can +compute the magnitude of its error. If the error is small enough +`accept!` saves the step in the `state`. + +Returns `Status`. + """ trialstep!{O,S}(::Solver{O,S}, ::AbstractState) = error("Function `trialstep!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") """ -Accepts (in-place) the computed step state back to the previous state after a failed -trial step. The reverting needn't be 100% as long as a new trial step -can be calculated from it. -Returns nothing. 
+ +Accepts (in-place) the computed step if `errorcontrol!` gave a small +enough error. + +Returns `Status`. + """ accept!{O,S}(::Solver{O,S}, ::AbstractState) = error("Function `accept!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") """ -Estimates the error (such that a step is accepted if err<=1), a new dt -and a new order. Updates state with new dt and order (as appropriate). -Returns err & stats (TODO). + +Estimates the error (such that a step is accepted if +err<=1). Depending on the stepper it may update the state, e.g. by +computing a new dt or a new order. + +Returns `(err,Status)`. + """ errorcontrol!{T}(::Solver, ::AbstractState{T}) = zero(T), Status() From e18fbe2d027c5d2268943acb2352862cda3dfd28 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 10:23:50 +0200 Subject: [PATCH 047/113] some cleanup of iteration --- src/types.jl | 114 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 69 insertions(+), 45 deletions(-) diff --git a/src/types.jl b/src/types.jl index 9621783ff..28896f6f2 100644 --- a/src/types.jl +++ b/src/types.jl @@ -172,7 +172,7 @@ end # Iteration: take one step on a ODE/DAE `Problem` # -# Define: +# Defines: # start(iter) -> state # next(iter, state) -> output(state), state # done(iter, state) -> bool @@ -181,7 +181,7 @@ end # implementation allows to decide if the iterator is exhausted in case # when the next step was computed but it was deemed incorrect. In # such situation `done` returns `false` after computing the step and -# the failed step is never sees the light of the day (by being +# the failed step never sees the light of the day (by not being # returned by `next`). 
# # TODO: this implementation fails to return the zeroth step (t0,y0) @@ -192,9 +192,10 @@ end Base.start(sol::Solver) = init(sol) function Base.done(s::Solver, st) - # Determine whether the next step can be made - status = onestep!(s, st) - return !successful(status) + # Determine whether the next step can be made by calling the + # stepping routine. onestep! will take the step in-place. + finished = onestep!(s, st) + return finished end function Base.next(sol::Solver, st) @@ -203,15 +204,25 @@ function Base.next(sol::Solver, st) return output(st), st end - +#m3: I don't think it makes sense to type-fy this. TODO: delete +# """ +# TODO: Holds the solver status after onestep. +# """ +# type Status{T} end +# successful(status::Status) = status == StatusContinue +# const StatusContinue = Status{:cont}() +# const StatusFailed = Status{:failed}() +# const StatusFinished = Status{:finished}() """ -TODO: Holds the solver status after onestep. +Holds the solver status, used inside of `onestep!`. + +Values: + +- cont -- continue integration +- abort -- abort integration +- finish -- integration reached the end """ -type Status{T} end -successful(status::Status) = status == StatusContinue -const StatusContinue = Status{:cont}() -const StatusFailed = Status{:failed}() -const StatusFinished = Status{:finished}() +@enum Status cont abort finish ##### # Interface to implement by solvers to hook into iteration @@ -226,36 +237,42 @@ const StatusFinished = Status{:finished}() # - onestep! # - trialstep!, errorcontrol! and accept! +const _notdone = false +const _done = true + """ -Determines wheter we can take one step. This is the core function to -be implemented by a solver. Note that adaptive solvers may want to -implement only some of the substeps. +Take a step, modifies `state` in-place. This is the core function to +be implemented by a solver. 
However, if possible solvers should opt +to implement the sub-step functions `trialstep!`, `errorcontrol!` and +`accept!`, instead of directly `onestep!`. -""" +Input: + +- sol::Solver, state::AbstractState -# onestep! tries to find out if the next step can be made +Output: + +- Bool: `false`: continue iteration, `true`: terminate iteration. + +substeps. +""" function onestep!(sol::Solver, state::AbstractState) opt = sol.stepper.options while true status = trialstep!(sol, state) - - if !successful(status) - return status - else - err, statuserr = errorcontrol!(sol, state) - if !successful(statuserr) - return statuserr - elseif err <= 1 - statusaccept = accept!(sol, state) - if !successful(statusaccept) - return statusaccept - else - return status - end - end + err, status = errorcontrol!(sol, state, status) + if err<=1 && status==cont + # a successful step + accept!(sol, state) + return _notdone + elseif status==abort + warn("Aborting!") # TODO something more fancy here + return _done + elseif status==finish + return _done end - + # else: try again with smaller step end end @@ -269,12 +286,17 @@ end # `step` instead of `state`. The step would contain the current state # of the solution: `(t,y)` at minimum, but it could also be # `(t,y,dy,dt)`. Thoughts? +# +#m3: No, that doesn't work if we want to allow zero-allocation +# algorithms. Unless you make `step` part of `state` but then it +# becomes pointless. + """ Advances the solution by trying to compute a single step. The new -step is kept in the `state` in work arrays so that `statuserr!` can +step is kept in the `state` in work arrays so that `errorcontrol!` can compute the magnitude of its error. If the error is small enough -`accept!` saves the step in the `state`. +`accept!` updates `state` to reflect the state at the new time. Returns `Status`. @@ -284,23 +306,25 @@ trialstep!{O,S}(::Solver{O,S}, ::AbstractState) = """ -Accepts (in-place) the computed step if `errorcontrol!` gave a small -enough error. 
+Estimates the error (such that a step is accepted if err<=1). +Depending on the stepper it may update the state, e.g. by computing a +new dt or a new order (but not by computing a new solution!). -Returns `Status`. +Returns `(err,Status)`. +Defaults to return (0,cont), i.e. accept step. This can be used for +fixed-step solvers where no error control is done. """ -accept!{O,S}(::Solver{O,S}, ::AbstractState) = - error("Function `accept!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") +errorcontrol!{T}(::Solver,::AbstractState{T}) = zero(T), cont """ -Estimates the error (such that a step is accepted if -err<=1). Depending on the stepper it may update the state, e.g. by -computing a new dt or a new order. +Accepts (in-place) the computed step. Called if `errorcontrol!` gave +a small enough error. -Returns `(err,Status)`. +Returns `Status`. """ -errorcontrol!{T}(::Solver, ::AbstractState{T}) = zero(T), Status() +accept!{O,S}(::Solver{O,S}, ::AbstractState) = + error("Function `accept!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") From dabbf5d96ad793dbc188963f77c280019f9220bb Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 10:44:25 +0200 Subject: [PATCH 048/113] running partly again --- src/runge-kutta.jl | 18 ++++++++++-------- src/types.jl | 7 +++++-- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index e7a53620d..a3ddf4ee5 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -128,7 +128,7 @@ function onestep!{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state::RKSta if step.t >= s.stepper.options.tstop # nothing left to integrate - return StatusFinished + return finish end dof = length(step.y) @@ -145,7 +145,7 @@ function onestep!{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state::RKSta end step.t += dt copy!(step.y,work.ynew) - return StatusContinue + return cont end @@ -172,23 
+172,25 @@ function trialstep!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state if step.t >= options.tstop # nothing left to integrate - return StatusFinished + return finish end if dt < options.minstep # minimum step size reached - return StatusFailed + return abort end # work.y and work.yerr and work.ks are updated after this step rk_embedded_step!(work, sol.ode, tableau, step, dt) - return StatusContinue + return cont end # computes the error for the candidate solution `y(t+dt)` with # `dt=state.dt` and proposes a new time step -function errorcontrol!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state::RKState) +function errorcontrol!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, + state::RKState, + status::Status) work = state.work step = state.step stepper = sol.stepper @@ -207,7 +209,7 @@ function errorcontrol!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, st state.timeout = timeout_const end - return err, StatusContinue + return err, status end # Here we assume that trialstep! and errorcontrol! have already been @@ -230,7 +232,7 @@ function accept!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state::R # state.dt holds the size of the last successful step step.t += state.dt - return StatusContinue + return nothing end diff --git a/src/types.jl b/src/types.jl index 28896f6f2..24a28eb12 100644 --- a/src/types.jl +++ b/src/types.jl @@ -237,6 +237,7 @@ Values: # - onestep! # - trialstep!, errorcontrol! and accept! +# Just to make it more readable below const _notdone = false const _done = true @@ -272,7 +273,7 @@ function onestep!(sol::Solver, state::AbstractState) elseif status==finish return _done end - # else: try again with smaller step + # else: try again with updates as done inside errorcontrol! end end @@ -308,7 +309,9 @@ trialstep!{O,S}(::Solver{O,S}, ::AbstractState) = Estimates the error (such that a step is accepted if err<=1). Depending on the stepper it may update the state, e.g. 
by computing a -new dt or a new order (but not by computing a new solution!). +new dt or a new order (but not by computing a new solution!). It also +takes the `status` as input, which should probably just be passed +through, but could be modified. Returns `(err,Status)`. From e7c99c8ced1108039af337fb46f3b8fc272512c5 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 10:49:38 +0200 Subject: [PATCH 049/113] addressed comments --- src/runge-kutta.jl | 7 +++---- src/types.jl | 44 ++++++++++++++++++++++++++++---------------- 2 files changed, 31 insertions(+), 20 deletions(-) diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index a3ddf4ee5..9651dc52a 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -189,8 +189,7 @@ end # computes the error for the candidate solution `y(t+dt)` with # `dt=state.dt` and proposes a new time step function errorcontrol!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, - state::RKState, - status::Status) + state::RKState) work = state.work step = state.step stepper = sol.stepper @@ -209,7 +208,7 @@ function errorcontrol!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state.timeout = timeout_const end - return err, status + return err, cont end # Here we assume that trialstep! and errorcontrol! 
have already been @@ -232,7 +231,7 @@ function accept!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state::R # state.dt holds the size of the last successful step step.t += state.dt - return nothing + return cont end diff --git a/src/types.jl b/src/types.jl index 24a28eb12..4dc58da37 100644 --- a/src/types.jl +++ b/src/types.jl @@ -222,7 +222,7 @@ Values: - abort -- abort integration - finish -- integration reached the end """ -@enum Status cont abort finish +@enum Status cont abort finish # TODO these need better names ##### # Interface to implement by solvers to hook into iteration @@ -262,18 +262,31 @@ function onestep!(sol::Solver, state::AbstractState) opt = sol.stepper.options while true status = trialstep!(sol, state) - err, status = errorcontrol!(sol, state, status) - if err<=1 && status==cont - # a successful step - accept!(sol, state) - return _notdone - elseif status==abort - warn("Aborting!") # TODO something more fancy here + # This could be moved into a @check macro: + if status==abort + warn("Abort in trialstep!") return _done elseif status==finish return _done end - # else: try again with updates as done inside errorcontrol! + + err, status_err = errorcontrol!(sol, state) + if status_err==abort + warn("Abort in errorcontrol!") + return _done + end + if err<=1 && status==cont + # a successful step + status_acc = accept!(sol, state) + if status_acc==abort + warn("Abort in accept!") + return _done + else + return _notdone + end + end + # if we get here: try step again with updated state (step + # size, order) as done inside errorcontrol! end end @@ -309,17 +322,16 @@ trialstep!{O,S}(::Solver{O,S}, ::AbstractState) = Estimates the error (such that a step is accepted if err<=1). Depending on the stepper it may update the state, e.g. by computing a -new dt or a new order (but not by computing a new solution!). It also -takes the `status` as input, which should probably just be passed -through, but could be modified. 
+new dt or a new order (but not by computing a new solution!). Returns `(err,Status)`. -Defaults to return (0,cont), i.e. accept step. This can be used for -fixed-step solvers where no error control is done. -""" -errorcontrol!{T}(::Solver,::AbstractState{T}) = zero(T), cont +If the `status==abort` then the integration is aborted, status values +of `cont` and `finish` are ignored. +""" +errorcontrol!{T}(::Solver,::AbstractState{T}) = + error("Function `errorcontrol!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") """ From 86c75c11ee2f9767a3366ccd19efba589f130946 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 13:44:35 +0200 Subject: [PATCH 050/113] Bug-dic in errorcontrol! in RK --- src/runge-kutta.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 9651dc52a..0bcbec90e 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -200,7 +200,7 @@ function errorcontrol!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, stepsize_hw92!(work, step, tableau, state.dt, state.timeout, options) # trim in case newdt > dt - state.newdt = min(state.newdt, options.tstop-state.step.t) + state.newdt = min(state.newdt, options.tstop-(state.step.t+state.dt)) if err > 1 # The error is too large, the step will be rejected. We reset From b3cba83466b1bce429d7466c0ffbf14c28e9864e Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 14:09:24 +0200 Subject: [PATCH 051/113] Added tdir Iterator works, higher level functions don't yet. 
--- src/options.jl | 14 ++--- src/runge-kutta.jl | 20 +++---- src/types.jl | 2 + test/iterators.jl | 130 ++++++++++++++++++++++----------------------- test/runtests.jl | 42 ++++++++------- 5 files changed, 106 insertions(+), 102 deletions(-) diff --git a/src/options.jl b/src/options.jl index 20d8097ae..7a4b58ba6 100644 --- a/src/options.jl +++ b/src/options.jl @@ -7,13 +7,13 @@ constructor which will fill the structure with default values. General: -- initstep ::T initial step +- initstep ::T initial step size (always positive) - tstop ::T end integration time - reltol ::T relative tolerance (m3: could this be a vector?) - abstol ::T absolute tolerance (m3: could this be a vector?) -- minstep ::T minimal allowed step -- maxstep ::T maximal allowed step -- norm function to calculate the norm in step control +- minstep ::T minimal allowed step size (always positive) +- maxstep ::T maximal allowed step size (always positive) +- norm function to calculate the norm in step control - maxiters ::T maximum number of steps - isoutofdomain::Function checks if the solution is outside of the allowed domain @@ -42,7 +42,7 @@ end maxiters = T(Inf), isoutofdomain::O = Base.isnan, kargs...) - + @assert minstep>=0 && maxstep>=0 && initstep>=0 # TODO: move to inner constructor AdaptiveOptions{T,N,O}(tstop,reltol,abstol,minstep,maxstep,initstep,norm,maxiters,isoutofdomain) end @@ -53,7 +53,7 @@ constructor which will fill the structure with default values. General: -- initstep ::T initial step +- initstep ::T initial step (always positive) - tstop ::T end integration time """ @@ -67,7 +67,7 @@ end tstop = tspan[end], initstep = 10*eps(T), kargs...) 
- + @assert initstep>=0 FixedOptions{T}(tstop,initstep) end diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 0bcbec90e..7da8be0d5 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -94,7 +94,7 @@ function init{O<:ExplicitODE,S<:RKStepper}(s::Solver{O,S}) t0, dt0, y0 = s.ode.t0, stepper.options.initstep, s.ode.y0 # clip the dt0 if t0+dt0 exceeds tstop - dt0 = min(dt0,stepper.options.tstop-t0) + dt0 = tdir(s)*min(abs(dt0),abs(stepper.options.tstop-t0)) lk = lengthks(s.stepper.tableau) work = RKWorkArrays(zero(y0), # y @@ -126,14 +126,14 @@ function onestep!{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state::RKSta step = state.step work = state.work - if step.t >= s.stepper.options.tstop + if tdir(s)*step.t >= tdir(s)*s.stepper.options.tstop # nothing left to integrate - return finish + return true end dof = length(step.y) b = s.stepper.tableau.b - dt = min(state.dt,s.stepper.options.tstop-step.t) + dt = tdir(s)*min(abs(state.dt),abs(s.stepper.options.tstop-step.t)) copy!(work.ynew,step.y) @@ -145,7 +145,7 @@ function onestep!{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state::RKSta end step.t += dt copy!(step.y,work.ynew) - return cont + return false end @@ -170,12 +170,12 @@ function trialstep!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state state.dt = state.newdt dt = state.dt - if step.t >= options.tstop + if tdir(sol)*step.t >= tdir(sol)*options.tstop # nothing left to integrate return finish end - if dt < options.minstep + if abs(dt) < options.minstep # minimum step size reached return abort end @@ -200,7 +200,7 @@ function errorcontrol!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, stepsize_hw92!(work, step, tableau, state.dt, state.timeout, options) # trim in case newdt > dt - state.newdt = min(state.newdt, options.tstop-(state.step.t+state.dt)) + state.newdt = tdir(sol)*min(abs(state.newdt), abs(options.tstop-(state.step.t+state.dt))) if err > 1 # The error is too large, the step will be rejected. 
We reset @@ -321,10 +321,10 @@ function stepsize_hw92!{T}(work, # TOOD: should we use options.norm here as well? err = options.norm(work.yerr) # Eq. 4.11 - newdt = min(options.maxstep, dt*clamp(fac*(1/err)^(1/(ord+1)),facmin,facmax)) # Eq 4.13 modified + newdt = sign(dt)*min(options.maxstep, abs(dt)*clamp(fac*(1/err)^(1/(ord+1)),facmin,facmax)) # Eq 4.13 modified if timeout > 0 - newdt = min(newdt, dt) + newdt = sign(dt)*min(abs(newdt), abs(dt)) timeout -= 1 end diff --git a/src/types.jl b/src/types.jl index 4dc58da37..8d1b2381b 100644 --- a/src/types.jl +++ b/src/types.jl @@ -154,6 +154,8 @@ end Base.eltype{O}(::Type{Solver{O}}) = eltype(O) Base.eltype{O}(::Solver{O}) = eltype(O) +tdir(s::Solver) = sign(s.stepper.options.tstop - s.ode.t0) + # filter the wrong combinations of ode and stepper solve{O,S}(ode::O, stepper::Type{S}, options...) = error("The $S doesn't support $O") diff --git a/test/iterators.jl b/test/iterators.jl index 7a5844552..a77a059c5 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -1,4 +1,4 @@ -testsets = [ +const testsets = [ Dict( :F! => (t,y,dy)->dy[1]=6.0, :y0 => [0.], @@ -26,15 +26,15 @@ testsets = [ :isscalar => true, :name => "y'=y", :initstep => 0.001), - # Dict( - # :F! => (t,y,dy)->dy[1]=y[1], - # :y0 => [1.0], - # :tspan => [1:-0.001:0;], - # :jac => (t,y,dy)->dy[1]=1.0, - # :sol => t->[exp(t-1)], - # :isscalar => true, - # :name => "y'=y backwards", - # :initstep => 0.001), + Dict( + :F! => (t,y,dy)->dy[1]=y[1], + :y0 => [1.0], + :tspan => [1:-0.001:0;], + :jac => (t,y,dy)->dy[1]=1.0, + :sol => t->[exp(t-1)], + :isscalar => true, + :name => "y'=y backwards", + :initstep => 0.001), Dict( :F! 
=> (t,y,dy)->(dy[1]=-y[2];dy[2]=y[1]), :y0 => [1.0,2.0], @@ -48,7 +48,7 @@ testsets = [ # Testing function ode -steppers = [ODE.RKStepperFixed{:feuler}, +const steppers = [ODE.RKStepperFixed{:feuler}, ODE.RKStepperFixed{:midpoint}, ODE.RKStepperFixed{:heun}, ODE.RKStepperFixed{:rk4}, @@ -57,9 +57,10 @@ steppers = [ODE.RKStepperFixed{:feuler}, ODE.RKStepperAdaptive{:rk45}, ODE.RKStepperAdaptive{:dopri5}, ODE.RKStepperAdaptive{:feh78}, - ODE.ModifiedRosenbrockStepper{} + #ODE.ModifiedRosenbrockStepper{} ] +warn("TODO: re-enable some tests") function test_ode() tol = 0.002 @@ -75,63 +76,62 @@ function test_ode() F(t,y) = (dy = similar(y); F!(t,y,dy); return dy) for points = [:specified, :all] - if ts[:isscalar] - # test the ODE.odeXX scalar interface (if the equation is scalar) - Fscal = (t,y)->F(t,[y])[1] - y0scal = y0[1] - # with jacobian - tj,yj = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0,J! = jac!) - @test_approx_eq_eps yj map(x->sol(x)[1],tj) tol - # without jacobian - t,y = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0) - @test_approx_eq_eps y map(x->sol(x)[1],tj) tol - - # results with and without jacobian should be exactly the same - @test_approx_eq yj y - - if points == :specified - # test if we covered the whole timespan - @test length(tspan) == length(t) == length(tj) - @test_approx_eq tspan t - @test_approx_eq tspan tj - end + # if ts[:isscalar] + # # test the ODE.odeXX scalar interface (if the equation is scalar) + # Fscal = (t,y)->F(t,[y])[1] + # y0scal = y0[1] + # # with jacobian + # tj,yj = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0,J! = jac!) 
+ # @test_approx_eq_eps yj map(x->sol(x)[1],tj) tol + # # without jacobian + # t,y = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0) + # @test_approx_eq_eps y map(x->sol(x)[1],tj) tol + + # # results with and without jacobian should be exactly the same + # @test_approx_eq yj y + + # if points == :specified + # # test if we covered the whole timespan + # @test length(tspan) == length(t) == length(tj) + # @test_approx_eq tspan t + # @test_approx_eq tspan tj + # end + # end + + # # ODE.odeXX vector interface + # # with jacobian + # tj,yj = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0,J! = jac!) + # @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol + # # without jacobian + # t,y = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0) + # @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol + + # @test_approx_eq hcat(yj...) hcat(y...) + + # if points == :specified + # # test if we covered the whole timespan + # @test length(tspan) == length(t) == length(tj) + # @test_approx_eq tspan t + # @test_approx_eq tspan tj + # end + + # test the iterator interface + equation = ODE.ExplicitODE(tspan[1],y0,F!) + opts = Dict(:tspan => tspan, + :initstep => h0, + :points => points) + + solver = ODE.solve(equation,stepper;opts...) + + for (t,y) in solver + @test_approx_eq_eps y sol(t) tol end - # ODE.odeXX vector interface - # with jacobian - tj,yj = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0,J! = jac!) - @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol - # without jacobian - t,y = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0) - @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol - - @test_approx_eq hcat(yj...) hcat(y...) - - if points == :specified - # test if we covered the whole timespan - @test length(tspan) == length(t) == length(tj) - @test_approx_eq tspan t - @test_approx_eq tspan tj + for (t,y) in ODE.solve(equation,ODE.DenseStepper; + method = stepper, opts...) 
+ @test_approx_eq_eps y sol(t) tol end - # test the iterator interface (they only support forward time integration) - if issorted(tspan) - equation = ODE.ExplicitODE(tspan[1],y0,F!) - opts = Dict(:tspan => tspan, - :initstep => h0, - :points => points) - - solver = ODE.solve(equation,stepper;opts...) - - for (t,y) in solver - @test_approx_eq_eps y sol(t) tol - end - - for (t,y) in ODE.solve(equation,ODE.DenseStepper; - method = stepper, opts...) - @test_approx_eq_eps y sol(t) tol - end - end end end end diff --git a/test/runtests.jl b/test/runtests.jl index dee887f8e..e608988ee 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -57,7 +57,7 @@ for solver in solvers @test maximum(abs(yj-e.^tj)) < tol @test norm(yj-y,Inf)y, 1., [1:-.001:0;], initstep=0.001) # @test maximum(abs(y-e.^(t-1))) < tol # tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) @@ -88,28 +88,30 @@ for solver in solvers end # Test negative starting times ODE.ode23s -@assert length(ODE.ode23s((t,y)->[-y[2]; y[1]], [1., 2.], [-5., 0])[1]) > 1 +@test length(ODE.ode23s((t,y)->[-y[2]; y[1]], [1., 2.], [-5., 0])[1]) > 1 +warn("TODO: Re-enable ROBER") +# error("stop here for now") +# # rober testcase from http://www.unige.ch/~hairer/testset/testset.html +# let +# println("ROBER test case") +# function f(t, y) +# ydot = similar(y) +# ydot[1] = -0.04*y[1] + 1.0e4*y[2]*y[3] +# ydot[3] = 3.0e7*y[2]*y[2] +# ydot[2] = -ydot[1] - ydot[3] +# ydot +# end +# t = [0., 1e11] +# t,y = ODE.ode23s(f, [1.0, 0.0, 0.0], t; abstol=1e-8, reltol=1e-8, +# maxstep=1e11/10, minstep=1e11/1e18) -# rober testcase from http://www.unige.ch/~hairer/testset/testset.html -let - println("ROBER test case") - function f(t, y) - ydot = similar(y) - ydot[1] = -0.04*y[1] + 1.0e4*y[2]*y[3] - ydot[3] = 3.0e7*y[2]*y[2] - ydot[2] = -ydot[1] - ydot[3] - ydot - end - t = [0., 1e11] - t,y = ODE.ode23s(f, [1.0, 0.0, 0.0], t; abstol=1e-8, reltol=1e-8, - maxstep=1e11/10, minstep=1e11/1e18) +# refsol = 
[0.2083340149701255e-07, +# 0.8333360770334713e-13, +# 0.9999999791665050] # reference solution at tspan[2] +# @test norm(refsol-y[end], Inf) < 2e-10 +# end - refsol = [0.2083340149701255e-07, - 0.8333360770334713e-13, - 0.9999999791665050] # reference solution at tspan[2] - @test norm(refsol-y[end], Inf) < 2e-10 -end include("interface-tests.jl") include("iterators.jl") From 54fa6e0209c2007e8426a9ad6004e865b2c4f030 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 14:22:57 +0200 Subject: [PATCH 052/113] import Compat.String --- src/ODE.jl | 1 + src/tableaus.jl | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/ODE.jl b/src/ODE.jl index a79023b4d..a78cfedf4 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -4,6 +4,7 @@ module ODE using Polynomials using Compat +import Compat.String using ForwardDiff import Base.convert, Base.show diff --git a/src/tableaus.jl b/src/tableaus.jl index a8045f4b1..12979b0e0 100644 --- a/src/tableaus.jl +++ b/src/tableaus.jl @@ -50,7 +50,7 @@ immutable TableauRKExplicit{T} <: Tableau{T} c::Vector{T} isFSAL::Bool s::Int - name :: AbstractString + name::String function TableauRKExplicit(name,order,a,b,c) s = length(c) @assert c[1]==0 From 23e0d195eb58bfacbc1ee255b52caf4627feca86 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 16:19:17 +0200 Subject: [PATCH 053/113] updated statuses --- src/runge-kutta.jl | 7 +++--- src/types.jl | 55 +++++++++++++++++++++++----------------------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 7da8be0d5..7e87fa93d 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -128,7 +128,7 @@ function onestep!{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state::RKSta if tdir(s)*step.t >= tdir(s)*s.stepper.options.tstop # nothing left to integrate - return true + return finish end dof = length(step.y) @@ -145,7 +145,7 @@ function onestep!{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, 
state::RKSta end step.t += dt copy!(step.y,work.ynew) - return false + return cont end @@ -176,7 +176,8 @@ function trialstep!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state end if abs(dt) < options.minstep - # minimum step size reached + # TODO: use some sort of logging system + println("Minimum step size reached") return abort end diff --git a/src/types.jl b/src/types.jl index 8d1b2381b..7a462749e 100644 --- a/src/types.jl +++ b/src/types.jl @@ -196,8 +196,17 @@ Base.start(sol::Solver) = init(sol) function Base.done(s::Solver, st) # Determine whether the next step can be made by calling the # stepping routine. onestep! will take the step in-place. - finished = onestep!(s, st) - return finished + status = onestep!(s, st) + if status==cont + return false + elseif status==finish + return true + else #if status==abort + warn("aborting") + return true + # else + # error("unsported Status: $status") + end end function Base.next(sol::Solver, st) @@ -223,8 +232,16 @@ Values: - cont -- continue integration - abort -- abort integration - finish -- integration reached the end + +Statuses can be combined with &: +- cont&cont == cont +- finish&cont == finish +- abort&cont == abort +- abort&finish = abort """ -@enum Status cont abort finish # TODO these need better names +@enum Status cont=1 abort=0 finish=-1 +# The values of Statuses are chose to turn & into a *: +@compat Base.:&(s1::Status, s2::Status) = Status(Int(s1)*Int(s2)) ##### # Interface to implement by solvers to hook into iteration @@ -239,10 +256,6 @@ Values: # - onestep! # - trialstep!, errorcontrol! and accept! -# Just to make it more readable below -const _notdone = false -const _done = true - """ Take a step, modifies `state` in-place. 
This is the core function to @@ -264,31 +277,17 @@ function onestep!(sol::Solver, state::AbstractState) opt = sol.stepper.options while true status = trialstep!(sol, state) - # This could be moved into a @check macro: - if status==abort - warn("Abort in trialstep!") - return _done - elseif status==finish - return _done - end - err, status_err = errorcontrol!(sol, state) - if status_err==abort - warn("Abort in errorcontrol!") - return _done - end - if err<=1 && status==cont + status &= status_err + if err<=1 # a successful step - status_acc = accept!(sol, state) - if status_acc==abort - warn("Abort in accept!") - return _done - else - return _notdone - end + status &= accept!(sol, state) + return status + elseif status==abort || status==finish + return status end # if we get here: try step again with updated state (step - # size, order) as done inside errorcontrol! + # size, order). end end From d2a1d9dc1467dd8b067bbc0b06cdf88b4d4ee31f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Mon, 25 Jul 2016 20:41:44 +0200 Subject: [PATCH 054/113] New dense output --- examples/test.jl | 6 +- src/dense.jl | 294 ++++++++++++++++++++------------------------- src/runge-kutta.jl | 2 +- src/types.jl | 13 ++ 4 files changed, 149 insertions(+), 166 deletions(-) diff --git a/examples/test.jl b/examples/test.jl index 3f5067124..ed1aea44c 100644 --- a/examples/test.jl +++ b/examples/test.jl @@ -9,8 +9,8 @@ Y = Vector{T} t0 = zero(T) y0 = T[one(T)] -steppers = [ODE.RKStepperAdaptive{:rk45}, - ODE.RKStepperFixed{:feuler}, +steppers = [# ODE.RKStepperAdaptive{:rk45}, + # ODE.RKStepperFixed{:feuler}, ODE.DenseStepper] for st in steppers @@ -25,7 +25,7 @@ for st in steppers println("Raw iterator") for (t,y) in sol - println((t,y)) + println((t,y,norm(y-[exp(t)]))) end println(collect(sol)) diff --git a/src/dense.jl b/src/dense.jl index 847d99195..40c30a2fb 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -12,32 +12,38 @@ Dense output options: """ -immutable 
DenseOptions{T<:Number,S<:Function} <: Options{T} +immutable DenseOptions{T<:Number} <: Options{T} tspan ::Vector{T} tstop ::T - points ::Symbol - stopevent::S - roottol ::T + # points ::Symbol + # stopevent::S + # roottol ::T end -@compat function (::Type{DenseOptions{T}}){T,S}(; - tstop = T(Inf), - tspan::Vector = T[tstop], - points::Symbol= :all, - stopevent::S = (t,y)->false, - roottol = eps(T)^T(1//3), - kargs...) - DenseOptions{T,S}(tspan,tstop,points,stopevent,roottol) +@compat function (::Type{DenseOptions{T}}){T}(; + tstop = T(Inf), + tspan::Vector = T[tstop], + # points::Symbol= :all, + # stopevent::S = (t,y)->false, + # roottol = eps(T)^T(1//3), + kargs...) + DenseOptions{T}(tspan,tstop) end -#TODO: how about having an DenseStepper <: AbstractWrapper <: AbstractStepper? +""" + +A stepper specialized in dense output. It wraps around another +`Solver` and stores the subsequent steps generated by `Solver` and +interpolates the results on request (currently this means at the +output times stored in `options.tspan`). 
+ +""" immutable DenseStepper{S<:Solver,O<:DenseOptions} <: AbstractStepper solver::S options::O end - function solve{T,S<:DenseStepper}(ode::ExplicitODE{T}, ::Type{S}; method = RKStepperAdaptive{:rk45}, @@ -52,182 +58,146 @@ end The state of the dense stepper -- s0, s1: Previous steps, used to produce interpolated output -- solver_state: The state of the associated solver -- ytmp: work array - """ type DenseState{St<:AbstractState,T,Y} <: AbstractState{T,Y} - s0::Step{T,Y} - s1::Step{T,Y} - last_tout::T - first_step + tout_i::Int + step_prev::Step{T,Y} + step_out::Step{T,Y} solver_state::St - # used for storing the interpolation result - ytmp::Y - solver_done end +output(ds::DenseState) = output(ds.step_out) -function start{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}) - # extract the real solver - solver = s.stepper.solver - t0 = solver.ode.t0 - y0 = solver.ode.y0 - dy0 = copy(y0) - solver.ode.F!(t0,y0,dy0) - step0 = Step(t0,copy(y0),copy(dy0)) - step1 = Step(t0,copy(y0),copy(dy0)) - solver_state = start(solver) - ytmp = copy(y0) - return DenseState(step0, step1, t0-1, true, solver_state, ytmp, false) +function init{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}) + ode = s.stepper.solver.ode + solver_state = init(s.stepper.solver) + step_prev = Step(ode.t0,similar(ode.y0),similar(ode.y0)) + step_out = Step(ode.t0,similar(ode.y0),similar(ode.y0)) + return DenseState(1,step_prev,step_out,solver_state) end -# m3: I think it would be nice to factor out the dense-output and -# root-finding into its own function. That way it could be used also -# independently of the dense-output iterator. Also, it would make -# this next function more compact. - -# pwl: I agree, but then the problem is that once you decouple them -# you would lose the opprotunity to detect the roots with each step. -function next{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}, state::DenseState) - - - # m3: I'm not 100% sure what happens here. 
I would implement it like so: - # Initialize in `start`: calculate next t1, y1 and also hold onto IC in t0,y0, - # set state.last_t=1 - # - # in next have this loop - # for t in tspan[state.last_t:end] - # if t>t1 - # make new t1, y1, move old t1, y1 into t0, y0 - # end - # make dense output at t - # find events - # state.last_t += 1 - # return ((t, y), state) - # end - - # pwl: @m3 this is basically what happens here:-), although I'm - # not using the index of tspan anywhere explicitly. - - solver = s.stepper.solver - options = s.stepper.options - - # these guys store the intermediate steps we make - s0, s1 = state.s0, state.s1 - t0, t1 = s0.t, s1.t - - # assuming the last output was done at state.last_tout set the - # t_goal to the next larger time from tspan. Strong inequality - # below is crucial, otherwise we would be selecting the same step - # every time. - tspan = options.tspan - t_goal = tspan[findfirst(t->(t>state.last_tout), tspan)] - - # Keep computing new steps (i.e. new pairs (t0,t1)) until we reach - # t0 < t_goal <= t1, then we use interpolation to get the value at - # t_goal. Unless points==:all, then we break the while loop after - # making the first step. - while t_goal > t1 - - # s1 stores the last succesfull step, the new step is stored - # in s0 - - if done(solver, state.solver_state) - warn("The iterator was exhausted before the dense output completed.") - # prevents calling done(..) twice - state.solver_done = true - # TODO: deepcopy? 
- # Return whatever we got as the last step - return ((s0.t,s0.y[:]),state) - else - # at this point s0 is updated with the new step, "s2" if you will - ((s0.t,s0.y[:]), state.solver_state) = next(solver, state.solver_state) - end - - # swap s0 and s1 - s0, s1 = s1, s0 - # and times - t0, t1 = s0.t, s1.t - # update the state accordingly - state.s0, state.s1 = s0, s1 +""" - # we haven't reached t_goal yet (t1 length(s.stepper.options.tspan) + return finish end - # at this point we have t0 < t_goal < t1 so we can apply the - # interpolation to get a value of the solution at t_goal - - # TODO: is this necessary? The solver should store the value of dy. - solver.ode.F!(t0,s0.y,s0.dy) - solver.ode.F!(t1,s1.y,s1.dy) - - if options.stopevent(t1,s1.y) - function stopfun(t) - hermite_interp!(state.ytmp,t,s0,s1) - res = Int(options.stopevent(t,state.ytmp)) - return 2*res-1 # -1 if false, +1 if true - end - t_goal = findroot(stopfun, [s0.t,s1.t], options.roottol) - # state.ytmp is already overwriten to the correct result as a - # side-effect of calling stopfun + # our next output time + tout = s.stepper.options.tspan[i] + + sol = s.stepper.solver # this looks weird + sol_state = state.solver_state + + # try to get a new set of steps enclosing `tout`, if all goes + # right we end up with t∈[t1,t2] with + # t1,_=output(state.step_prev) + # t2,_=output(state.solver_state) + status = next_interval!(sol,sol_state,state.step_prev,tout) + if status != cont + # we failed to get enough steps + warn("Iterator was exhausted before the dense output could produce the output.") + return abort else - hermite_interp!(state.ytmp,t_goal,s0,s1) + # we got the steps, proceed with the interpolation, this fills + # the state.step_out with y(tout) and y'(tout) according to an + # interpolation algorithm specific for a method (defaults to + # hermite O(3)). 
+ interpolate!(state.solver_state,state.step_prev,tout,state.step_out) + + # increase the counter + state.tout_i += 1 + return cont end +end - # update the last output time - state.last_tout = t_goal +""" - return ((t_goal,state.ytmp),state) +Pulls the results from the (solver,state) pair using `onestep!` until +we reach a first step such that `t>=tout`. It fills the `steps` +variable with (Step(t1,y(t1),dy(t1)),Step(t2,y(t2),dy(t2))), where +`t1` is is the step before `tout` and `t2` is `>=tout`. In +other words `tout∈[t1,t2]`. -end +TODO: tdir +""" +function next_interval!(solver,state,step_prev,tout) + # get the current time + while true + # save the current state of solution + t, y, dy = output(state) + step_prev.t = t + copy!(step_prev.y,y) + copy!(step_prev.dy,dy) + + # try to perform a single step with the solver + status = onestep!(solver, state) + + if status != cont + return status + else + t1 = step_prev.t + t2,_ = output(state) + if t1 <= tout <= t2 + # we found the enclosing times + return cont + end + end + end -function done{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}, state::DenseState) + # this will never happen + return abort +end - options = s.stepper.options +""" - return ( - state.solver_done || - state.last_tout >= options.tspan[end] || - options.stopevent(state.s1.t,state.s1.y) - ) -end +Make dense output using Hermite interpolation of order O(3). Updates +yout in-place. Only needs y and dy at t1 and t2. +Input +- state::AbstractState -- state of a stepper at time t2 +- step_prev::Step -- solution at time t1 respectively +- tout -- time of requested output +- yout -- inplace y output +Ref: Hairer & Wanner p.190 +TODO: tdir (I think this works for any order of t1 and t2 but needs +verifying. -function hermite_interp!(y,t,step0::Step,step1::Step) - # For dense output see Hairer & Wanner p.190 using Hermite - # interpolation. Updates y in-place. - # - # f_0 = f(x_0 , y_0) , f_1 = f(x_0 + h, y_1 ) - # this is O(3). TODO for higher order. 
+TODO: fill dy - y0, y1 = step0.y, step1.y - dy0, dy1 = step0.dy, step1.dy +TODO: arbitrary order method (change step_prev::Step to step_prevs::Tuple{Step,N}) - if t == step0.t - copy!(y,y0) - elseif t == step1.t - copy!(y,y1) +""" +function interpolate!{T,Y}(state::AbstractState,step_prev::Step{T,Y},tout::T,step_out::Step{T,Y}) + t1,y1,dy1 = output(step_prev) + t2,y2,dy2 = output(state) + if tout==t1 + copy!(step_out.y,y1) + elseif tout==t2 + copy!(step_out.y,y2) else - dt = step1.t-step0.t - theta = (t-step0.t)/dt - for i=1:length(y0) - y[i] = ((1-theta)*y0[i] + theta*y1[i] + theta*(theta-1) * - ((1-2*theta)*(y1[i]-y0[i]) + (theta-1)*dt*dy0[i] + theta*dt*dy1[i]) ) + dt = t2-t1 + theta = (tout-t1)/dt + for i=1:length(y1) + step_out.y[i] = + (1-theta)*y1[i] + + theta*y2[i] + + theta*(theta-1) * + ( (1-2*theta)*(y2[i]-y1[i]) + + (theta-1)*dt*dy1[i] + + theta*dt*dy2[i]) end end + step_out.t = tout + return nothing end diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 7e87fa93d..8e789f58c 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -78,7 +78,7 @@ type RKState{T,Y} <: AbstractState{T,Y} end -output(st::RKState) = st.step.t, st.step.y +output(st::RKState) = st.step.t, st.step.y, st.work.ks[1] function show(io::IO, state::RKState) diff --git a/src/types.jl b/src/types.jl index 7a462749e..0984faf8d 100644 --- a/src/types.jl +++ b/src/types.jl @@ -126,6 +126,7 @@ type Step{T,Y} dy::Y end +output(s::Step) = s.t, s.y, s.dy function show(io::IO, state::Step) println("t =$(state.t)") @@ -197,6 +198,7 @@ function Base.done(s::Solver, st) # Determine whether the next step can be made by calling the # stepping routine. onestep! will take the step in-place. status = onestep!(s, st) + # can't this be a function on a status? if status==cont return false elseif status==finish @@ -272,6 +274,17 @@ Output: - Bool: `false`: continue iteration, `true`: terminate iteration. substeps. 
+ +TODO: this effectively dispatches on the type of state, we should +splice the Solver as IVP and Stepper and make calls as follows + +``` +function onestep!(ode::ExplicitODE, stepper::DenseStepper, state) +``` + +We always access s.stepper and s.ode anyway and the definitions would +look more readable. + """ function onestep!(sol::Solver, state::AbstractState) opt = sol.stepper.options From 69a276c91f170586aa44deb3704c708a85fc0f91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Mon, 25 Jul 2016 20:49:05 +0200 Subject: [PATCH 055/113] Brought interfaces.jl up do date with the new dense --- src/interfaces.jl | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/interfaces.jl b/src/interfaces.jl index f567a5b70..08de1871b 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -8,23 +8,32 @@ tspan[end] is the last integration time. function ode{T,Y,S<:AbstractStepper}(F, y0::Y, tspan::AbstractVector{T}, stepper::Type{S}; + points = :all, kargs...) t0 = tspan[1] # construct a solver equation = explicit_ineff(t0,y0,F;kargs...) - dsolver = solve(equation, DenseStepper; - mehtod = stepper, - tspan = tspan, - kargs...) + if points == :all + solver = solve(equation, stepper; + tspan = tspan, + kargs...) + elseif points == :specified + solver = solve(equation, DenseStepper; + mehtod = stepper, + tspan = tspan, + kargs...) + else + error("Unsupported points value (should be :all or :specified)") + end # determine if we have to unpack y extract = Y <: Number tout = Array(T,0) yout = Array(Y,0) - for (t,y) in dsolver + for (t,y) in solver push!(tout,t) push!(yout, extract ? 
y[1] : copy(y)) end From 27d370c2a0c60ec70ade46f97129618be5ab1eb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Mon, 25 Jul 2016 20:54:23 +0200 Subject: [PATCH 056/113] Reverse time integration for dense output --- src/dense.jl | 1 + test/runtests.jl | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 40c30a2fb..50303f690 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -148,6 +148,7 @@ function next_interval!(solver,state,step_prev,tout) else t1 = step_prev.t t2,_ = output(state) + t1, t2 = sort([t1,t2]) if t1 <= tout <= t2 # we found the enclosing times return cont diff --git a/test/runtests.jl b/test/runtests.jl index e608988ee..2db33b649 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -57,12 +57,12 @@ for solver in solvers @test maximum(abs(yj-e.^tj)) < tol @test norm(yj-y,Inf)y, 1., [1:-.001:0;], initstep=0.001) - # @test maximum(abs(y-e.^(t-1))) < tol - # tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) - # @test maximum(abs(yj-e.^(tj-1))) < tol - # @test norm(yj-y,Inf)y, 1., [1:-.001:0;], initstep=0.001) + @test maximum(abs(y-e.^(t-1))) < tol + tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, J! 
= (t,y,dy)->dy[1]=1.0) + @test maximum(abs(yj-e.^(tj-1))) < tol + @test norm(yj-y,Inf) v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) From 54cb490a6d788a9c6f5c9520f61c91394c0310ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Mon, 25 Jul 2016 21:18:24 +0200 Subject: [PATCH 057/113] Fixed some problems with dense output --- src/dense.jl | 25 ++++++------ test/iterators.jl | 98 +++++++++++++++++++++++------------------------ 2 files changed, 63 insertions(+), 60 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 50303f690..cf8c56594 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -71,7 +71,9 @@ output(ds::DenseState) = output(ds.step_out) function init{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}) ode = s.stepper.solver.ode solver_state = init(s.stepper.solver) - step_prev = Step(ode.t0,similar(ode.y0),similar(ode.y0)) + dy0 = similar(ode.y0) + ode.F!(ode.t0,ode.y0,dy0) + step_prev = Step(ode.t0,copy(ode.y0),dy0) step_out = Step(ode.t0,similar(ode.y0),similar(ode.y0)) return DenseState(1,step_prev,step_out,solver_state) end @@ -103,7 +105,7 @@ function onestep!{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}, state::DenseSt # t1,_=output(state.step_prev) # t2,_=output(state.solver_state) status = next_interval!(sol,sol_state,state.step_prev,tout) - if status != cont + if status == abort # we failed to get enough steps warn("Iterator was exhausted before the dense output could produce the output.") return abort @@ -132,8 +134,17 @@ TODO: tdir """ function next_interval!(solver,state,step_prev,tout) - # get the current time + while true + # get the current time + t1 = step_prev.t + t2,_ = output(state) + t1, t2 = sort([t1,t2]) + if t1 <= tout <= t2 + # we found the enclosing times + return cont + end + # save the current state of solution t, y, dy = output(state) step_prev.t = t @@ -145,14 +156,6 @@ function next_interval!(solver,state,step_prev,tout) if status != cont return status - else - t1 = step_prev.t - t2,_ = 
output(state) - t1, t2 = sort([t1,t2]) - if t1 <= tout <= t2 - # we found the enclosing times - return cont - end end end diff --git a/test/iterators.jl b/test/iterators.jl index a77a059c5..471370bf8 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -6,7 +6,7 @@ const testsets = [ :jac => (t,y,dy)->dy[1]=0.0, :sol => t->[6t], :isscalar => true, - :name => "y'=6t", + :name => "y'=6", :initstep => 0.1), Dict( :F! => (t,y,dy)->dy[1]=2t, @@ -49,16 +49,16 @@ const testsets = [ # Testing function ode const steppers = [ODE.RKStepperFixed{:feuler}, - ODE.RKStepperFixed{:midpoint}, - ODE.RKStepperFixed{:heun}, - ODE.RKStepperFixed{:rk4}, - ODE.RKStepperAdaptive{:rk21}, - ODE.RKStepperAdaptive{:rk23}, - ODE.RKStepperAdaptive{:rk45}, - ODE.RKStepperAdaptive{:dopri5}, - ODE.RKStepperAdaptive{:feh78}, - #ODE.ModifiedRosenbrockStepper{} -] + ODE.RKStepperFixed{:midpoint}, + ODE.RKStepperFixed{:heun}, + ODE.RKStepperFixed{:rk4}, + ODE.RKStepperAdaptive{:rk21}, + ODE.RKStepperAdaptive{:rk23}, + ODE.RKStepperAdaptive{:rk45}, + ODE.RKStepperAdaptive{:dopri5}, + ODE.RKStepperAdaptive{:feh78}, + #ODE.ModifiedRosenbrockStepper{} + ] warn("TODO: re-enable some tests") function test_ode() @@ -76,44 +76,44 @@ function test_ode() F(t,y) = (dy = similar(y); F!(t,y,dy); return dy) for points = [:specified, :all] - # if ts[:isscalar] - # # test the ODE.odeXX scalar interface (if the equation is scalar) - # Fscal = (t,y)->F(t,[y])[1] - # y0scal = y0[1] - # # with jacobian - # tj,yj = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0,J! = jac!) 
- # @test_approx_eq_eps yj map(x->sol(x)[1],tj) tol - # # without jacobian - # t,y = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0) - # @test_approx_eq_eps y map(x->sol(x)[1],tj) tol - - # # results with and without jacobian should be exactly the same - # @test_approx_eq yj y - - # if points == :specified - # # test if we covered the whole timespan - # @test length(tspan) == length(t) == length(tj) - # @test_approx_eq tspan t - # @test_approx_eq tspan tj - # end - # end - - # # ODE.odeXX vector interface - # # with jacobian - # tj,yj = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0,J! = jac!) - # @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol - # # without jacobian - # t,y = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0) - # @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol - - # @test_approx_eq hcat(yj...) hcat(y...) - - # if points == :specified - # # test if we covered the whole timespan - # @test length(tspan) == length(t) == length(tj) - # @test_approx_eq tspan t - # @test_approx_eq tspan tj - # end + if ts[:isscalar] + # test the ODE.odeXX scalar interface (if the equation is scalar) + Fscal = (t,y)->F(t,[y])[1] + y0scal = y0[1] + # with jacobian + tj,yj = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0,J! = jac!) + @test_approx_eq_eps yj map(x->sol(x)[1],tj) tol + # without jacobian + t,y = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0) + @test_approx_eq_eps y map(x->sol(x)[1],tj) tol + + # results with and without jacobian should be exactly the same + @test_approx_eq yj y + + if points == :specified + # test if we covered the whole timespan + @test length(tspan) == length(t) == length(tj) + @test_approx_eq tspan t + @test_approx_eq tspan tj + end + end + + # ODE.odeXX vector interface + # with jacobian + tj,yj = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0,J! = jac!) + @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) 
tol + # without jacobian + t,y = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0) + @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol + + @test_approx_eq hcat(yj...) hcat(y...) + + if points == :specified + # test if we covered the whole timespan + @test length(tspan) == length(t) == length(tj) + @test_approx_eq tspan t + @test_approx_eq tspan tj + end # test the iterator interface equation = ODE.ExplicitODE(tspan[1],y0,F!) From 0db44f116c628f7e037247345ff265e411b9d376 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 19:27:45 +0200 Subject: [PATCH 058/113] ode23s mostly working --- src/interfaces.jl | 21 ++--- src/ode23s.jl | 215 +++++++++++++++++++++++++++++++-------------- src/runge-kutta.jl | 5 +- src/types.jl | 7 ++ test/runtests.jl | 70 +++++++-------- 5 files changed, 204 insertions(+), 114 deletions(-) diff --git a/src/interfaces.jl b/src/interfaces.jl index 08de1871b..24c7c39e8 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -48,15 +48,16 @@ Solves an ODE `y'=F(t,y)` with initial conditions `y0` and `t0`. """ ode23s(F,y0,t0;kargs...) = ode_conv(F,y0,t0,ModifiedRosenbrockStepper; kargs...) -ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:feuler}; kargs...) -ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:midpoint}; kargs...) -ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:heun}; kargs...) -ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:rk4}; kargs...) -ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk21}; kargs...) -ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk23}; kargs...) -ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk45}; kargs...) -ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:dopri5}; kargs...) -ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:feh78}; kargs...) +# ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:feuler}; kargs...) 
+# ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:midpoint}; kargs...) +# ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:heun}; kargs...) +# ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:rk4}; kargs...) +# ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk21}; kargs...) +# ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk23}; kargs...) +# ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk45}; kargs...) +# ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:dopri5}; kargs...) +# const ode45 = ode45_dp +# ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:feh78}; kargs...) function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},stepper;kargs...) @@ -75,7 +76,7 @@ function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},stepper;kargs...) end -const ode45 = ode45_dp + """ diff --git a/src/ode23s.jl b/src/ode23s.jl index eba85e059..56c1b86c4 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -3,34 +3,22 @@ # # [SR97] L.F. Shampine and M.W. Reichelt: "The MATLAB ODE Suite," SIAM Journal on Scientific Computing, Vol. 18, 1997, pp. 1–22 -immutable ModifiedRosenbrockStepper{T<:Number,O<:Options} <: AbstractStepper - d ::T - e32::T - options::O +immutable ModifiedRosenbrockStepper{T<:Number} <: AbstractStepper + options::AdaptiveOptions{T} end @compat function (::Type{ModifiedRosenbrockStepper{T}}){T}(;options...) - d = T(1/(2 + sqrt(2))) - e32 = T(6 + sqrt(2)) - opt = AdaptiveOptions{T}(;options...) - ModifiedRosenbrockStepper(d,e32,opt) + ModifiedRosenbrockStepper( AdaptiveOptions{T}(;options...) ) end - -# TODO: is this correct? order(::ModifiedRosenbrockStepper) = 2 - name(::ModifiedRosenbrockStepper) = "Modified Rosenbrock Stepper" - isadaptive(::ModifiedRosenbrockStepper) = true # define the set of ODE problems with which this stepper can work solve{T,S<:ModifiedRosenbrockStepper}(ode::ExplicitODE{T}, stepper::Type{S}; options...) 
= Solver(ode,stepper{T}(;options...)) - -# lower level interface (iterator) - """ The state for the Rosenbrock stepper @@ -45,6 +33,11 @@ type RosenbrockState{T,Y} <: AbstractState dt ::T F1 ::Vector{Y} F2 ::Vector{Y} + k1 ::Vector{Y} + k2 ::Vector{Y} + k3 ::Vector{Y} + ynew ::Vector{Y} + dtold::T J ::Matrix{Y} iters::Int end @@ -60,7 +53,7 @@ function show(io::IO, state::RosenbrockState) end -function start{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}) +function init{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}) t = s.ode.t0 dt = s.stepper.options.initstep y = s.ode.y0 @@ -73,6 +66,11 @@ function start{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}) dt, zero(y), # F1 zero(y), # F2 + zero(y), # k1 + zero(y), # k2 + zero(y), # k3 + zero(y), # ynew + dt*0, # dtnew J, # J 0) # iters # initialize the derivative and the Jacobian @@ -82,76 +80,165 @@ function start{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}) return state end +# two irrational constants +Base.@irrational const_d 0.2928932188134525 (1/(2 + sqrt(BigFloat(2)))) +Base.@irrational const_e 7.414213562373095 (6 + sqrt(BigFloat(2))) -function next{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state) +function trialstep!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state::RosenbrockState) + # unpack stepper = s.stepper ode = s.ode step = state.step opts = s.stepper.options - F1, F2, J = state.F1, state.F2, state.J - + k1,k2,k3,ynew = state.k1, state.k2, state.k3, state.ynew t, dt, y, dy = step.t, state.dt, step.y, step.dy - # F!, J! = ode.F!, ode.J! - d, e32 = stepper.d, stepper.e32 - + F! = ode.F! 
F0 = dy - while true + # see whether we're done + if tdir(s)*t >= tdir(s)*opts.tstop + # nothing left to integrate + return finish + end - state.iters += 1 - if state.iters > opts.maxiters - return ((step.t,step.y), state) - end + # increment iteration counter + state.iters += 1 + if state.iters > opts.maxiters + println("Reached maximum number of iterations $(opts.maxiters)") + return abort + end - # trim the step size to match the bounds of integration - dt = min(opts.tstop-t,dt) + W = lufact!( eye(J) - dt*const_d*J ) - W = lufact!( eye(J) - dt*d*J ) + # Approximate time-derivative of F, we are using F1 as a + # temporary array + F!(t+dt/100,y,F1) + tder = 100*const_d*(F1-F0) - # Approximate time-derivative of F, we are using F1 as a - # temporary array - ode.F!(t+dt/100,y,F1) - tder = 100*d*(F1-F0) + # modified Rosenbrock formula + # TODO: update k1,k2,k3 in-place + k1[:] = W \ (F0 + tder) + F!(t+dt/2, y+dt*k1/2, F1) + k2[:] = W \ (F1 - k1) + k1 + for i=1:length(y) + ynew[i] = y[i] + dt*k2[i] + end + F!(t+dt, ynew, F2) + k3[:] = W \ (F2 - const_e*(k2 - F1) - 2*(k1 - F0) + tder ) - # modified Rosenbrock formula - # TODO: allocate some temporary space for these variables - k1 = W \ (F0 + tder) - ode.F!(t+dt/2, y+dt*k1/2, F1) - k2 = W \ (F1 - k1) + k1 - ynew = y + dt*k2 - ode.F!(t+dt, ynew, F2) - k3 = W \ (F2 - e32*(k2 - F1) - 2*(k1 - F0) + tder ) + return cont +end - delta = max(opts.reltol*max(opts.norm(y)::eltype(y), - opts.norm(ynew)::eltype(y)), - opts.abstol) # allowable error +function errorcontrol!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state::RosenbrockState) - err = (dt/6)*(opts.norm(k1 - 2*k2 + k3)::eltype(y))/delta # error estimate + stepper = s.stepper + ode = s.ode + step = state.step + opts = s.stepper.options + k1,k2,k3 = state.k1, state.k2, state.k3 + k1,k2,k3,ynew = state.k1, state.k2, state.k3, state.ynew + t, dt, y, dy = step.t, state.dt, step.y, step.dy - # upon a failed step decrease the step size - dtnew = 
min(opts.maxstep, - dt*0.8*err^(-1/3) ) + # allowable error + delta = max(opts.reltol*max(opts.norm(y), opts.norm(ynew),opts.abstol)) - # check if the new solution is acceptable - if err <= 1 + # error estimate + err = (abs(dt)/6)*(opts.norm(k1 - 2*k2 + k3))/delta - # update the state and return - step.t = t+dt - state.dt = dtnew - step.y[:] = ynew - step.dy[:] = F2 - ode.J!(step.t,step.y,J) + # new step-size + dtnew = tdir(s)*min(opts.maxstep, abs(dt)*0.8*err^(-1/3) ) - return ((step.t,step.y), state) - else - # continue with the decreased time step - dt = dtnew - end + # trim in case newdt > dt + dtnew = tdir(s)*min(abs(dtnew), abs(opts.tstop-(t+dt))) - end + state.dtold = dt + state.dt = dtnew + return err, cont +end - return tout, yout +function accept!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state::RosenbrockState) + step = state.step + # update the state + step.t = step.t+state.dtold + step.y[:] = state.ynew + step.dy[:] = state.F2 + s.ode.J!(step.t,step.y,state.J) + return cont end + + + +# function onestep!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state) + +# stepper = s.stepper +# ode = s.ode +# step = state.step +# opts = s.stepper.options + +# F1, F2, J = state.F1, state.F2, state.J + +# t, dt, y, dy = step.t, state.dt, step.y, step.dy +# # F!, J! = ode.F!, ode.J! 
+ +# F0 = dy + +# while true + +# state.iters += 1 +# if state.iters > opts.maxiters +# return ((step.t,step.y), state) +# end + +# # trim the step size to match the bounds of integration +# dt = min(opts.tstop-t,dt) + +# W = lufact!( eye(J) - dt*d*J ) + +# # Approximate time-derivative of F, we are using F1 as a +# # temporary array +# ode.F!(t+dt/100,y,F1) +# tder = 100*const_d*(F1-F0) + +# # modified Rosenbrock formula +# # TODO: allocate some temporary space for these variables +# k1 = W \ (F0 + tder) +# ode.F!(t+dt/2, y+dt*k1/2, F1) +# k2 = W \ (F1 - k1) + k1 +# ynew = y + dt*k2 +# ode.F!(t+dt, ynew, F2) +# k3 = W \ (F2 - const_e*(k2 - F1) - 2*(k1 - F0) + tder ) + +# delta = max(opts.reltol*max(opts.norm(y)::eltype(y), +# opts.norm(ynew)::eltype(y)), +# opts.abstol) # allowable error + +# err = (dt/6)*(opts.norm(k1 - 2*k2 + k3)::eltype(y))/delta # error estimate + +# # upon a failed step decrease the step size +# dtnew = min(opts.maxstep, +# dt*0.8*err^(-1/3) ) + +# # check if the new solution is acceptable +# if err <= 1 + +# # update the state and return +# step.t = t+dt +# state.dt = dtnew +# step.y[:] = ynew +# step.dy[:] = F2 +# ode.J!(step.t,step.y,J) + +# return ((step.t,step.y), state) +# else +# # continue with the decreased time step +# dt = dtnew +# end + +# end + +# return tout, yout + +# end diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 8e789f58c..ff534364d 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -73,14 +73,11 @@ type RKState{T,Y} <: AbstractState{T,Y} newdt ::T work ::RKWorkArrays{Y} timeout ::Int - # This is not currently incremented with each step - iters ::Int + iters ::Int # iters[-y[2]; y[1]], [1., 2.], [-5., 0])[1]) > 1 -warn("TODO: Re-enable ROBER") -# error("stop here for now") -# # rober testcase from http://www.unige.ch/~hairer/testset/testset.html -# let -# println("ROBER test case") -# function f(t, y) -# ydot = similar(y) -# ydot[1] = -0.04*y[1] + 1.0e4*y[2]*y[3] -# ydot[3] = 3.0e7*y[2]*y[2] -# 
ydot[2] = -ydot[1] - ydot[3] -# ydot -# end -# t = [0., 1e11] -# t,y = ODE.ode23s(f, [1.0, 0.0, 0.0], t; abstol=1e-8, reltol=1e-8, -# maxstep=1e11/10, minstep=1e11/1e18) +# rober testcase from http://www.unige.ch/~hairer/testset/testset.html +let + println("ROBER test case") + function f(t, y) + ydot = similar(y) + ydot[1] = -0.04*y[1] + 1.0e4*y[2]*y[3] + ydot[3] = 3.0e7*y[2]*y[2] + ydot[2] = -ydot[1] - ydot[3] + ydot + end + t = [0., 1e11] + t,y = ODE.ode23s(f, [1.0, 0.0, 0.0], t; abstol=1e-8, reltol=1e-8, + maxstep=1e11/10, minstep=1e11/1e18) -# refsol = [0.2083340149701255e-07, -# 0.8333360770334713e-13, -# 0.9999999791665050] # reference solution at tspan[2] -# @test norm(refsol-y[end], Inf) < 2e-10 -# end + refsol = [0.2083340149701255e-07, + 0.8333360770334713e-13, + 0.9999999791665050] # reference solution at tspan[2] + @test norm(refsol-y[end], Inf) < 2e-10 +end include("interface-tests.jl") include("iterators.jl") From 1089bdd2d1eeae922fe892722e1ccd4450a054dc Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 19:54:08 +0200 Subject: [PATCH 059/113] Working ode23s --- src/interfaces.jl | 20 ++++++++++---------- src/ode23s.jl | 2 +- src/types.jl | 4 +++- test/iterators.jl | 3 +-- test/runtests.jl | 32 ++++++++++++++++---------------- 5 files changed, 31 insertions(+), 30 deletions(-) diff --git a/src/interfaces.jl b/src/interfaces.jl index 24c7c39e8..0857cfb32 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -48,16 +48,16 @@ Solves an ODE `y'=F(t,y)` with initial conditions `y0` and `t0`. """ ode23s(F,y0,t0;kargs...) = ode_conv(F,y0,t0,ModifiedRosenbrockStepper; kargs...) -# ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:feuler}; kargs...) -# ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:midpoint}; kargs...) -# ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:heun}; kargs...) -# ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:rk4}; kargs...) -# ode21(F,y0,t0;kargs...) 
= ode_conv(F,y0,t0,RKStepperAdaptive{:rk21}; kargs...) -# ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk23}; kargs...) -# ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk45}; kargs...) -# ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:dopri5}; kargs...) -# const ode45 = ode45_dp -# ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:feh78}; kargs...) +ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:feuler}; kargs...) +ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:midpoint}; kargs...) +ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:heun}; kargs...) +ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:rk4}; kargs...) +ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk21}; kargs...) +ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk23}; kargs...) +ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk45}; kargs...) +ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:dopri5}; kargs...) +const ode45 = ode45_dp +ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:feh78}; kargs...) function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},stepper;kargs...) diff --git a/src/ode23s.jl b/src/ode23s.jl index 56c1b86c4..2ee5378d6 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -142,7 +142,7 @@ function errorcontrol!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O, t, dt, y, dy = step.t, state.dt, step.y, step.dy # allowable error - delta = max(opts.reltol*max(opts.norm(y), opts.norm(ynew),opts.abstol)) + delta = max(opts.reltol*max(opts.norm(y), opts.norm(ynew)),opts.abstol) # error estimate err = (abs(dt)/6)*(opts.norm(k1 - 2*k2 + k3))/delta diff --git a/src/types.jl b/src/types.jl index 596fafd51..cb955608a 100644 --- a/src/types.jl +++ b/src/types.jl @@ -107,8 +107,10 @@ abstract AbstractState{T,Y} """ Returns variables returned during iterations. 
+ +output(st::AbstractState) = t,y,dy """ -output(st::AbstractState) = st.step.t, st.step.y +output(st::AbstractState) = st.step.t, st.step.y, st.step.dy diff --git a/test/iterators.jl b/test/iterators.jl index 471370bf8..338f0ca2b 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -57,10 +57,9 @@ const steppers = [ODE.RKStepperFixed{:feuler}, ODE.RKStepperAdaptive{:rk45}, ODE.RKStepperAdaptive{:dopri5}, ODE.RKStepperAdaptive{:feh78}, - #ODE.ModifiedRosenbrockStepper{} + ODE.ModifiedRosenbrockStepper ] -warn("TODO: re-enable some tests") function test_ode() tol = 0.002 diff --git a/test/runtests.jl b/test/runtests.jl index a3f1f338b..f94ab8f06 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -6,23 +6,23 @@ const tol = 1e-2 solvers = [ ## Non-stiff # fixed step - # ODE.ode1, - # ODE.ode2_midpoint, - # ODE.ode2_heun, - # ODE.ode4, - # ODE.ode4ms, - # ODE.ode5ms, - # # adaptive - # ODE.ode21, # this fails on Travis with 0.4?! TODO revert once fixed. - # ODE.ode23, - # ODE.ode45_dp, - # ODE.ode45_fe, - # ODE.ode78, + ODE.ode1, + ODE.ode2_midpoint, + ODE.ode2_heun, + ODE.ode4, + ODE.ode4ms, + ODE.ode5ms, + # adaptive + ODE.ode21, # this fails on Travis with 0.4?! TODO revert once fixed. 
+ ODE.ode23, + ODE.ode45_dp, + ODE.ode45_fe, + ODE.ode78, - # ## Stiff - # # fixed-step - # ODE.ode4s_s, - # ODE.ode4s_kr, + ## Stiff + # fixed-step + ODE.ode4s_s, + ODE.ode4s_kr, # adaptive ODE.ode23s] From 581ba3bd8879116724052132c63313d518c012dc Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 20:43:14 +0200 Subject: [PATCH 060/113] added example --- examples/ex_iter.jl | 45 +++++++++++++++++++++++++++++++++++++++++++++ test/runtests.jl | 2 +- 2 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 examples/ex_iter.jl diff --git a/examples/ex_iter.jl b/examples/ex_iter.jl new file mode 100644 index 000000000..d0768af67 --- /dev/null +++ b/examples/ex_iter.jl @@ -0,0 +1,45 @@ +using ODE + +# Define IVP-instance which holds the mathematical problem definition: +t0 = 0.0 +y0 = [1.0] +ode = ODE.ExplicitODE(t0,y0,(t,y,dy)->dy[1]=y[1]) + +# options for the solver +opts = Dict(:initstep=>0.1, + :tstop=>1.0, + :reltol=>1e-5, + :abstol=>1e-5) +# pick your solver +stepper = [ODE.RKStepperAdaptive{:rk45}, + ODE.ModifiedRosenbrockStepper][2] + +# create a Solver instance +sol = ODE.solve(ode,stepper;opts...) + + # iterate over the solution +println("t, y, err") +for (t,y) in sol + println((t,y[1],abs(y[1]-e.^t))) +end + +# or collect it +println(collect(sol)) + +### Reverse time integration, rest as above +t0 = 1.0 +y0 = [1.0] +ode = ODE.ExplicitODE(t0,y0,(t,y,dy)->dy[1]=y[1]) +opts = Dict(:initstep=>0.1, + :tstop=>0.0, + :reltol=>1e-5, + :abstol=>1e-5) + +sol = ODE.solve(ode,stepper;opts...) + +println("t, y, err") +for (t,y) in sol # iterate over the solution + println((t,y[1],abs(y[1]-e.^(t-1)))) +end + +println(collect(sol)) diff --git a/test/runtests.jl b/test/runtests.jl index f94ab8f06..9f75e3898 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -13,7 +13,7 @@ solvers = [ ODE.ode4ms, ODE.ode5ms, # adaptive - ODE.ode21, # this fails on Travis with 0.4?! TODO revert once fixed. 
+ ODE.ode21, ODE.ode23, ODE.ode45_dp, ODE.ode45_fe, From 7382d03b719cc3d4bbf64cf17719f6f3d8ae56e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Mon, 25 Jul 2016 22:46:11 +0200 Subject: [PATCH 061/113] Updated the badges --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c7c04982e..cdaa88f21 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ Various basic Ordinary Differential Equation solvers implemented in Julia. [![Join the chat at https://gitter.im/pwl/ODE.jl](https://badges.gitter.im/pwl/ODE.jl.svg)](https://gitter.im/pwl/ODE.jl?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Build Status](https://travis-ci.org/JuliaLang/ODE.jl.svg?branch=master)](https://travis-ci.org/JuliaLang/ODE.jl) +[![Build Status](https://travis-ci.org/JuliaODE/ODE.jl.svg?branch=master)](https://travis-ci.org/JuliaODE/ODE.jl) [![Coverage Status](https://img.shields.io/coveralls/JuliaLang/ODE.jl.svg)](https://coveralls.io/r/JuliaLang/ODE.jl) -[![ODE](http://pkg.julialang.org/badges/ODE_0.3.svg)](http://pkg.julialang.org/?pkg=ODE&ver=0.3) [![ODE](http://pkg.julialang.org/badges/ODE_0.4.svg)](http://pkg.julialang.org/?pkg=ODE&ver=0.4) +[![ODE](http://pkg.julialang.org/badges/ODE_0.5.svg)](http://pkg.julialang.org/?pkg=ODE&ver=0.5) Pull requests are always highly welcome to fix bugs, add solvers, or anything else! 
From 46b8cfb4a38c5d1b85227c06ddfd9958b7616e79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Mon, 25 Jul 2016 22:44:38 +0200 Subject: [PATCH 062/113] Travis --- .travis.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index f00154aa1..d6b8e161b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ os: - osx - linux julia: - - 0.3 + - 0.4 - release - nightly git: @@ -12,6 +12,5 @@ notifications: email: false script: - if [[ -a .git/shallow ]]; then git fetch --unshallow; fi - - julia -e 'Pkg.clone(pwd()); Pkg.build("ODE"); Pkg.test("ODE"; coverage=true)'; -after_success: + - julia -e 'Pkg.clone("https://github.com/JuliaODE/ODE.jl.git"); Pkg.build("ODE"); Pkg.test("ODE"; coverage=true)'; - julia -e 'cd(Pkg.dir("ODE")); Pkg.add("Coverage"); using Coverage; Coveralls.submit(Coveralls.process_folder()); Codecov.submit(process_folder())'; From 00e1667734f0047a60292beb618ee3aea2bfc170 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Mon, 25 Jul 2016 22:46:11 +0200 Subject: [PATCH 063/113] Updated the badges --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index c7c04982e..e3c894341 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ Various basic Ordinary Differential Equation solvers implemented in Julia. 
[![Join the chat at https://gitter.im/pwl/ODE.jl](https://badges.gitter.im/pwl/ODE.jl.svg)](https://gitter.im/pwl/ODE.jl?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Build Status](https://travis-ci.org/JuliaLang/ODE.jl.svg?branch=master)](https://travis-ci.org/JuliaLang/ODE.jl) -[![Coverage Status](https://img.shields.io/coveralls/JuliaLang/ODE.jl.svg)](https://coveralls.io/r/JuliaLang/ODE.jl) -[![ODE](http://pkg.julialang.org/badges/ODE_0.3.svg)](http://pkg.julialang.org/?pkg=ODE&ver=0.3) +[![Build Status](https://travis-ci.org/JuliaODE/ODE.jl.svg?branch=master)](https://travis-ci.org/JuliaODE/ODE.jl) +[![Coverage Status](https://img.shields.io/coveralls/JuliaODE/ODE.jl.svg)](https://coveralls.io/r/JuliaODE/ODE.jl) [![ODE](http://pkg.julialang.org/badges/ODE_0.4.svg)](http://pkg.julialang.org/?pkg=ODE&ver=0.4) +[![ODE](http://pkg.julialang.org/badges/ODE_0.5.svg)](http://pkg.julialang.org/?pkg=ODE&ver=0.5) Pull requests are always highly welcome to fix bugs, add solvers, or anything else! 
From 66eacc6bf430a82164c6125fede6ed28ab8edd09 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 21:31:19 +0200 Subject: [PATCH 064/113] updated according to pwl's comments [ci skip] --- src/ode23s.jl | 80 ++-------------------------------------------- src/runge-kutta.jl | 2 +- 2 files changed, 4 insertions(+), 78 deletions(-) diff --git a/src/ode23s.jl b/src/ode23s.jl index 2ee5378d6..5aef73aae 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -106,7 +106,7 @@ function trialstep!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, # increment iteration counter state.iters += 1 if state.iters > opts.maxiters - println("Reached maximum number of iterations $(opts.maxiters)") + warn("Reached maximum number of iterations $(opts.maxiters)") return abort end @@ -162,83 +162,9 @@ function accept!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, st step = state.step # update the state step.t = step.t+state.dtold - step.y[:] = state.ynew - step.dy[:] = state.F2 + copy!(step.y, state.ynew) + copy!(step.dy, state.F2) s.ode.J!(step.t,step.y,state.J) return cont end - - - -# function onestep!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state) - -# stepper = s.stepper -# ode = s.ode -# step = state.step -# opts = s.stepper.options - -# F1, F2, J = state.F1, state.F2, state.J - -# t, dt, y, dy = step.t, state.dt, step.y, step.dy -# # F!, J! = ode.F!, ode.J! 
- -# F0 = dy - -# while true - -# state.iters += 1 -# if state.iters > opts.maxiters -# return ((step.t,step.y), state) -# end - -# # trim the step size to match the bounds of integration -# dt = min(opts.tstop-t,dt) - -# W = lufact!( eye(J) - dt*d*J ) - -# # Approximate time-derivative of F, we are using F1 as a -# # temporary array -# ode.F!(t+dt/100,y,F1) -# tder = 100*const_d*(F1-F0) - -# # modified Rosenbrock formula -# # TODO: allocate some temporary space for these variables -# k1 = W \ (F0 + tder) -# ode.F!(t+dt/2, y+dt*k1/2, F1) -# k2 = W \ (F1 - k1) + k1 -# ynew = y + dt*k2 -# ode.F!(t+dt, ynew, F2) -# k3 = W \ (F2 - const_e*(k2 - F1) - 2*(k1 - F0) + tder ) - -# delta = max(opts.reltol*max(opts.norm(y)::eltype(y), -# opts.norm(ynew)::eltype(y)), -# opts.abstol) # allowable error - -# err = (dt/6)*(opts.norm(k1 - 2*k2 + k3)::eltype(y))/delta # error estimate - -# # upon a failed step decrease the step size -# dtnew = min(opts.maxstep, -# dt*0.8*err^(-1/3) ) - -# # check if the new solution is acceptable -# if err <= 1 - -# # update the state and return -# step.t = t+dt -# state.dt = dtnew -# step.y[:] = ynew -# step.dy[:] = F2 -# ode.J!(step.t,step.y,J) - -# return ((step.t,step.y), state) -# else -# # continue with the decreased time step -# dt = dtnew -# end - -# end - -# return tout, yout - -# end diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index ff534364d..fe2db86fe 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -174,7 +174,7 @@ function trialstep!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state if abs(dt) < options.minstep # TODO: use some sort of logging system - println("Minimum step size reached") + warn("Minimum step size reached") return abort end From 7d612050d75337e969afecbec9925027cfc1ec0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Mon, 25 Jul 2016 21:40:37 +0200 Subject: [PATCH 065/113] Minor refactoring --- src/dense.jl | 19 +++--- src/ode23s.jl | 140 
++++++++++++--------------------------------- src/runge-kutta.jl | 52 ++++++++++------- src/types.jl | 33 ++++------- 4 files changed, 89 insertions(+), 155 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index cf8c56594..62315a231 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -68,9 +68,10 @@ end output(ds::DenseState) = output(ds.step_out) -function init{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}) - ode = s.stepper.solver.ode - solver_state = init(s.stepper.solver) +function init(ode::ExplicitODE, + stepper::DenseStepper) + ode = stepper.solver.ode + solver_state = init(stepper.solver.ode, stepper.solver.stepper) dy0 = similar(ode.y0) ode.F!(ode.t0,ode.y0,dy0) step_prev = Step(ode.t0,copy(ode.y0),dy0) @@ -88,16 +89,18 @@ wouldn't. """ -function onestep!{O<:ExplicitODE,S<:DenseStepper}(s::Solver{O,S}, state::DenseState) +function onestep!(ode::ExplicitODE, + stepper::DenseStepper, + state::DenseState) i = state.tout_i - if i > length(s.stepper.options.tspan) + if i > length(stepper.options.tspan) return finish end # our next output time - tout = s.stepper.options.tspan[i] + tout = stepper.options.tspan[i] - sol = s.stepper.solver # this looks weird + sol = stepper.solver # this looks weird sol_state = state.solver_state # try to get a new set of steps enclosing `tout`, if all goes @@ -152,7 +155,7 @@ function next_interval!(solver,state,step_prev,tout) copy!(step_prev.dy,dy) # try to perform a single step with the solver - status = onestep!(solver, state) + status = onestep!(solver.ode, solver.stepper, state) if status != cont return status diff --git a/src/ode23s.jl b/src/ode23s.jl index 2ee5378d6..7a64ba4c4 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -5,15 +5,21 @@ immutable ModifiedRosenbrockStepper{T<:Number} <: AbstractStepper options::AdaptiveOptions{T} + const_d::T + const_e::T end @compat function (::Type{ModifiedRosenbrockStepper{T}}){T}(;options...) - ModifiedRosenbrockStepper( AdaptiveOptions{T}(;options...) 
) + const_d = 1/(2+sqrt(T(2))) + const_e = 6+sqrt(T(2)) + + ModifiedRosenbrockStepper( AdaptiveOptions{T}(;options...), const_d, const_e ) end order(::ModifiedRosenbrockStepper) = 2 name(::ModifiedRosenbrockStepper) = "Modified Rosenbrock Stepper" isadaptive(::ModifiedRosenbrockStepper) = true +tdir(ode::ExplicitODE, stepper::ModifiedRosenbrockStepper) = sign(stepper.options.tstop - ode.t0) # define the set of ODE problems with which this stepper can work solve{T,S<:ModifiedRosenbrockStepper}(ode::ExplicitODE{T}, stepper::Type{S}; options...) = @@ -53,10 +59,11 @@ function show(io::IO, state::RosenbrockState) end -function init{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}) - t = s.ode.t0 - dt = s.stepper.options.initstep - y = s.ode.y0 +function init{T}(ode::ExplicitODE{T}, + stepper::ModifiedRosenbrockStepper) + t = ode.t0 + dt = stepper.options.initstep + y = ode.y0 dy = zero(y) J = Array(eltype(y),length(y),length(y)) @@ -73,32 +80,31 @@ function init{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}) dt*0, # dtnew J, # J 0) # iters + # initialize the derivative and the Jacobian - s.ode.F!(t,y,step.dy) - s.ode.J!(t,y,state.J) + ode.F!(t,y,step.dy) + ode.J!(t,y,state.J) return state end -# two irrational constants -Base.@irrational const_d 0.2928932188134525 (1/(2 + sqrt(BigFloat(2)))) -Base.@irrational const_e 7.414213562373095 (6 + sqrt(BigFloat(2))) - -function trialstep!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state::RosenbrockState) +function trialstep!(ode::ExplicitODE, + stepper::ModifiedRosenbrockStepper, + state::RosenbrockState) # unpack - stepper = s.stepper - ode = s.ode step = state.step - opts = s.stepper.options + opts = stepper.options F1, F2, J = state.F1, state.F2, state.J k1,k2,k3,ynew = state.k1, state.k2, state.k3, state.ynew t, dt, y, dy = step.t, state.dt, step.y, step.dy F! = ode.F! 
F0 = dy + td = tdir(ode,stepper) + # see whether we're done - if tdir(s)*t >= tdir(s)*opts.tstop + if td*t >= td*opts.tstop # nothing left to integrate return finish end @@ -110,12 +116,12 @@ function trialstep!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, return abort end - W = lufact!( eye(J) - dt*const_d*J ) + W = lufact!( eye(J) - dt*stepper.const_d*J ) # Approximate time-derivative of F, we are using F1 as a # temporary array F!(t+dt/100,y,F1) - tder = 100*const_d*(F1-F0) + tder = 100*stepper.const_d*(F1-F0) # modified Rosenbrock formula # TODO: update k1,k2,k3 in-place @@ -126,119 +132,49 @@ function trialstep!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, ynew[i] = y[i] + dt*k2[i] end F!(t+dt, ynew, F2) - k3[:] = W \ (F2 - const_e*(k2 - F1) - 2*(k1 - F0) + tder ) + k3[:] = W \ (F2 - stepper.const_e*(k2 - F1) - 2*(k1 - F0) + tder ) return cont end -function errorcontrol!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state::RosenbrockState) +function errorcontrol!(ode::ExplicitODE, + stepper::ModifiedRosenbrockStepper, + state::RosenbrockState) - stepper = s.stepper - ode = s.ode step = state.step - opts = s.stepper.options + opts = stepper.options k1,k2,k3 = state.k1, state.k2, state.k3 k1,k2,k3,ynew = state.k1, state.k2, state.k3, state.ynew t, dt, y, dy = step.t, state.dt, step.y, step.dy - # allowable error + td = tdir(ode,stepper) + + # allowable error delta = max(opts.reltol*max(opts.norm(y), opts.norm(ynew)),opts.abstol) # error estimate err = (abs(dt)/6)*(opts.norm(k1 - 2*k2 + k3))/delta # new step-size - dtnew = tdir(s)*min(opts.maxstep, abs(dt)*0.8*err^(-1/3) ) + dtnew = td*min(opts.maxstep, abs(dt)*0.8*err^(-1/3) ) # trim in case newdt > dt - dtnew = tdir(s)*min(abs(dtnew), abs(opts.tstop-(t+dt))) + dtnew = td*min(abs(dtnew), abs(opts.tstop-(t+dt))) state.dtold = dt state.dt = dtnew return err, cont end -function accept!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state::RosenbrockState) 
+function accept!(ode::ExplicitODE, + stepper::ModifiedRosenbrockStepper, + state::RosenbrockState) step = state.step # update the state step.t = step.t+state.dtold step.y[:] = state.ynew step.dy[:] = state.F2 - s.ode.J!(step.t,step.y,state.J) + ode.J!(step.t,step.y,state.J) return cont end - - - -# function onestep!{O<:ExplicitODE,S<:ModifiedRosenbrockStepper}(s::Solver{O,S}, state) - -# stepper = s.stepper -# ode = s.ode -# step = state.step -# opts = s.stepper.options - -# F1, F2, J = state.F1, state.F2, state.J - -# t, dt, y, dy = step.t, state.dt, step.y, step.dy -# # F!, J! = ode.F!, ode.J! - -# F0 = dy - -# while true - -# state.iters += 1 -# if state.iters > opts.maxiters -# return ((step.t,step.y), state) -# end - -# # trim the step size to match the bounds of integration -# dt = min(opts.tstop-t,dt) - -# W = lufact!( eye(J) - dt*d*J ) - -# # Approximate time-derivative of F, we are using F1 as a -# # temporary array -# ode.F!(t+dt/100,y,F1) -# tder = 100*const_d*(F1-F0) - -# # modified Rosenbrock formula -# # TODO: allocate some temporary space for these variables -# k1 = W \ (F0 + tder) -# ode.F!(t+dt/2, y+dt*k1/2, F1) -# k2 = W \ (F1 - k1) + k1 -# ynew = y + dt*k2 -# ode.F!(t+dt, ynew, F2) -# k3 = W \ (F2 - const_e*(k2 - F1) - 2*(k1 - F0) + tder ) - -# delta = max(opts.reltol*max(opts.norm(y)::eltype(y), -# opts.norm(ynew)::eltype(y)), -# opts.abstol) # allowable error - -# err = (dt/6)*(opts.norm(k1 - 2*k2 + k3)::eltype(y))/delta # error estimate - -# # upon a failed step decrease the step size -# dtnew = min(opts.maxstep, -# dt*0.8*err^(-1/3) ) - -# # check if the new solution is acceptable -# if err <= 1 - -# # update the state and return -# step.t = t+dt -# state.dt = dtnew -# step.y[:] = ynew -# step.dy[:] = F2 -# ode.J!(step.t,step.y,J) - -# return ((step.t,step.y), state) -# else -# # continue with the decreased time step -# dt = dtnew -# end - -# end - -# return tout, yout - -# end diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 
ff534364d..2ee6f1f45 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -43,6 +43,8 @@ order(stepper::RKStepper) = minimum(order(stepper.tableau)) name(stepper::RKStepper) = stepper.tableau.name +tdir(ode::ExplicitODE, stepper::RKStepper) = sign(stepper.options.tstop - ode.t0) + solve{T,S<:RKStepper}(ode::ExplicitODE{T}, stepper::Type{S}; options...) = Solver(ode,stepper{T}(;options...)) @@ -86,14 +88,13 @@ function show(io::IO, state::RKState) end -function init{O<:ExplicitODE,S<:RKStepper}(s::Solver{O,S}) - stepper = s.stepper - t0, dt0, y0 = s.ode.t0, stepper.options.initstep, s.ode.y0 +function init(ode::ExplicitODE,stepper::RKStepper) + t0, dt0, y0 = ode.t0, stepper.options.initstep, ode.y0 # clip the dt0 if t0+dt0 exceeds tstop - dt0 = tdir(s)*min(abs(dt0),abs(stepper.options.tstop-t0)) + dt0 = tdir(ode,stepper)*min(abs(dt0),abs(stepper.options.tstop-t0)) - lk = lengthks(s.stepper.tableau) + lk = lengthks(stepper.tableau) work = RKWorkArrays(zero(y0), # y zero(y0), # ynew zero(y0), # yerr @@ -105,7 +106,7 @@ function init{O<:ExplicitODE,S<:RKStepper}(s::Solver{O,S}) end # pre-initialize work.ks[1] - s.ode.F!(t0,y0,work.ks[1]) + ode.F!(t0,y0,work.ks[1]) step = Step(t0,copy(y0),copy(work.ks[1])) @@ -119,23 +120,25 @@ end ##################### -function onestep!{O<:ExplicitODE,S<:RKStepperFixed}(s::Solver{O,S}, state::RKState) +function onestep!(ode::ExplicitODE, stepper::RKStepperFixed, state::RKState) step = state.step work = state.work - if tdir(s)*step.t >= tdir(s)*s.stepper.options.tstop + td = tdir(ode,stepper) + + if td*step.t >= td*stepper.options.tstop # nothing left to integrate return finish end dof = length(step.y) - b = s.stepper.tableau.b - dt = tdir(s)*min(abs(state.dt),abs(s.stepper.options.tstop-step.t)) + b = stepper.tableau.b + dt = td*min(abs(state.dt),abs(stepper.options.tstop-step.t)) copy!(work.ynew,step.y) for k=1:length(b) - calc_next_k!(work, k, s.ode, s.stepper.tableau, step, dt) + calc_next_k!(work, k, ode, stepper.tableau, 
step, dt) for d=1:dof work.ynew[d] += dt * b[k]*work.ks[k][d] end @@ -156,18 +159,19 @@ const timeout_const = 5 # `trialstep!` ends with a step computed for the stepsize `state.dt` # and stores it in `work.y`, so `work.y` contains a candidate for # `y(t+dt)` with `dt=state.dt`. -function trialstep!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state::RKState) +function trialstep!(ode::ExplicitODE, stepper::RKStepperAdaptive, state::RKState) work = state.work step = state.step - stepper = sol.stepper tableau = stepper.tableau options = stepper.options + td = tdir(ode,stepper) + # use the proposed step size to perform the computations state.dt = state.newdt dt = state.dt - if tdir(sol)*step.t >= tdir(sol)*options.tstop + if td*step.t >= td*options.tstop # nothing left to integrate return finish end @@ -179,26 +183,28 @@ function trialstep!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state end # work.y and work.yerr and work.ks are updated after this step - rk_embedded_step!(work, sol.ode, tableau, step, dt) + rk_embedded_step!(work, ode, tableau, step, dt) return cont end # computes the error for the candidate solution `y(t+dt)` with # `dt=state.dt` and proposes a new time step -function errorcontrol!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, - state::RKState) +function errorcontrol!(ode::ExplicitODE, + stepper::RKStepperAdaptive, + state::RKState) work = state.work step = state.step - stepper = sol.stepper tableau = stepper.tableau timeout = state.timeout options = stepper.options err, state.newdt, state.timeout = stepsize_hw92!(work, step, tableau, state.dt, state.timeout, options) + td = tdir(ode,stepper) + # trim in case newdt > dt - state.newdt = tdir(sol)*min(abs(state.newdt), abs(options.tstop-(state.step.t+state.dt))) + state.newdt = td*min(abs(state.newdt), abs(options.tstop-(state.step.t+state.dt))) if err > 1 # The error is too large, the step will be rejected. 
We reset @@ -212,16 +218,18 @@ end # Here we assume that trialstep! and errorcontrol! have already been # called, that is `work.y` holds `y(t+dt)` with `dt=state.dt`, and # error was small enough for us to keep `y(t+dt)` as the next step. -function accept!{O<:ExplicitODE,S<:RKStepperAdaptive}(sol::Solver{O,S}, state::RKState) +function accept!(ode::ExplicitODE, + stepper::RKStepperAdaptive, + state::RKState) work = state.work step = state.step - tableau = sol.stepper.tableau + tableau = stepper.tableau # preload ks[1] for the next step if tableau.isFSAL copy!(work.ks[1],work.ks[end]) else - sol.ode.F!(step.t+state.dt, work.ynew, work.ks[1]) + ode.F!(step.t+state.dt, work.ynew, work.ks[1]) end # Swap bindings of y and ytrial, avoids one copy diff --git a/src/types.jl b/src/types.jl index cb955608a..25f7a2eab 100644 --- a/src/types.jl +++ b/src/types.jl @@ -164,8 +164,6 @@ end Base.eltype{O}(::Type{Solver{O}}) = eltype(O) Base.eltype{O}(::Solver{O}) = eltype(O) -tdir(s::Solver) = sign(s.stepper.options.tstop - s.ode.t0) - # filter the wrong combinations of ode and stepper solve{O,S}(ode::O, stepper::Type{S}, options...) = error("The $S doesn't support $O") @@ -201,12 +199,12 @@ end # TODO: store the current Step outside of the actual state # Base.start(sol::Solver) = (init(sol), Step(ode.sol)) -Base.start(sol::Solver) = init(sol) +Base.start(sol::Solver) = init(sol.ode,sol.stepper) function Base.done(s::Solver, st) # Determine whether the next step can be made by calling the # stepping routine. onestep! will take the step in-place. - status = onestep!(s, st) + status = onestep!(s.ode, s.stepper, st) # can't this be a function on a status? if status==cont return false @@ -283,27 +281,16 @@ Output: - Bool: `false`: continue iteration, `true`: terminate iteration. substeps. 
- -TODO: this effectively dispatches on the type of state, we should -splice the Solver as IVP and Stepper and make calls as follows - -``` -function onestep!(ode::ExplicitODE, stepper::DenseStepper, state) -``` - -We always access s.stepper and s.ode anyway and the definitions would -look more readable. - """ -function onestep!(sol::Solver, state::AbstractState) - opt = sol.stepper.options +function onestep!(ode::IVP, stepper::AbstractStepper, state::AbstractState) + opt = stepper.options while true - status = trialstep!(sol, state) - err, status_err = errorcontrol!(sol, state) + status = trialstep!(ode, stepper, state) + err, status_err = errorcontrol!(ode, stepper, state) status &= status_err if err<=1 # a successful step - status &= accept!(sol, state) + status &= accept!(ode, stepper, state) return status elseif status==abort || status==finish return status @@ -338,7 +325,7 @@ compute the magnitude of its error. If the error is small enough Returns `Status`. """ -trialstep!{O,S}(::Solver{O,S}, ::AbstractState) = +trialstep!{S<:AbstractStepper}(::IVP, ::S, ::AbstractState) = error("Function `trialstep!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") """ @@ -353,7 +340,7 @@ If the `status==abort` then the integration is aborted, status values of `cont` and `finish` are ignored. """ -errorcontrol!{T}(::Solver,::AbstractState{T}) = +errorcontrol!{S<:AbstractStepper}(::IVP, ::S, ::AbstractState) = error("Function `errorcontrol!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") """ @@ -364,5 +351,5 @@ a small enough error. Returns `Status`. 
""" -accept!{O,S}(::Solver{O,S}, ::AbstractState) = +accept!{S<:AbstractStepper}(::IVP, ::S, ::AbstractState) = error("Function `accept!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") From 10595ae99d940fe1086e7c93ecec10e61b7e4e11 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Mon, 25 Jul 2016 21:54:40 +0200 Subject: [PATCH 066/113] updated REQUIRE --- .travis.yml | 1 - REQUIRE | 1 - 2 files changed, 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index d6b8e161b..7ef74c555 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,6 @@ os: - osx - linux julia: - - 0.4 - release - nightly git: diff --git a/REQUIRE b/REQUIRE index 772acbb6e..62155a5ae 100644 --- a/REQUIRE +++ b/REQUIRE @@ -1,5 +1,4 @@ julia 0.4 Polynomials -Iterators ForwardDiff Compat 0.4.1 From 531abe4a09740751797e8a80ba4dc9ef0cf48abd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Mon, 25 Jul 2016 21:55:34 +0200 Subject: [PATCH 067/113] Changed tspan to tout --- src/dense.jl | 25 ++++++++++++------------- src/interfaces.jl | 18 +++++++++--------- src/options.jl | 8 ++++---- test/iterators.jl | 36 ++++++++++++++++++------------------ 4 files changed, 43 insertions(+), 44 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 62315a231..84200f1db 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -13,21 +13,20 @@ Dense output options: """ immutable DenseOptions{T<:Number} <: Options{T} - tspan ::Vector{T} - tstop ::T + tout::Vector{T} # points ::Symbol # stopevent::S # roottol ::T end @compat function (::Type{DenseOptions{T}}){T}(; - tstop = T(Inf), - tspan::Vector = T[tstop], + tstop = T(Inf), + tout::Vector = T[tstop], # points::Symbol= :all, # stopevent::S = (t,y)->false, # roottol = eps(T)^T(1//3), kargs...) - DenseOptions{T}(tspan,tstop) + DenseOptions{T}(tout) end @@ -36,7 +35,7 @@ end A stepper specialized in dense output. 
It wraps around another `Solver` and stores the subsequent steps generated by `Solver` and interpolates the results on request (currently this means at the -output times stored in `options.tspan`). +output times stored in `options.tout`). """ immutable DenseStepper{S<:Solver,O<:DenseOptions} <: AbstractStepper @@ -82,7 +81,7 @@ end """ -TODO: rename `tspan` to `tout` and drop the support for +TODO: rename `tout` to `tout` and drop the support for `points=:all` outside of the `odeXX`? Maybe even `odeXX(;tout=[...])` would use dense output while `odeXX(;)` wouldn't. @@ -93,31 +92,31 @@ function onestep!(ode::ExplicitODE, stepper::DenseStepper, state::DenseState) i = state.tout_i - if i > length(stepper.options.tspan) + if i > length(stepper.options.tout) return finish end # our next output time - tout = stepper.options.tspan[i] + ti = stepper.options.tout[i] sol = stepper.solver # this looks weird sol_state = state.solver_state - # try to get a new set of steps enclosing `tout`, if all goes + # try to get a new set of steps enclosing `ti`, if all goes # right we end up with t∈[t1,t2] with # t1,_=output(state.step_prev) # t2,_=output(state.solver_state) - status = next_interval!(sol,sol_state,state.step_prev,tout) + status = next_interval!(sol,sol_state,state.step_prev,ti) if status == abort # we failed to get enough steps warn("Iterator was exhausted before the dense output could produce the output.") return abort else # we got the steps, proceed with the interpolation, this fills - # the state.step_out with y(tout) and y'(tout) according to an + # the state.step_out with y(ti) and y'(ti) according to an # interpolation algorithm specific for a method (defaults to # hermite O(3)). 
- interpolate!(state.solver_state,state.step_prev,tout,state.step_out) + interpolate!(state.solver_state,state.step_prev,ti,state.step_out) # increase the counter state.tout_i += 1 diff --git a/src/interfaces.jl b/src/interfaces.jl index 0857cfb32..ec2bc810d 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -6,23 +6,23 @@ tspan[end] is the last integration time. """ function ode{T,Y,S<:AbstractStepper}(F, y0::Y, - tspan::AbstractVector{T}, + tout::AbstractVector{T}, stepper::Type{S}; points = :all, kargs...) - t0 = tspan[1] + t0 = tout[1] # construct a solver equation = explicit_ineff(t0,y0,F;kargs...) if points == :all solver = solve(equation, stepper; - tspan = tspan, + tout = tout, kargs...) elseif points == :specified solver = solve(equation, DenseStepper; mehtod = stepper, - tspan = tspan, + tout = tout, kargs...) else error("Unsupported points value (should be :all or :specified)") @@ -31,14 +31,14 @@ function ode{T,Y,S<:AbstractStepper}(F, y0::Y, # determine if we have to unpack y extract = Y <: Number - tout = Array(T,0) - yout = Array(Y,0) + to = Array(T,0) + yo = Array(Y,0) for (t,y) in solver - push!(tout,t) - push!(yout, extract ? y[1] : copy(y)) + push!(to,t) + push!(yo, extract ? y[1] : copy(y)) end - return (tout,yout) + return (to,yo) end """ diff --git a/src/options.jl b/src/options.jl index 7a4b58ba6..26d00a960 100644 --- a/src/options.jl +++ b/src/options.jl @@ -31,8 +31,8 @@ immutable AdaptiveOptions{T,N<:Function,O<:Function} <: Options{T} end @compat function (::Type{AdaptiveOptions{T}}){T,N,O}(; - tspan = T[Inf], - tstop = tspan[end], + tout = T[Inf], + tstop = tout[end], reltol = eps(T)^T(1//3)/10, abstol = eps(T)^T(1//2)/10, minstep = 10*eps(T), @@ -63,8 +63,8 @@ immutable FixedOptions{T} <: Options{T} end @compat function (::Type{FixedOptions{T}}){T}(; - tspan = T[Inf], - tstop = tspan[end], + tout = T[Inf], + tstop = tout[end], initstep = 10*eps(T), kargs...) 
@assert initstep>=0 diff --git a/test/iterators.jl b/test/iterators.jl index 338f0ca2b..aa5a199ea 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -2,7 +2,7 @@ const testsets = [ Dict( :F! => (t,y,dy)->dy[1]=6.0, :y0 => [0.], - :tspan => [0:0.1:1;], + :tout => [0:0.1:1;], :jac => (t,y,dy)->dy[1]=0.0, :sol => t->[6t], :isscalar => true, @@ -11,7 +11,7 @@ const testsets = [ Dict( :F! => (t,y,dy)->dy[1]=2t, :y0 => [0.], - :tspan => [0:0.001:1;], + :tout => [0:0.001:1;], :jac => (t,y,dy)->dy[1]=0.0, :sol => t->[t^2], :isscalar => true, @@ -20,7 +20,7 @@ const testsets = [ Dict( :F! => (t,y,dy)->dy[1]=y[1], :y0 => [1.0], - :tspan => [0:0.001:1;], + :tout => [0:0.001:1;], :jac => (t,y,dy)->dy[1]=1.0, :sol => t->[exp(t)], :isscalar => true, @@ -29,7 +29,7 @@ const testsets = [ Dict( :F! => (t,y,dy)->dy[1]=y[1], :y0 => [1.0], - :tspan => [1:-0.001:0;], + :tout => [1:-0.001:0;], :jac => (t,y,dy)->dy[1]=1.0, :sol => t->[exp(t-1)], :isscalar => true, @@ -38,7 +38,7 @@ const testsets = [ Dict( :F! => (t,y,dy)->(dy[1]=-y[2];dy[2]=y[1]), :y0 => [1.0,2.0], - :tspan => [0:.1:1;], + :tout => [0:.1:1;], :jac => (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]]), :sol => t->[cos(t)-2*sin(t) 2*cos(t)+sin(t)], :isscalar => false, @@ -68,7 +68,7 @@ function test_ode() for ts in testsets println("Testing problem $(ts[:name])") - tspan, h0, stepper = ts[:tspan], ts[:initstep], rks + tout, h0, stepper = ts[:tout], ts[:initstep], rks y0, F!, jac!, sol = ts[:y0], ts[:F!], ts[:jac], ts[:sol] @@ -80,10 +80,10 @@ function test_ode() Fscal = (t,y)->F(t,[y])[1] y0scal = y0[1] # with jacobian - tj,yj = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0,J! = jac!) + tj,yj = ODE.ode(Fscal,y0scal,tout,stepper,points=points,initstep = h0,J! = jac!) 
@test_approx_eq_eps yj map(x->sol(x)[1],tj) tol # without jacobian - t,y = ODE.ode(Fscal,y0scal,tspan,stepper,points=points,initstep = h0) + t,y = ODE.ode(Fscal,y0scal,tout,stepper,points=points,initstep = h0) @test_approx_eq_eps y map(x->sol(x)[1],tj) tol # results with and without jacobian should be exactly the same @@ -91,32 +91,32 @@ function test_ode() if points == :specified # test if we covered the whole timespan - @test length(tspan) == length(t) == length(tj) - @test_approx_eq tspan t - @test_approx_eq tspan tj + @test length(tout) == length(t) == length(tj) + @test_approx_eq tout t + @test_approx_eq tout tj end end # ODE.odeXX vector interface # with jacobian - tj,yj = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0,J! = jac!) + tj,yj = ODE.ode(F,y0,tout,stepper,points=points,initstep = h0,J! = jac!) @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol # without jacobian - t,y = ODE.ode(F,y0,tspan,stepper,points=points,initstep = h0) + t,y = ODE.ode(F,y0,tout,stepper,points=points,initstep = h0) @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol @test_approx_eq hcat(yj...) hcat(y...) if points == :specified # test if we covered the whole timespan - @test length(tspan) == length(t) == length(tj) - @test_approx_eq tspan t - @test_approx_eq tspan tj + @test length(tout) == length(t) == length(tj) + @test_approx_eq tout t + @test_approx_eq tout tj end # test the iterator interface - equation = ODE.ExplicitODE(tspan[1],y0,F!) - opts = Dict(:tspan => tspan, + equation = ODE.ExplicitODE(tout[1],y0,F!) 
+ opts = Dict(:tout => tout, :initstep => h0, :points => points) From 69aebb86c6fbfd6237ac443ef90975cdaea18131 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Tue, 26 Jul 2016 08:53:01 +0200 Subject: [PATCH 068/113] Tableau restructure Moved RK-tableaus to RK-solvers Marked files which define the core-interface in ODE.jl --- src/ODE.jl | 16 +++-- src/helpers.jl | 65 +++++++++--------- src/runge-kutta.jl | 160 +++++++++++++++++++++++++++++++++++++++++++- src/tableaus.jl | 161 --------------------------------------------- 4 files changed, 202 insertions(+), 200 deletions(-) diff --git a/src/ODE.jl b/src/ODE.jl index a78cfedf4..408db3eff 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -10,22 +10,28 @@ using ForwardDiff import Base.convert, Base.show import Base: start, next, done, call, collect -# basic type definitions +# Core infrastructure +# +# When wrapping a new solver it will need to use and conform to +# methods and types within these files. +# +# Note, if we go the MathProgBase.jl route, then these files would go +# into ODEBase.jl. include("types.jl") +include("tableaus.jl") include("options.jl") include("helpers.jl") -# dense output wrapper +# Dense output wrapper include("dense.jl") -# particular solvers +# Particular solvers include("ode23s.jl") include("runge-kutta.jl") -# include("multistep.jl") include("adams-bashford-moulton.jl") include("rosenbrock.jl") -# include("taylor.jl") +# User interface to solvers include("interfaces.jl") end # module ODE diff --git a/src/helpers.jl b/src/helpers.jl index d21548cf8..28afc685f 100644 --- a/src/helpers.jl +++ b/src/helpers.jl @@ -1,35 +1,36 @@ -""" - -A simple bisection algorithm for finding a root of a solution f(x)=0 -starting within the range x∈rng, the result is a point x₀ which is -located within the distance eps from the true root of f(x)=0. For -this algorithm to work we need f(rng[1]) to have a different sign then -f(rng[2]). 
- -""" -function findroot(f,rng,eps) - xl, xr = rng - fl, fr = f(xl), f(xr) - - if fl*fr > 0 || xl > xr - error("Inconsistent bracket") - end - - while xr-xl > eps - xm = (xl+xr)/2 - fm = f(xm) - - if fm*fr > 0 - xr = xm - fr = fm - else - xl = xm - fl = fm - end - end - - return (xr+xl)/2 -end +## TODO: reactivate when introducing events/rootfinding +# """ + +# A simple bisection algorithm for finding a root of a solution f(x)=0 +# starting within the range x∈rng, the result is a point x₀ which is +# located within the distance eps from the true root of f(x)=0. For +# this algorithm to work we need f(rng[1]) to have a different sign then +# f(rng[2]). + +# """ +# function findroot(f,rng,eps) +# xl, xr = rng +# fl, fr = f(xl), f(xr) + +# if fl*fr > 0 || xl > xr +# error("Inconsistent bracket") +# end + +# while xr-xl > eps +# xm = (xl+xr)/2 +# fm = f(xm) + +# if fm*fr > 0 +# xr = xm +# fr = fm +# else +# xl = xm +# fl = fm +# end +# end + +# return (xr+xl)/2 +# end # generate a jacobian using ForwardDiff function forward_jacobian(F,y0::AbstractArray) diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 2f15f82d1..a39423399 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -1,10 +1,166 @@ # This file contains the implementation of explicit Runkge-Kutta # solver from (Hairer & Wanner 1992 p.134, p.165-169). -include("tableaus.jl") +########################################### +# Tableaus for explicit Runge-Kutta methods +########################################### +immutable TableauRKExplicit{T} <: Tableau{T} + order::(@compat(Tuple{Vararg{Int}})) # the order of the methods + a::Matrix{T} + # one or several row vectors. First row is used for the step, + # second for error calc. 
+ b::Matrix{T} + c::Vector{T} + isFSAL::Bool + s::Int + name::String + function TableauRKExplicit(name,order,a,b,c) + s = length(c) + @assert c[1]==0 + @assert istril(a) + @assert s==size(a,1)==size(a,2)==size(b,2) + @assert size(b,1)==length(order) + @assert norm(sum(a,2)-c'',Inf) Date: Tue, 26 Jul 2016 14:14:49 +0200 Subject: [PATCH 069/113] improved tableau situation in RK --- src/runge-kutta.jl | 232 ++++++++++++++++++++++----------------------- 1 file changed, 111 insertions(+), 121 deletions(-) diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index a39423399..795b2ec83 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -26,19 +26,6 @@ immutable TableauRKExplicit{T} <: Tableau{T} end end - -function TableauRKExplicit{T}(name::AbstractString, order::(@compat(Tuple{Vararg{Int}})), - a::Matrix{T}, b::Matrix{T}, c::Vector{T}) - TableauRKExplicit{T}(name, order, a, b, c) -end - - -function TableauRKExplicit(name::AbstractString, order::(@compat(Tuple{Vararg{Int}})), T::Type, - a::Matrix, b::Matrix, c::Vector) - TableauRKExplicit{T}(name, order, convert(Matrix{T},a), - convert(Matrix{T},b), convert(Vector{T},c) ) -end - lengthks(tab::TableauRKExplicit) = length(tab.c) Base.convert{Tnew<:Real,T}(::Type{TableauRKExplicit{Tnew}}, tab::TableauRKExplicit{T}) = @@ -50,114 +37,6 @@ Base.convert{Tnew<:Real,T}(::Type{TableauRKExplicit{Tnew}}, tab::TableauRKExplic isexplicit(b::TableauRKExplicit) = istril(b.a) # Test whether it's an explicit method isadaptive(b::TableauRKExplicit) = size(b.b, 1)==2 -## Tableaus for explicit RK methods -# Fixed step: -const tableaus_rk_explicit = Dict{Symbol,ODE.TableauRKExplicit{Rational{Int}}}() - -tableaus_rk_explicit[:feuler] = - TableauRKExplicit("Forward Euler",(1,), Rational{Int64}, - zeros(Int,1,1), - [1]', - [0] - ) - -tableaus_rk_explicit[:midpoint] = - TableauRKExplicit("Midpoint",(2,), Rational{Int64}, - [0 0 - 1//2 0], - [0, 1]', - [0, 1//2] - ) - -tableaus_rk_explicit[:heun] = - TableauRKExplicit("Heun",(2,), 
Rational{Int64}, - [0 0 - 1 0], - [1//2, 1//2]', - [0, 1]) - -tableaus_rk_explicit[:rk4] = - TableauRKExplicit("Runge-Kutta(4)",(4,),Rational{Int64}, - [0 0 0 0 - 1//2 0 0 0 - 0 1//2 0 0 - 0 0 1 0], - [1//6, 1//3, 1//3, 1//6]', - [0, 1//2, 1//2, 1]) - -# Adaptive step: -# Heun Euler https://en.wikipedia.org/wiki/Runge–Kutta_methods -tableaus_rk_explicit[:rk21] = - TableauRKExplicit("Heun Euler",(2,1), Rational{Int64}, - [0 0 - 1 0], - [1//2 1//2 - 1 0], - [0, 1]) - -# Bogacki–Shampine coefficients -tableaus_rk_explicit[:rk23] = - TableauRKExplicit("Bogacki-Shampine",(2,3), Rational{Int64}, - [0 0 0 0 - 1/2 0 0 0 - 0 3/4 0 0 - 2/9 1/3 4/9 0], - [7/24 1/4 1/3 1/8 - 2/9 1/3 4/9 0], - [0, 1//2, 3//4, 1] - ) - -# Fehlberg https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta%E2%80%93Fehlberg_method -tableaus_rk_explicit[:rk45] = - TableauRKExplicit("Fehlberg",(4,5),Rational{Int64}, - [ 0 0 0 0 0 0 - 1//4 0 0 0 0 0 - 3//32 9//32 0 0 0 0 - 1932//2197 -7200//2197 7296//2197 0 0 0 - 439//216 -8 3680//513 -845//4104 0 0 - -8//27 2 -3544//2565 1859//4104 -11//40 0 ], -[25//216 0 1408//2565 2197//4104 -1//5 0 - 16//135 0 6656//12825 28561//56430 -9//50 2//55], -[0, 1//4, 3//8, 12//13, 1, 1//2]) - -# Dormand-Prince https://en.wikipedia.org/wiki/Dormand%E2%80%93Prince_method -tableaus_rk_explicit[:dopri5] = - TableauRKExplicit("Dormand-Prince", (5,4), Rational{Int64}, - [0 0 0 0 0 0 0 - 1//5 0 0 0 0 0 0 - 3//40 9//40 0 0 0 0 0 - 44//45 -56//15 32//9 0 0 0 0 - 19372//6561 -25360//2187 64448//6561 -212//729 0 0 0 - 9017//3168 -355//33 46732//5247 49//176 -5103//18656 0 0 - 35//384 0 500//1113 125//192 -2187//6784 11//84 0], - [35//384 0 500//1113 125//192 -2187//6784 11//84 0 - 5179//57600 0 7571//16695 393//640 -92097//339200 187//2100 1//40], - [0, 1//5, 3//10, 4//5, 8//9, 1, 1] - ) - -# Fehlberg 7(8) coefficients -# Values from pag. 65, Fehlberg, Erwin. "Classical fifth-, sixth-, seventh-, and eighth-order Runge-Kutta formulas with stepsize control". 
-# National Aeronautics and Space Administration. -tableaus_rk_explicit[:feh78] = - TableauRKExplicit("Fehlberg(7,8)", (7,8), Rational{Int64}, - [ 0 0 0 0 0 0 0 0 0 0 0 0 0 - 2//27 0 0 0 0 0 0 0 0 0 0 0 0 - 1//36 1//12 0 0 0 0 0 0 0 0 0 0 0 - 1//24 0 1//8 0 0 0 0 0 0 0 0 0 0 - 5//12 0 -25//16 25//16 0 0 0 0 0 0 0 0 0 - 1//20 0 0 1//4 1//5 0 0 0 0 0 0 0 0 - -25//108 0 0 125//108 -65//27 125//54 0 0 0 0 0 0 0 - 31//300 0 0 0 61//225 -2//9 13//900 0 0 0 0 0 0 - 2 0 0 -53//6 704//45 -107//9 67//90 3 0 0 0 0 0 - -91//108 0 0 23//108 -976//135 311//54 -19//60 17//6 -1//12 0 0 0 0 - 2383//4100 0 0 -341//164 4496//1025 -301//82 2133//4100 45//82 45//164 18//41 0 0 0 - 3//205 0 0 0 0 -6//41 -3//205 -3//41 3//41 6//41 0 0 0 - -1777//4100 0 0 -341//164 4496//1025 -289//82 2193//4100 51//82 33//164 12//41 0 1 0], - [41//840 0 0 0 0 34//105 9//35 9//35 9//280 9//280 41//840 0 0 - 0 0 0 0 0 34//105 9//35 9//35 9//280 9//280 0 41//840 41//840], - [0, 2//27, 1//9, 1//6 , 5//12, 1//2 , 5//6 , 1//6 , 2//3 , 1//3 , 1 , 0, 1] - ) - ####################### # Solver implementation ####################### @@ -514,3 +393,114 @@ function calc_next_k!(work ::RKWorkArrays, ode.F!(t + c[i]*dt, work.y, work.ks[i]) return nothing end + +################################### +## Tableaus for explicit RK methods +################################### + +# Fixed step: +const tableaus_rk_explicit = Dict{Symbol,ODE.TableauRKExplicit{Rational{Int}}}() + +tableaus_rk_explicit[:feuler] = + TableauRKExplicit{Rational{Int64}}("Forward Euler",(1,), + zeros(Int,1,1), + [1]', + [0] + ) + +tableaus_rk_explicit[:midpoint] = + TableauRKExplicit{Rational{Int64}}("Midpoint",(2,), + [0 0 + 1//2 0], + [0, 1]', + [0, 1//2] + ) + +tableaus_rk_explicit[:heun] = + TableauRKExplicit{Rational{Int64}}("Heun",(2,), + [0 0 + 1 0], + [1//2, 1//2]', + [0, 1]) + +tableaus_rk_explicit[:rk4] = + TableauRKExplicit{Rational{Int64}}("Runge-Kutta(4)",(4,), + [0 0 0 0 + 1//2 0 0 0 + 0 1//2 0 0 + 0 0 1 0], + [1//6, 1//3, 1//3, 1//6]', + 
[0, 1//2, 1//2, 1]) + +# Adaptive step: +# Heun Euler https://en.wikipedia.org/wiki/Runge–Kutta_methods +tableaus_rk_explicit[:rk21] = + TableauRKExplicit{Rational{Int64}}("Heun Euler",(2,1), + [0 0 + 1 0], + [1//2 1//2 + 1 0], + [0, 1]) + +# Bogacki–Shampine coefficients +tableaus_rk_explicit[:rk23] = + TableauRKExplicit{Rational{Int64}}("Bogacki-Shampine",(2,3), + [0 0 0 0 + 1/2 0 0 0 + 0 3/4 0 0 + 2/9 1/3 4/9 0], + [7/24 1/4 1/3 1/8 + 2/9 1/3 4/9 0], + [0, 1//2, 3//4, 1] + ) + +# Fehlberg https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta%E2%80%93Fehlberg_method +tableaus_rk_explicit[:rk45] = + TableauRKExplicit{Rational{Int64}}("Fehlberg",(4,5), + [ 0 0 0 0 0 0 + 1//4 0 0 0 0 0 + 3//32 9//32 0 0 0 0 + 1932//2197 -7200//2197 7296//2197 0 0 0 + 439//216 -8 3680//513 -845//4104 0 0 + -8//27 2 -3544//2565 1859//4104 -11//40 0 ], +[25//216 0 1408//2565 2197//4104 -1//5 0 + 16//135 0 6656//12825 28561//56430 -9//50 2//55], +[0, 1//4, 3//8, 12//13, 1, 1//2]) + +# Dormand-Prince https://en.wikipedia.org/wiki/Dormand%E2%80%93Prince_method +tableaus_rk_explicit[:dopri5] = + TableauRKExplicit{Rational{Int64}}("Dormand-Prince", (5,4), + [0 0 0 0 0 0 0 + 1//5 0 0 0 0 0 0 + 3//40 9//40 0 0 0 0 0 + 44//45 -56//15 32//9 0 0 0 0 + 19372//6561 -25360//2187 64448//6561 -212//729 0 0 0 + 9017//3168 -355//33 46732//5247 49//176 -5103//18656 0 0 + 35//384 0 500//1113 125//192 -2187//6784 11//84 0], + [35//384 0 500//1113 125//192 -2187//6784 11//84 0 + 5179//57600 0 7571//16695 393//640 -92097//339200 187//2100 1//40], + [0, 1//5, 3//10, 4//5, 8//9, 1, 1] + ) + +# Fehlberg 7(8) coefficients +# Values from pag. 65, Fehlberg, Erwin. "Classical fifth-, sixth-, seventh-, and eighth-order Runge-Kutta formulas with stepsize control". +# National Aeronautics and Space Administration. 
+tableaus_rk_explicit[:feh78] = + TableauRKExplicit{Rational{Int64}}("Fehlberg(7,8)", (7,8), + [ 0 0 0 0 0 0 0 0 0 0 0 0 0 + 2//27 0 0 0 0 0 0 0 0 0 0 0 0 + 1//36 1//12 0 0 0 0 0 0 0 0 0 0 0 + 1//24 0 1//8 0 0 0 0 0 0 0 0 0 0 + 5//12 0 -25//16 25//16 0 0 0 0 0 0 0 0 0 + 1//20 0 0 1//4 1//5 0 0 0 0 0 0 0 0 + -25//108 0 0 125//108 -65//27 125//54 0 0 0 0 0 0 0 + 31//300 0 0 0 61//225 -2//9 13//900 0 0 0 0 0 0 + 2 0 0 -53//6 704//45 -107//9 67//90 3 0 0 0 0 0 + -91//108 0 0 23//108 -976//135 311//54 -19//60 17//6 -1//12 0 0 0 0 + 2383//4100 0 0 -341//164 4496//1025 -301//82 2133//4100 45//82 45//164 18//41 0 0 0 + 3//205 0 0 0 0 -6//41 -3//205 -3//41 3//41 6//41 0 0 0 + -1777//4100 0 0 -341//164 4496//1025 -289//82 2193//4100 51//82 33//164 12//41 0 1 0], + [41//840 0 0 0 0 34//105 9//35 9//35 9//280 9//280 41//840 0 0 + 0 0 0 0 0 34//105 9//35 9//35 9//280 9//280 0 41//840 41//840], + [0, 2//27, 1//9, 1//6 , 5//12, 1//2 , 5//6 , 1//6 , 2//3 , 1//3 , 1 , 0, 1] + ) From 29cfd4a2718d6cc729b741c5d5b7593fd6d50e59 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Tue, 26 Jul 2016 09:04:30 +0200 Subject: [PATCH 070/113] starting on rename --- examples/ex_iter.jl | 6 +++--- examples/test.jl | 6 +++--- src/dense.jl | 16 ++++++++-------- src/interfaces.jl | 24 ++++++++++++------------ src/ode23s.jl | 26 +++++++++++++------------- src/runge-kutta.jl | 31 +++++++++++++++---------------- src/types.jl | 38 ++++++++++++++++++-------------------- test/iterators.jl | 22 +++++++++++----------- 8 files changed, 83 insertions(+), 86 deletions(-) diff --git a/examples/ex_iter.jl b/examples/ex_iter.jl index d0768af67..61b929b52 100644 --- a/examples/ex_iter.jl +++ b/examples/ex_iter.jl @@ -11,10 +11,10 @@ opts = Dict(:initstep=>0.1, :reltol=>1e-5, :abstol=>1e-5) # pick your solver -stepper = [ODE.RKStepperAdaptive{:rk45}, - ODE.ModifiedRosenbrockStepper][2] +stepper = [ODE.RKIntegratorAdaptive{:rk45}, + ODE.ModifiedRosenbrockIntegrator][2] -# create a Solver instance +# create a Problem 
instance sol = ODE.solve(ode,stepper;opts...) # iterate over the solution diff --git a/examples/test.jl b/examples/test.jl index ed1aea44c..ce7fc706b 100644 --- a/examples/test.jl +++ b/examples/test.jl @@ -9,9 +9,9 @@ Y = Vector{T} t0 = zero(T) y0 = T[one(T)] -steppers = [# ODE.RKStepperAdaptive{:rk45}, - # ODE.RKStepperFixed{:feuler}, - ODE.DenseStepper] +steppers = [# ODE.RKIntegratorAdaptive{:rk45}, + # ODE.RKIntegratorFixed{:feuler}, + ODE.DenseOutput] for st in steppers ode = ODE.ExplicitODE(t0,y0,(t,y,dy)->dy[1]=y[1]) diff --git a/src/dense.jl b/src/dense.jl index 84200f1db..ef63cc896 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -33,24 +33,24 @@ end """ A stepper specialized in dense output. It wraps around another -`Solver` and stores the subsequent steps generated by `Solver` and +`Problem` and stores the subsequent steps generated by `Problem` and interpolates the results on request (currently this means at the output times stored in `options.tout`). """ -immutable DenseStepper{S<:Solver,O<:DenseOptions} <: AbstractStepper +immutable DenseOutput{S<:Problem,O<:DenseOptions} <: AbstractIntegrator #TODO: <: is needed at the moment after all. Remove solver::S options::O end -function solve{T,S<:DenseStepper}(ode::ExplicitODE{T}, +function solve{T,S<:DenseOutput}(ode::ExplicitODE{T}, ::Type{S}; - method = RKStepperAdaptive{:rk45}, + method = RKIntegratorAdaptive{:rk45}, options...) - sol_orig = Solver(ode,method{T}(; options...)) + sol_orig = Problem(ode,method{T}(; options...)) dense_options = DenseOptions{T}(; options...) dense_stepper = S(sol_orig,dense_options) - return Solver(ode,dense_stepper) + return Problem(ode,dense_stepper) # TODO: this is where it is needed. 
end """ @@ -68,7 +68,7 @@ end output(ds::DenseState) = output(ds.step_out) function init(ode::ExplicitODE, - stepper::DenseStepper) + stepper::DenseOutput) ode = stepper.solver.ode solver_state = init(stepper.solver.ode, stepper.solver.stepper) dy0 = similar(ode.y0) @@ -89,7 +89,7 @@ wouldn't. """ function onestep!(ode::ExplicitODE, - stepper::DenseStepper, + stepper::DenseOutput, state::DenseState) i = state.tout_i if i > length(stepper.options.tout) diff --git a/src/interfaces.jl b/src/interfaces.jl index ec2bc810d..f17a83581 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -5,7 +5,7 @@ tspan[end] is the last integration time. """ -function ode{T,Y,S<:AbstractStepper}(F, y0::Y, +function ode{T,Y,S<:AbstractIntegrator}(F, y0::Y, tout::AbstractVector{T}, stepper::Type{S}; points = :all, @@ -20,7 +20,7 @@ function ode{T,Y,S<:AbstractStepper}(F, y0::Y, tout = tout, kargs...) elseif points == :specified - solver = solve(equation, DenseStepper; + solver = solve(equation, DenseOutput; mehtod = stepper, tout = tout, kargs...) @@ -47,17 +47,17 @@ end Solves an ODE `y'=F(t,y)` with initial conditions `y0` and `t0`. """ -ode23s(F,y0,t0;kargs...) = ode_conv(F,y0,t0,ModifiedRosenbrockStepper; kargs...) -ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:feuler}; kargs...) -ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:midpoint}; kargs...) -ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:heun}; kargs...) -ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperFixed{:rk4}; kargs...) -ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk21}; kargs...) -ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk23}; kargs...) -ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:rk45}; kargs...) -ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:dopri5}; kargs...) +ode23s(F,y0,t0;kargs...) = ode_conv(F,y0,t0,ModifiedRosenbrockIntegrator; kargs...) +ode1(F,y0,t0;kargs...) 
= ode_conv(F,y0,t0,RKIntegratorFixed{:feuler}; kargs...) +ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorFixed{:midpoint}; kargs...) +ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorFixed{:heun}; kargs...) +ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorFixed{:rk4}; kargs...) +ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorAdaptive{:rk21}; kargs...) +ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorAdaptive{:rk23}; kargs...) +ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorAdaptive{:rk45}; kargs...) +ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorAdaptive{:dopri5}; kargs...) const ode45 = ode45_dp -ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKStepperAdaptive{:feh78}; kargs...) +ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorAdaptive{:feh78}; kargs...) function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},stepper;kargs...) diff --git a/src/ode23s.jl b/src/ode23s.jl index cdbb99209..67feca3f9 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -3,27 +3,27 @@ # # [SR97] L.F. Shampine and M.W. Reichelt: "The MATLAB ODE Suite," SIAM Journal on Scientific Computing, Vol. 18, 1997, pp. 1–22 -immutable ModifiedRosenbrockStepper{T<:Number} <: AbstractStepper +immutable ModifiedRosenbrockIntegrator{T<:Number} <: AbstractIntegrator options::AdaptiveOptions{T} const_d::T const_e::T end -@compat function (::Type{ModifiedRosenbrockStepper{T}}){T}(;options...) +@compat function (::Type{ModifiedRosenbrockIntegrator{T}}){T}(;options...) 
const_d = 1/(2+sqrt(T(2))) const_e = 6+sqrt(T(2)) - ModifiedRosenbrockStepper( AdaptiveOptions{T}(;options...), const_d, const_e ) + ModifiedRosenbrockIntegrator( AdaptiveOptions{T}(;options...), const_d, const_e ) end -order(::ModifiedRosenbrockStepper) = 2 -name(::ModifiedRosenbrockStepper) = "Modified Rosenbrock Stepper" -isadaptive(::ModifiedRosenbrockStepper) = true -tdir(ode::ExplicitODE, stepper::ModifiedRosenbrockStepper) = sign(stepper.options.tstop - ode.t0) +order(::ModifiedRosenbrockIntegrator) = 2 +name(::ModifiedRosenbrockIntegrator) = "Modified Rosenbrock Integrator" +isadaptive(::ModifiedRosenbrockIntegrator) = true +tdir(ode::ExplicitODE, stepper::ModifiedRosenbrockIntegrator) = sign(stepper.options.tstop - ode.t0) # define the set of ODE problems with which this stepper can work -solve{T,S<:ModifiedRosenbrockStepper}(ode::ExplicitODE{T}, stepper::Type{S}; options...) = - Solver(ode,stepper{T}(;options...)) +solve{T,S<:ModifiedRosenbrockIntegrator}(ode::ExplicitODE{T}, stepper::Type{S}; options...) 
= + Problem(ode,stepper{T}(;options...)) """ The state for the Rosenbrock stepper @@ -60,7 +60,7 @@ end function init{T}(ode::ExplicitODE{T}, - stepper::ModifiedRosenbrockStepper) + stepper::ModifiedRosenbrockIntegrator) t = ode.t0 dt = stepper.options.initstep y = ode.y0 @@ -90,7 +90,7 @@ end function trialstep!(ode::ExplicitODE, - stepper::ModifiedRosenbrockStepper, + stepper::ModifiedRosenbrockIntegrator, state::RosenbrockState) # unpack step = state.step @@ -138,7 +138,7 @@ function trialstep!(ode::ExplicitODE, end function errorcontrol!(ode::ExplicitODE, - stepper::ModifiedRosenbrockStepper, + stepper::ModifiedRosenbrockIntegrator, state::RosenbrockState) step = state.step @@ -167,7 +167,7 @@ function errorcontrol!(ode::ExplicitODE, end function accept!(ode::ExplicitODE, - stepper::ModifiedRosenbrockStepper, + stepper::ModifiedRosenbrockIntegrator, state::RosenbrockState) step = state.step # update the state diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 795b2ec83..9973f54e7 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -40,24 +40,23 @@ isadaptive(b::TableauRKExplicit) = size(b.b, 1)==2 ####################### # Solver implementation ####################### - """ A general Runge-Kutta stepper (it can represent either, a fixed step or an adaptive step algorithm). """ -immutable RKStepper{Kind,Name,T,O<:Options} <: AbstractStepper{T} +immutable RKIntegrator{Kind,Name,T,O<:Options} <: AbstractIntegrator{T} tableau::TableauRKExplicit{T} options::O end -typealias RKStepperFixed RKStepper{:fixed} -typealias RKStepperAdaptive RKStepper{:adaptive} +typealias RKIntegratorFixed RKIntegrator{:fixed} +typealias RKIntegratorAdaptive RKIntegrator{:adaptive} -@compat function (::Type{RKStepper{Kind,Name,T}}){Kind,Name,T}(;options...) +@compat function (::Type{RKIntegrator{Kind,Name,T}}){Kind,Name,T}(;options...) tab = convert(TableauRKExplicit{T},tableaus_rk_explicit[Name]) if Kind == :fixed opts = FixedOptions{T}(;options...) 
@@ -70,18 +69,18 @@ typealias RKStepperAdaptive RKStepper{:adaptive} error("Cannot construct an adaptive step method from an fixed step tableau") end end - RKStepper{Kind,Name,T,typeof(opts)}(tab,opts) + RKIntegrator{Kind,Name,T,typeof(opts)}(tab,opts) end -order(stepper::RKStepper) = minimum(order(stepper.tableau)) +order(stepper::RKIntegrator) = minimum(order(stepper.tableau)) -name(stepper::RKStepper) = stepper.tableau.name +name(stepper::RKIntegrator) = stepper.tableau.name -tdir(ode::ExplicitODE, stepper::RKStepper) = sign(stepper.options.tstop - ode.t0) +tdir(ode::ExplicitODE, stepper::RKIntegrator) = sign(stepper.options.tstop - ode.t0) -solve{T,S<:RKStepper}(ode::ExplicitODE{T}, stepper::Type{S}; options...) = - Solver(ode,stepper{T}(;options...)) +solve{T,S<:RKIntegrator}(ode::ExplicitODE{T}, stepper::Type{S}; options...) = + Problem(ode,stepper{T}(;options...)) # lower level interface @@ -123,7 +122,7 @@ function show(io::IO, state::RKState) end -function init(ode::ExplicitODE,stepper::RKStepper) +function init(ode::ExplicitODE,stepper::RKIntegrator) t0, dt0, y0 = ode.t0, stepper.options.initstep, ode.y0 # clip the dt0 if t0+dt0 exceeds tstop @@ -155,7 +154,7 @@ end ##################### -function onestep!(ode::ExplicitODE, stepper::RKStepperFixed, state::RKState) +function onestep!(ode::ExplicitODE, stepper::RKIntegratorFixed, state::RKState) step = state.step work = state.work @@ -194,7 +193,7 @@ const timeout_const = 5 # `trialstep!` ends with a step computed for the stepsize `state.dt` # and stores it in `work.y`, so `work.y` contains a candidate for # `y(t+dt)` with `dt=state.dt`. 
-function trialstep!(ode::ExplicitODE, stepper::RKStepperAdaptive, state::RKState) +function trialstep!(ode::ExplicitODE, stepper::RKIntegratorAdaptive, state::RKState) work = state.work step = state.step tableau = stepper.tableau @@ -226,7 +225,7 @@ end # computes the error for the candidate solution `y(t+dt)` with # `dt=state.dt` and proposes a new time step function errorcontrol!(ode::ExplicitODE, - stepper::RKStepperAdaptive, + stepper::RKIntegratorAdaptive, state::RKState) work = state.work step = state.step @@ -254,7 +253,7 @@ end # called, that is `work.y` holds `y(t+dt)` with `dt=state.dt`, and # error was small enough for us to keep `y(t+dt)` as the next step. function accept!(ode::ExplicitODE, - stepper::RKStepperAdaptive, + stepper::RKIntegratorAdaptive, state::RKState) work = state.work step = state.step diff --git a/src/types.jl b/src/types.jl index 25f7a2eab..238b470dd 100644 --- a/src/types.jl +++ b/src/types.jl @@ -1,7 +1,7 @@ # The main types: # - IVP -- holds the mathematical aspects of a IVP -# - AbstractStepper -- an integrator/solver (maybe AbstractIntegrator?) -# - Solver -- holds IVP + Stepper (maybe ProblemSpec, Problem, Spec?) +# - AbstractIntegrator -- an integrator/solver (maybe AbstractIntegrator?) +# - Problem -- holds IVP + Integrator (maybe ProblemSpec, Problem, Spec?) # - AbstractState -- holds the iterator state # - Step -- holds the state at one time # - @@ -94,13 +94,13 @@ end The abstract type of the actual algorithm to solve an ODE. """ -abstract AbstractStepper{T} +abstract AbstractIntegrator{T} """ AbstractState keeps the temporary data (state) for the iterator -Solver{::AbstractStepper}. +Problem{::AbstractIntegrator}. """ abstract AbstractState{T,Y} @@ -121,7 +121,7 @@ output(st::AbstractState) = st.step.t, st.step.y, st.step.dy # (or something else consistent throughout, maybe nicer would be all # uppercase: ET, EFY, TT, TY). 
# - if find `Step` a bit confusing name, in particular combined with -# AbstractStepper, but not sure what's better. +# AbstractIntegrator, but not sure what's better. """ @@ -154,15 +154,13 @@ of a numerical solution to an ODE. """ -immutable Solver{O<:AbstractIVP,S<:AbstractStepper} +immutable Problem{O<:AbstractIVP,S<:AbstractIntegrator} ode ::O stepper ::S end -#m3: -# - calling this `Solver` still trips me up -Base.eltype{O}(::Type{Solver{O}}) = eltype(O) -Base.eltype{O}(::Solver{O}) = eltype(O) +Base.eltype{O}(::Type{Problem{O}}) = eltype(O) +Base.eltype{O}(::Problem{O}) = eltype(O) # filter the wrong combinations of ode and stepper solve{O,S}(ode::O, stepper::Type{S}, options...) = @@ -170,7 +168,7 @@ solve{O,S}(ode::O, stepper::Type{S}, options...) = # In Julia 0.5 the collect needs length to be defined, we cannot do # that for a solver but we can implement our own collect -function collect(s::Solver) +function collect(s::Problem) T,Y = eltype(s) pairs = Array(Tuple{T,Y},0) for (t,y) in s @@ -197,11 +195,11 @@ end # TODO: this implementation fails to return the zeroth step (t0,y0) # # TODO: store the current Step outside of the actual state -# Base.start(sol::Solver) = (init(sol), Step(ode.sol)) +# Base.start(sol::Problem) = (init(sol), Step(ode.sol)) -Base.start(sol::Solver) = init(sol.ode,sol.stepper) +Base.start(sol::Problem) = init(sol.ode,sol.stepper) -function Base.done(s::Solver, st) +function Base.done(s::Problem, st) # Determine whether the next step can be made by calling the # stepping routine. onestep! will take the step in-place. 
status = onestep!(s.ode, s.stepper, st) @@ -218,7 +216,7 @@ function Base.done(s::Solver, st) end end -function Base.next(sol::Solver, st) +function Base.next(sol::Problem, st) # Output the step (we know that `done` allowed it, so we are safe # to do it) return output(st), st @@ -274,7 +272,7 @@ to implement the sub-step functions `trialstep!`, `errorcontrol!` and Input: -- sol::Solver, state::AbstractState +- sol::Problem, state::AbstractState Output: @@ -282,7 +280,7 @@ Output: substeps. """ -function onestep!(ode::IVP, stepper::AbstractStepper, state::AbstractState) +function onestep!(ode::IVP, stepper::AbstractIntegrator, state::AbstractState) opt = stepper.options while true status = trialstep!(ode, stepper, state) @@ -325,7 +323,7 @@ compute the magnitude of its error. If the error is small enough Returns `Status`. """ -trialstep!{S<:AbstractStepper}(::IVP, ::S, ::AbstractState) = +trialstep!{S<:AbstractIntegrator}(::IVP, ::S, ::AbstractState) = error("Function `trialstep!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") """ @@ -340,7 +338,7 @@ If the `status==abort` then the integration is aborted, status values of `cont` and `finish` are ignored. """ -errorcontrol!{S<:AbstractStepper}(::IVP, ::S, ::AbstractState) = +errorcontrol!{S<:AbstractIntegrator}(::IVP, ::S, ::AbstractState) = error("Function `errorcontrol!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") """ @@ -351,5 +349,5 @@ a small enough error. Returns `Status`. 
""" -accept!{S<:AbstractStepper}(::IVP, ::S, ::AbstractState) = +accept!{S<:AbstractIntegrator}(::IVP, ::S, ::AbstractState) = error("Function `accept!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") diff --git a/test/iterators.jl b/test/iterators.jl index aa5a199ea..f2f389833 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -48,16 +48,16 @@ const testsets = [ # Testing function ode -const steppers = [ODE.RKStepperFixed{:feuler}, - ODE.RKStepperFixed{:midpoint}, - ODE.RKStepperFixed{:heun}, - ODE.RKStepperFixed{:rk4}, - ODE.RKStepperAdaptive{:rk21}, - ODE.RKStepperAdaptive{:rk23}, - ODE.RKStepperAdaptive{:rk45}, - ODE.RKStepperAdaptive{:dopri5}, - ODE.RKStepperAdaptive{:feh78}, - ODE.ModifiedRosenbrockStepper +const steppers = [ODE.RKIntegratorFixed{:feuler}, + ODE.RKIntegratorFixed{:midpoint}, + ODE.RKIntegratorFixed{:heun}, + ODE.RKIntegratorFixed{:rk4}, + ODE.RKIntegratorAdaptive{:rk21}, + ODE.RKIntegratorAdaptive{:rk23}, + ODE.RKIntegratorAdaptive{:rk45}, + ODE.RKIntegratorAdaptive{:dopri5}, + ODE.RKIntegratorAdaptive{:feh78}, + ODE.ModifiedRosenbrockIntegrator ] function test_ode() @@ -126,7 +126,7 @@ function test_ode() @test_approx_eq_eps y sol(t) tol end - for (t,y) in ODE.solve(equation,ODE.DenseStepper; + for (t,y) in ODE.solve(equation,ODE.DenseOutput; method = stepper, opts...) 
@test_approx_eq_eps y sol(t) tol end From 880878e3cd248cb188a230063d008ba1167712ed Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Tue, 26 Jul 2016 12:33:54 +0200 Subject: [PATCH 071/113] ode->ivp --- src/ODE.jl | 18 +++++++++++++-- src/dense.jl | 55 +++++++++++++++++++++++----------------------- src/ode23s.jl | 30 ++++++++++++------------- src/runge-kutta.jl | 42 +++++++++++++++++------------------ src/types.jl | 42 +++++++++++++++++++++-------------- 5 files changed, 105 insertions(+), 82 deletions(-) diff --git a/src/ODE.jl b/src/ODE.jl index 408db3eff..3582ca3b9 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -1,5 +1,20 @@ # Ordinary Differential Equation Solvers +""" +Coding conventions: + +- use t::T, y::Y, dy::Y +- p::Problem, ::P +- ivp::IVP, ::O +- integrator::AbstractIntegrator, ::I +- opts::AbstactOptions + +Variables and Type variables: +- T -> t::T +- Y -> y::Y TODO: or Vector{Y}? + + +""" module ODE using Polynomials @@ -7,8 +22,7 @@ using Compat import Compat.String using ForwardDiff -import Base.convert, Base.show -import Base: start, next, done, call, collect +import Base: start, next, done, collect, show, convert # Core infrastructure # diff --git a/src/dense.jl b/src/dense.jl index ef63cc896..9475c1ff5 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -38,19 +38,20 @@ interpolates the results on request (currently this means at the output times stored in `options.tout`). """ -immutable DenseOutput{S<:Problem,O<:DenseOptions} <: AbstractIntegrator #TODO: <: is needed at the moment after all. Remove - solver::S - options::O +immutable DenseOutput{P<:Problem,OP<:DenseOptions} <: AbstractSolver + prob::P + options::OP end -function solve{T,S<:DenseOutput}(ode::ExplicitODE{T}, - ::Type{S}; - method = RKIntegratorAdaptive{:rk45}, - options...) - sol_orig = Problem(ode,method{T}(; options...)) +function solve{S<:DenseOutput}(ivp::IVP, + ::Type{S}; + method = RKIntegratorAdaptive{:rk45}, + options...) 
+ T = eltype(ivp)[1] + sol_orig = Problem(ivp,method{T}(; options...)) dense_options = DenseOptions{T}(; options...) dense_stepper = S(sol_orig,dense_options) - return Problem(ode,dense_stepper) # TODO: this is where it is needed. + return Problem(ivp,dense_stepper) # TODO: this is where it is needed. end """ @@ -62,20 +63,20 @@ type DenseState{St<:AbstractState,T,Y} <: AbstractState{T,Y} tout_i::Int step_prev::Step{T,Y} step_out::Step{T,Y} - solver_state::St + integrator_state::St end output(ds::DenseState) = output(ds.step_out) -function init(ode::ExplicitODE, +function init(ivp::IVP, stepper::DenseOutput) - ode = stepper.solver.ode - solver_state = init(stepper.solver.ode, stepper.solver.stepper) - dy0 = similar(ode.y0) - ode.F!(ode.t0,ode.y0,dy0) - step_prev = Step(ode.t0,copy(ode.y0),dy0) - step_out = Step(ode.t0,similar(ode.y0),similar(ode.y0)) - return DenseState(1,step_prev,step_out,solver_state) + ivp = stepper.prob.ivp + integrator_state = init(stepper.prob.ivp, stepper.prob.stepper) + dy0 = similar(ivp.y0) + ivp.F!(ivp.t0,ivp.y0,dy0) + step_prev = Step(ivp.t0,copy(ivp.y0),dy0) + step_out = Step(ivp.t0,similar(ivp.y0),similar(ivp.y0)) + return DenseState(1,step_prev,step_out,integrator_state) end @@ -88,7 +89,7 @@ wouldn't. 
""" -function onestep!(ode::ExplicitODE, +function onestep!(ivp::IVP, stepper::DenseOutput, state::DenseState) i = state.tout_i @@ -99,13 +100,13 @@ function onestep!(ode::ExplicitODE, # our next output time ti = stepper.options.tout[i] - sol = stepper.solver # this looks weird - sol_state = state.solver_state + sol = stepper.prob # this looks weird + sol_state = state.integrator_state # try to get a new set of steps enclosing `ti`, if all goes # right we end up with t∈[t1,t2] with # t1,_=output(state.step_prev) - # t2,_=output(state.solver_state) + # t2,_=output(state.integrator_state) status = next_interval!(sol,sol_state,state.step_prev,ti) if status == abort # we failed to get enough steps @@ -116,7 +117,7 @@ function onestep!(ode::ExplicitODE, # the state.step_out with y(ti) and y'(ti) according to an # interpolation algorithm specific for a method (defaults to # hermite O(3)). - interpolate!(state.solver_state,state.step_prev,ti,state.step_out) + interpolate!(state.integrator_state,state.step_prev,ti,state.step_out) # increase the counter state.tout_i += 1 @@ -126,7 +127,7 @@ end """ -Pulls the results from the (solver,state) pair using `onestep!` until +Pulls the results from the (prob,state) pair using `onestep!` until we reach a first step such that `t>=tout`. It fills the `steps` variable with (Step(t1,y(t1),dy(t1)),Step(t2,y(t2),dy(t2))), where `t1` is is the step before `tout` and `t2` is `>=tout`. In @@ -135,7 +136,7 @@ other words `tout∈[t1,t2]`. 
TODO: tdir """ -function next_interval!(solver,state,step_prev,tout) +function next_interval!(prob,state,step_prev,tout) while true # get the current time @@ -153,8 +154,8 @@ function next_interval!(solver,state,step_prev,tout) copy!(step_prev.y,y) copy!(step_prev.dy,dy) - # try to perform a single step with the solver - status = onestep!(solver.ode, solver.stepper, state) + # try to perform a single step with the prob + status = onestep!(prob.ivp, prob.stepper, state) if status != cont return status diff --git a/src/ode23s.jl b/src/ode23s.jl index 67feca3f9..20fd317f4 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -19,11 +19,11 @@ end order(::ModifiedRosenbrockIntegrator) = 2 name(::ModifiedRosenbrockIntegrator) = "Modified Rosenbrock Integrator" isadaptive(::ModifiedRosenbrockIntegrator) = true -tdir(ode::ExplicitODE, stepper::ModifiedRosenbrockIntegrator) = sign(stepper.options.tstop - ode.t0) +tdir(ivp::ExplicitODE, stepper::ModifiedRosenbrockIntegrator) = sign(stepper.options.tstop - ivp.t0) # define the set of ODE problems with which this stepper can work -solve{T,S<:ModifiedRosenbrockIntegrator}(ode::ExplicitODE{T}, stepper::Type{S}; options...) = - Problem(ode,stepper{T}(;options...)) +solve{T,S<:ModifiedRosenbrockIntegrator}(ivp::ExplicitODE{T}, stepper::Type{S}; options...) 
= + Problem(ivp,stepper{T}(;options...)) """ The state for the Rosenbrock stepper @@ -59,11 +59,11 @@ function show(io::IO, state::RosenbrockState) end -function init{T}(ode::ExplicitODE{T}, +function init{T}(ivp::ExplicitODE{T}, stepper::ModifiedRosenbrockIntegrator) - t = ode.t0 + t = ivp.t0 dt = stepper.options.initstep - y = ode.y0 + y = ivp.y0 dy = zero(y) J = Array(eltype(y),length(y),length(y)) @@ -82,14 +82,14 @@ function init{T}(ode::ExplicitODE{T}, 0) # iters # initialize the derivative and the Jacobian - ode.F!(t,y,step.dy) - ode.J!(t,y,state.J) + ivp.F!(t,y,step.dy) + ivp.J!(t,y,state.J) return state end -function trialstep!(ode::ExplicitODE, +function trialstep!(ivp::ExplicitODE, stepper::ModifiedRosenbrockIntegrator, state::RosenbrockState) # unpack @@ -98,10 +98,10 @@ function trialstep!(ode::ExplicitODE, F1, F2, J = state.F1, state.F2, state.J k1,k2,k3,ynew = state.k1, state.k2, state.k3, state.ynew t, dt, y, dy = step.t, state.dt, step.y, step.dy - F! = ode.F! + F! = ivp.F! 
F0 = dy - td = tdir(ode,stepper) + td = tdir(ivp,stepper) # see whether we're done if td*t >= td*opts.tstop @@ -137,7 +137,7 @@ function trialstep!(ode::ExplicitODE, return cont end -function errorcontrol!(ode::ExplicitODE, +function errorcontrol!(ivp::ExplicitODE, stepper::ModifiedRosenbrockIntegrator, state::RosenbrockState) @@ -147,7 +147,7 @@ function errorcontrol!(ode::ExplicitODE, k1,k2,k3,ynew = state.k1, state.k2, state.k3, state.ynew t, dt, y, dy = step.t, state.dt, step.y, step.dy - td = tdir(ode,stepper) + td = tdir(ivp,stepper) # allowable error delta = max(opts.reltol*max(opts.norm(y), opts.norm(ynew)),opts.abstol) @@ -166,7 +166,7 @@ function errorcontrol!(ode::ExplicitODE, return err, cont end -function accept!(ode::ExplicitODE, +function accept!(ivp::ExplicitODE, stepper::ModifiedRosenbrockIntegrator, state::RosenbrockState) step = state.step @@ -174,7 +174,7 @@ function accept!(ode::ExplicitODE, step.t = step.t+state.dtold copy!(step.y, state.ynew) copy!(step.dy, state.F2) - ode.J!(step.t,step.y,state.J) + ivp.J!(step.t,step.y,state.J) return cont end diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl index 9973f54e7..b91a2ff53 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -77,10 +77,10 @@ order(stepper::RKIntegrator) = minimum(order(stepper.tableau)) name(stepper::RKIntegrator) = stepper.tableau.name -tdir(ode::ExplicitODE, stepper::RKIntegrator) = sign(stepper.options.tstop - ode.t0) +tdir(ivp::ExplicitODE, stepper::RKIntegrator) = sign(stepper.options.tstop - ivp.t0) -solve{T,S<:RKIntegrator}(ode::ExplicitODE{T}, stepper::Type{S}; options...) = - Problem(ode,stepper{T}(;options...)) +solve{T,S<:RKIntegrator}(ivp::ExplicitODE{T}, stepper::Type{S}; options...) 
= + Problem(ivp,stepper{T}(;options...)) # lower level interface @@ -122,11 +122,11 @@ function show(io::IO, state::RKState) end -function init(ode::ExplicitODE,stepper::RKIntegrator) - t0, dt0, y0 = ode.t0, stepper.options.initstep, ode.y0 +function init(ivp::ExplicitODE,stepper::RKIntegrator) + t0, dt0, y0 = ivp.t0, stepper.options.initstep, ivp.y0 # clip the dt0 if t0+dt0 exceeds tstop - dt0 = tdir(ode,stepper)*min(abs(dt0),abs(stepper.options.tstop-t0)) + dt0 = tdir(ivp,stepper)*min(abs(dt0),abs(stepper.options.tstop-t0)) lk = lengthks(stepper.tableau) work = RKWorkArrays(zero(y0), # y @@ -140,7 +140,7 @@ function init(ode::ExplicitODE,stepper::RKIntegrator) end # pre-initialize work.ks[1] - ode.F!(t0,y0,work.ks[1]) + ivp.F!(t0,y0,work.ks[1]) step = Step(t0,copy(y0),copy(work.ks[1])) @@ -154,11 +154,11 @@ end ##################### -function onestep!(ode::ExplicitODE, stepper::RKIntegratorFixed, state::RKState) +function onestep!(ivp::ExplicitODE, stepper::RKIntegratorFixed, state::RKState) step = state.step work = state.work - td = tdir(ode,stepper) + td = tdir(ivp,stepper) if td*step.t >= td*stepper.options.tstop # nothing left to integrate @@ -172,7 +172,7 @@ function onestep!(ode::ExplicitODE, stepper::RKIntegratorFixed, state::RKState) copy!(work.ynew,step.y) for k=1:length(b) - calc_next_k!(work, k, ode, stepper.tableau, step, dt) + calc_next_k!(work, k, ivp, stepper.tableau, step, dt) for d=1:dof work.ynew[d] += dt * b[k]*work.ks[k][d] end @@ -193,13 +193,13 @@ const timeout_const = 5 # `trialstep!` ends with a step computed for the stepsize `state.dt` # and stores it in `work.y`, so `work.y` contains a candidate for # `y(t+dt)` with `dt=state.dt`. 
-function trialstep!(ode::ExplicitODE, stepper::RKIntegratorAdaptive, state::RKState) +function trialstep!(ivp::ExplicitODE, stepper::RKIntegratorAdaptive, state::RKState) work = state.work step = state.step tableau = stepper.tableau options = stepper.options - td = tdir(ode,stepper) + td = tdir(ivp,stepper) # use the proposed step size to perform the computations state.dt = state.newdt @@ -217,14 +217,14 @@ function trialstep!(ode::ExplicitODE, stepper::RKIntegratorAdaptive, state::RKSt end # work.y and work.yerr and work.ks are updated after this step - rk_embedded_step!(work, ode, tableau, step, dt) + rk_embedded_step!(work, ivp, tableau, step, dt) return cont end # computes the error for the candidate solution `y(t+dt)` with # `dt=state.dt` and proposes a new time step -function errorcontrol!(ode::ExplicitODE, +function errorcontrol!(ivp::ExplicitODE, stepper::RKIntegratorAdaptive, state::RKState) work = state.work @@ -235,7 +235,7 @@ function errorcontrol!(ode::ExplicitODE, err, state.newdt, state.timeout = stepsize_hw92!(work, step, tableau, state.dt, state.timeout, options) - td = tdir(ode,stepper) + td = tdir(ivp,stepper) # trim in case newdt > dt state.newdt = td*min(abs(state.newdt), abs(options.tstop-(state.step.t+state.dt))) @@ -252,7 +252,7 @@ end # Here we assume that trialstep! and errorcontrol! have already been # called, that is `work.y` holds `y(t+dt)` with `dt=state.dt`, and # error was small enough for us to keep `y(t+dt)` as the next step. 
-function accept!(ode::ExplicitODE, +function accept!(ivp::ExplicitODE, stepper::RKIntegratorAdaptive, state::RKState) work = state.work @@ -263,7 +263,7 @@ function accept!(ode::ExplicitODE, if tableau.isFSAL copy!(work.ks[1],work.ks[end]) else - ode.F!(step.t+state.dt, work.ynew, work.ks[1]) + ivp.F!(step.t+state.dt, work.ynew, work.ks[1]) end # Swap bindings of y and ytrial, avoids one copy @@ -280,7 +280,7 @@ end ########################## function rk_embedded_step!(work ::RKWorkArrays, - ode ::ExplicitODE, + ivp ::ExplicitODE, tableau ::Tableau, last_step ::Step, dt) @@ -299,7 +299,7 @@ function rk_embedded_step!(work ::RKWorkArrays, # we skip the first step beacause we assume that work.ks[1] is # already computed if s > 1 - calc_next_k!(work, s, ode, tableau, last_step, dt) + calc_next_k!(work, s, ivp, tableau, last_step, dt) end for d=1:dof work.ynew[d] += b[1,s]*work.ks[s][d] @@ -376,7 +376,7 @@ end # this is the only part of state that can be changed here function calc_next_k!(work ::RKWorkArrays, i ::Int, - ode ::ExplicitODE, + ivp ::ExplicitODE, tableau ::Tableau, last_step ::Step, dt) @@ -389,7 +389,7 @@ function calc_next_k!(work ::RKWorkArrays, work.y[d] += dt * work.ks[j][d] * a[i,j] end end - ode.F!(t + c[i]*dt, work.y, work.ks[i]) + ivp.F!(t + c[i]*dt, work.y, work.ks[i]) return nothing end diff --git a/src/types.jl b/src/types.jl index 238b470dd..8468d49b1 100644 --- a/src/types.jl +++ b/src/types.jl @@ -91,10 +91,18 @@ end """ -The abstract type of the actual algorithm to solve an ODE. +The supertype of anything which can get you to a solution of a IVP. +Subtypes include: `AbstractIntegrator`s but also `DenseOutput` """ -abstract AbstractIntegrator{T} +abstract AbstractSolver + +""" + +The abstract type of the actual algorithm to solve an IVP. + +""" +abstract AbstractIntegrator{T} <: AbstractSolver """ @@ -126,7 +134,7 @@ output(st::AbstractState) = st.step.t, st.step.y, st.step.dy """ Holds a value of a function and its derivative at time t. 
This is -usually used to store the solution of an ODE at particular times. +usually used to store the solution of an IVP at particular times. """ type Step{T,Y} @@ -147,23 +155,23 @@ end """ This is an iterable type, each call to next(...) produces a next step -of a numerical solution to an ODE. +of a numerical solution to an IVP. -- ode: is the prescrived ode, along with the initial data +- ivp: is the prescrived ivp, along with the initial data - stepper: the algorithm used to produce subsequent steps """ -immutable Problem{O<:AbstractIVP,S<:AbstractIntegrator} - ode ::O +immutable Problem{O<:AbstractIVP,S<:AbstractSolver} + ivp ::O stepper ::S end Base.eltype{O}(::Type{Problem{O}}) = eltype(O) Base.eltype{O}(::Problem{O}) = eltype(O) -# filter the wrong combinations of ode and stepper -solve{O,S}(ode::O, stepper::Type{S}, options...) = +# filter the wrong combinations of ivp and stepper +solve{O,S}(ivp::O, stepper::Type{S}, options...) = error("The $S doesn't support $O") # In Julia 0.5 the collect needs length to be defined, we cannot do @@ -178,7 +186,7 @@ function collect(s::Problem) end -# Iteration: take one step on a ODE/DAE `Problem` +# Iteration: take one step on a IVP `Problem` # # Defines: # start(iter) -> state @@ -195,14 +203,14 @@ end # TODO: this implementation fails to return the zeroth step (t0,y0) # # TODO: store the current Step outside of the actual state -# Base.start(sol::Problem) = (init(sol), Step(ode.sol)) +# Base.start(sol::Problem) = (init(sol), Step(ivp.sol)) -Base.start(sol::Problem) = init(sol.ode,sol.stepper) +Base.start(sol::Problem) = init(sol.ivp,sol.stepper) function Base.done(s::Problem, st) # Determine whether the next step can be made by calling the # stepping routine. onestep! will take the step in-place. - status = onestep!(s.ode, s.stepper, st) + status = onestep!(s.ivp, s.stepper, st) # can't this be a function on a status? if status==cont return false @@ -280,15 +288,15 @@ Output: substeps. 
""" -function onestep!(ode::IVP, stepper::AbstractIntegrator, state::AbstractState) +function onestep!(ivp::IVP, stepper::AbstractIntegrator, state::AbstractState) opt = stepper.options while true - status = trialstep!(ode, stepper, state) - err, status_err = errorcontrol!(ode, stepper, state) + status = trialstep!(ivp, stepper, state) + err, status_err = errorcontrol!(ivp, stepper, state) status &= status_err if err<=1 # a successful step - status &= accept!(ode, stepper, state) + status &= accept!(ivp, stepper, state) return status elseif status==abort || status==finish return status From 06b3322b9ad34af5e0812aaac2da934fca7efe8c Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Tue, 26 Jul 2016 14:03:06 +0200 Subject: [PATCH 072/113] Rest of the renames --- examples/ex_iter.jl | 14 +++--- examples/test.jl | 10 ++-- src/ODE.jl | 11 +++-- src/dense.jl | 68 ++++++++++++++-------------- src/interfaces.jl | 27 ++++++----- src/ode23s.jl | 60 ++++++++++++------------ src/runge-kutta.jl | 108 ++++++++++++++++++++++---------------------- src/types.jl | 62 ++++++++++--------------- test/iterators.jl | 8 ++-- 9 files changed, 178 insertions(+), 190 deletions(-) diff --git a/examples/ex_iter.jl b/examples/ex_iter.jl index 61b929b52..dfd5a8085 100644 --- a/examples/ex_iter.jl +++ b/examples/ex_iter.jl @@ -11,20 +11,20 @@ opts = Dict(:initstep=>0.1, :reltol=>1e-5, :abstol=>1e-5) # pick your solver -stepper = [ODE.RKIntegratorAdaptive{:rk45}, +integ = [ODE.RKIntegratorAdaptive{:rk45}, ODE.ModifiedRosenbrockIntegrator][2] # create a Problem instance -sol = ODE.solve(ode,stepper;opts...) +prob = ODE.solve(ode,integ;opts...) # iterate over the solution println("t, y, err") -for (t,y) in sol +for (t,y) in prob println((t,y[1],abs(y[1]-e.^t))) end # or collect it -println(collect(sol)) +println(collect(prob)) ### Reverse time integration, rest as above t0 = 1.0 @@ -35,11 +35,11 @@ opts = Dict(:initstep=>0.1, :reltol=>1e-5, :abstol=>1e-5) -sol = ODE.solve(ode,stepper;opts...) 
+prob = ODE.solve(ode,integ;opts...) println("t, y, err") -for (t,y) in sol # iterate over the solution +for (t,y) in prob # iterate over the solution println((t,y[1],abs(y[1]-e.^(t-1)))) end -println(collect(sol)) +println(collect(prob)) diff --git a/examples/test.jl b/examples/test.jl index ce7fc706b..fd078100e 100644 --- a/examples/test.jl +++ b/examples/test.jl @@ -9,11 +9,11 @@ Y = Vector{T} t0 = zero(T) y0 = T[one(T)] -steppers = [# ODE.RKIntegratorAdaptive{:rk45}, +solvers = [# ODE.RKIntegratorAdaptive{:rk45}, # ODE.RKIntegratorFixed{:feuler}, ODE.DenseOutput] -for st in steppers +for st in solvers ode = ODE.ExplicitODE(t0,y0,(t,y,dy)->dy[1]=y[1]) opts = Dict(:initstep=>0.1, :tspan=>[0.,0.5,1.], @@ -21,14 +21,14 @@ for st in steppers :reltol=>1e-5, :abstol=>1e-5) - sol = ODE.solve(ode,st;opts...) + prob = ODE.solve(ode,st;opts...) println("Raw iterator") - for (t,y) in sol + for (t,y) in prob println((t,y,norm(y-[exp(t)]))) end - println(collect(sol)) + println(collect(prob)) end end diff --git a/src/ODE.jl b/src/ODE.jl index 3582ca3b9..eedcdfd28 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -3,11 +3,12 @@ """ Coding conventions: -- use t::T, y::Y, dy::Y -- p::Problem, ::P -- ivp::IVP, ::O -- integrator::AbstractIntegrator, ::I -- opts::AbstactOptions +- use `t,y,dy`, use type/function parameters `T` and `Y` +- `p::Problem`, use parameter `P` +- `ivp::IVP`, use parameter `O` + - if referring to a ODE or DAE, use `ode` or `dae` instead +- `integ::AbstractIntegrator`, use parameter `I` +- `opts::AbstactOptions`, , use parameter `OP` Variables and Type variables: - T -> t::T diff --git a/src/dense.jl b/src/dense.jl index 9475c1ff5..facbd1c10 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -1,4 +1,4 @@ -# A higher level stepper, defined as a wrapper around another stepper. +# A higher level solver, defined as a wrapper around an integrator. """ @@ -32,31 +32,31 @@ end """ -A stepper specialized in dense output. 
It wraps around another +A solver specialized in dense output. It wraps around a `Problem` and stores the subsequent steps generated by `Problem` and interpolates the results on request (currently this means at the -output times stored in `options.tout`). +output times stored in `opts.tout`). """ immutable DenseOutput{P<:Problem,OP<:DenseOptions} <: AbstractSolver prob::P - options::OP + opts::OP end function solve{S<:DenseOutput}(ivp::IVP, ::Type{S}; method = RKIntegratorAdaptive{:rk45}, - options...) + opts...) T = eltype(ivp)[1] - sol_orig = Problem(ivp,method{T}(; options...)) - dense_options = DenseOptions{T}(; options...) - dense_stepper = S(sol_orig,dense_options) - return Problem(ivp,dense_stepper) # TODO: this is where it is needed. + sol_orig = Problem(ivp,method{T}(; opts...)) + dense_opts = DenseOptions{T}(; opts...) + dense_solver = S(sol_orig,dense_opts) + return Problem(ivp,dense_solver) # TODO: this is where it is needed. end """ -The state of the dense stepper +The state of the dense solver """ type DenseState{St<:AbstractState,T,Y} <: AbstractState{T,Y} @@ -69,9 +69,9 @@ end output(ds::DenseState) = output(ds.step_out) function init(ivp::IVP, - stepper::DenseOutput) - ivp = stepper.prob.ivp - integrator_state = init(stepper.prob.ivp, stepper.prob.stepper) + solver::DenseOutput) + ivp = solver.prob.ivp + integrator_state = init(solver.prob.ivp, solver.prob.solver) dy0 = similar(ivp.y0) ivp.F!(ivp.t0,ivp.y0,dy0) step_prev = Step(ivp.t0,copy(ivp.y0),dy0) @@ -90,44 +90,44 @@ wouldn't. 
""" function onestep!(ivp::IVP, - stepper::DenseOutput, - state::DenseState) - i = state.tout_i - if i > length(stepper.options.tout) + solver::DenseOutput, + dstate::DenseState) + i = dstate.tout_i + if i > length(solver.opts.tout) return finish end # our next output time - ti = stepper.options.tout[i] + ti = solver.opts.tout[i] - sol = stepper.prob # this looks weird - sol_state = state.integrator_state + prob = solver.prob # this looks weird + istate = dstate.integrator_state # try to get a new set of steps enclosing `ti`, if all goes # right we end up with t∈[t1,t2] with - # t1,_=output(state.step_prev) - # t2,_=output(state.integrator_state) - status = next_interval!(sol,sol_state,state.step_prev,ti) + # t1,_=output(dstate.step_prev) + # t2,_=output(dstate.integrator_state) + status = next_interval!(prob, istate, dstate.step_prev, ti) if status == abort # we failed to get enough steps warn("Iterator was exhausted before the dense output could produce the output.") return abort else # we got the steps, proceed with the interpolation, this fills - # the state.step_out with y(ti) and y'(ti) according to an + # the dstate.step_out with y(ti) and y'(ti) according to an # interpolation algorithm specific for a method (defaults to # hermite O(3)). - interpolate!(state.integrator_state,state.step_prev,ti,state.step_out) + interpolate!(istate, dstate.step_prev, ti, dstate.step_out) # increase the counter - state.tout_i += 1 + dstate.tout_i += 1 return cont end end """ -Pulls the results from the (prob,state) pair using `onestep!` until +Pulls the results from the `(prob,istate)` pair using `onestep!` until we reach a first step such that `t>=tout`. It fills the `steps` variable with (Step(t1,y(t1),dy(t1)),Step(t2,y(t2),dy(t2))), where `t1` is is the step before `tout` and `t2` is `>=tout`. In @@ -136,12 +136,12 @@ other words `tout∈[t1,t2]`. 
TODO: tdir """ -function next_interval!(prob,state,step_prev,tout) +function next_interval!(prob, istate, step_prev, tout) while true # get the current time t1 = step_prev.t - t2,_ = output(state) + t2,_ = output(istate) t1, t2 = sort([t1,t2]) if t1 <= tout <= t2 # we found the enclosing times @@ -149,13 +149,13 @@ function next_interval!(prob,state,step_prev,tout) end # save the current state of solution - t, y, dy = output(state) + t, y, dy = output(istate) step_prev.t = t copy!(step_prev.y,y) copy!(step_prev.dy,dy) # try to perform a single step with the prob - status = onestep!(prob.ivp, prob.stepper, state) + status = onestep!(prob.ivp, prob.solver, istate) if status != cont return status @@ -171,7 +171,7 @@ end Make dense output using Hermite interpolation of order O(3). Updates yout in-place. Only needs y and dy at t1 and t2. Input -- state::AbstractState -- state of a stepper at time t2 +- istate::AbstractState -- istate of the integrator at time t2 - step_prev::Step -- solution at time t1 respectively - tout -- time of requested output - yout -- inplace y output @@ -185,9 +185,9 @@ TODO: fill dy TODO: arbitrary order method (change step_prev::Step to step_prevs::Tuple{Step,N}) """ -function interpolate!{T,Y}(state::AbstractState,step_prev::Step{T,Y},tout::T,step_out::Step{T,Y}) +function interpolate!{T,Y}(istate::AbstractState,step_prev::Step{T,Y},tout::T,step_out::Step{T,Y}) t1,y1,dy1 = output(step_prev) - t2,y2,dy2 = output(state) + t2,y2,dy2 = output(istate) if tout==t1 copy!(step_out.y,y1) elseif tout==t2 diff --git a/src/interfaces.jl b/src/interfaces.jl index f17a83581..92df0801e 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -4,26 +4,25 @@ We assume that the initial data y0 is given at tspan[1], and that tspan[end] is the last integration time. """ - -function ode{T,Y,S<:AbstractIntegrator}(F, y0::Y, +function ode{T,Y,I<:AbstractIntegrator}(F, y0::Y, tout::AbstractVector{T}, - stepper::Type{S}; + integ::Type{I}; points = :all, kargs...) 
t0 = tout[1] - # construct a solver + # construct a Problem equation = explicit_ineff(t0,y0,F;kargs...) if points == :all - solver = solve(equation, stepper; - tout = tout, - kargs...) + prob = solve(equation, integ; + tout = tout, + kargs...) elseif points == :specified - solver = solve(equation, DenseOutput; - mehtod = stepper, - tout = tout, - kargs...) + prob = solve(equation, DenseOutput; + method = integ, + tout = tout, + kargs...) else error("Unsupported points value (should be :all or :specified)") end @@ -33,7 +32,7 @@ function ode{T,Y,S<:AbstractIntegrator}(F, y0::Y, to = Array(T,0) yo = Array(Y,0) - for (t,y) in solver + for (t,y) in prob push!(to,t) push!(yo, extract ? y[1] : copy(y)) end @@ -60,7 +59,7 @@ const ode45 = ode45_dp ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorAdaptive{:feh78}; kargs...) -function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},stepper;kargs...) +function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},integ;kargs...) if !isleaftype(T) error("The output times have to be of a concrete type.") @@ -72,7 +71,7 @@ function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},stepper;kargs...) error("The initial data has to be of a concrete type (or an array)") end - ode(F,y0,t0,stepper;kargs...) + ode(F,y0,t0,integ;kargs...) end diff --git a/src/ode23s.jl b/src/ode23s.jl index 20fd317f4..5dfc85bf7 100644 --- a/src/ode23s.jl +++ b/src/ode23s.jl @@ -4,29 +4,29 @@ # [SR97] L.F. Shampine and M.W. Reichelt: "The MATLAB ODE Suite," SIAM Journal on Scientific Computing, Vol. 18, 1997, pp. 1–22 immutable ModifiedRosenbrockIntegrator{T<:Number} <: AbstractIntegrator - options::AdaptiveOptions{T} + opts::AdaptiveOptions{T} const_d::T const_e::T end -@compat function (::Type{ModifiedRosenbrockIntegrator{T}}){T}(;options...) +@compat function (::Type{ModifiedRosenbrockIntegrator{T}}){T}(;opts...) 
const_d = 1/(2+sqrt(T(2))) const_e = 6+sqrt(T(2)) - ModifiedRosenbrockIntegrator( AdaptiveOptions{T}(;options...), const_d, const_e ) + ModifiedRosenbrockIntegrator( AdaptiveOptions{T}(;opts...), const_d, const_e ) end order(::ModifiedRosenbrockIntegrator) = 2 name(::ModifiedRosenbrockIntegrator) = "Modified Rosenbrock Integrator" isadaptive(::ModifiedRosenbrockIntegrator) = true -tdir(ivp::ExplicitODE, stepper::ModifiedRosenbrockIntegrator) = sign(stepper.options.tstop - ivp.t0) +tdir(ode::ExplicitODE, integ::ModifiedRosenbrockIntegrator) = sign(integ.opts.tstop - ode.t0) -# define the set of ODE problems with which this stepper can work -solve{T,S<:ModifiedRosenbrockIntegrator}(ivp::ExplicitODE{T}, stepper::Type{S}; options...) = - Problem(ivp,stepper{T}(;options...)) +# define the set of ODE problems with which this integrator can work +solve{T,I<:ModifiedRosenbrockIntegrator}(ode::ExplicitODE{T}, integ::Type{I}; opts...) = + Problem(ode, integ{T}(;opts...)) """ -The state for the Rosenbrock stepper +The state for the Rosenbrock integrator - step: Last successful step - F1,F2: Work arrays for storing the intermediate values of y' @@ -59,11 +59,11 @@ function show(io::IO, state::RosenbrockState) end -function init{T}(ivp::ExplicitODE{T}, - stepper::ModifiedRosenbrockIntegrator) - t = ivp.t0 - dt = stepper.options.initstep - y = ivp.y0 +function init{T}(ode::ExplicitODE{T}, + integ::ModifiedRosenbrockIntegrator) + t = ode.t0 + dt = integ.opts.initstep + y = ode.y0 dy = zero(y) J = Array(eltype(y),length(y),length(y)) @@ -82,26 +82,26 @@ function init{T}(ivp::ExplicitODE{T}, 0) # iters # initialize the derivative and the Jacobian - ivp.F!(t,y,step.dy) - ivp.J!(t,y,state.J) + ode.F!(t,y,step.dy) + ode.J!(t,y,state.J) return state end -function trialstep!(ivp::ExplicitODE, - stepper::ModifiedRosenbrockIntegrator, +function trialstep!(ode::ExplicitODE, + integ::ModifiedRosenbrockIntegrator, state::RosenbrockState) # unpack step = state.step - opts = stepper.options + 
opts = integ.opts F1, F2, J = state.F1, state.F2, state.J k1,k2,k3,ynew = state.k1, state.k2, state.k3, state.ynew t, dt, y, dy = step.t, state.dt, step.y, step.dy - F! = ivp.F! + F! = ode.F! F0 = dy - td = tdir(ivp,stepper) + td = tdir(ode,integ) # see whether we're done if td*t >= td*opts.tstop @@ -116,12 +116,12 @@ function trialstep!(ivp::ExplicitODE, return abort end - W = lufact!( eye(J) - dt*stepper.const_d*J ) + W = lufact!( eye(J) - dt*integ.const_d*J ) # Approximate time-derivative of F, we are using F1 as a # temporary array F!(t+dt/100,y,F1) - tder = 100*stepper.const_d*(F1-F0) + tder = 100*integ.const_d*(F1-F0) # modified Rosenbrock formula # TODO: update k1,k2,k3 in-place @@ -132,22 +132,22 @@ function trialstep!(ivp::ExplicitODE, ynew[i] = y[i] + dt*k2[i] end F!(t+dt, ynew, F2) - k3[:] = W \ (F2 - stepper.const_e*(k2 - F1) - 2*(k1 - F0) + tder ) + k3[:] = W \ (F2 - integ.const_e*(k2 - F1) - 2*(k1 - F0) + tder ) return cont end -function errorcontrol!(ivp::ExplicitODE, - stepper::ModifiedRosenbrockIntegrator, +function errorcontrol!(ode::ExplicitODE, + integ::ModifiedRosenbrockIntegrator, state::RosenbrockState) step = state.step - opts = stepper.options + opts = integ.opts k1,k2,k3 = state.k1, state.k2, state.k3 k1,k2,k3,ynew = state.k1, state.k2, state.k3, state.ynew t, dt, y, dy = step.t, state.dt, step.y, step.dy - td = tdir(ivp,stepper) + td = tdir(ode,integ) # allowable error delta = max(opts.reltol*max(opts.norm(y), opts.norm(ynew)),opts.abstol) @@ -166,15 +166,15 @@ function errorcontrol!(ivp::ExplicitODE, return err, cont end -function accept!(ivp::ExplicitODE, - stepper::ModifiedRosenbrockIntegrator, +function accept!(ode::ExplicitODE, + integ::ModifiedRosenbrockIntegrator, state::RosenbrockState) step = state.step # update the state step.t = step.t+state.dtold copy!(step.y, state.ynew) copy!(step.dy, state.F2) - ivp.J!(step.t,step.y,state.J) + ode.J!(step.t,step.y,state.J) return cont end diff --git a/src/runge-kutta.jl b/src/runge-kutta.jl 
index b91a2ff53..b0548eba2 100644 --- a/src/runge-kutta.jl +++ b/src/runge-kutta.jl @@ -42,13 +42,13 @@ isadaptive(b::TableauRKExplicit) = size(b.b, 1)==2 ####################### """ -A general Runge-Kutta stepper (it can represent either, a fixed step +A general Runge-Kutta integrator (it can represent either, a fixed step or an adaptive step algorithm). """ -immutable RKIntegrator{Kind,Name,T,O<:Options} <: AbstractIntegrator{T} +immutable RKIntegrator{Kind,Name,T,OP<:Options} <: AbstractIntegrator{T} tableau::TableauRKExplicit{T} - options::O + opts::OP end @@ -56,15 +56,15 @@ typealias RKIntegratorFixed RKIntegrator{:fixed} typealias RKIntegratorAdaptive RKIntegrator{:adaptive} -@compat function (::Type{RKIntegrator{Kind,Name,T}}){Kind,Name,T}(;options...) +@compat function (::Type{RKIntegrator{Kind,Name,T}}){Kind,Name,T}(;opts...) tab = convert(TableauRKExplicit{T},tableaus_rk_explicit[Name]) if Kind == :fixed - opts = FixedOptions{T}(;options...) + opts = FixedOptions{T}(;opts...) if isadaptive(tab) error("Cannot construct a fixed step method from an adaptive step tableau") end elseif Kind == :adaptive - opts = AdaptiveOptions{T}(;options...) + opts = AdaptiveOptions{T}(;opts...) if !isadaptive(tab) error("Cannot construct an adaptive step method from an fixed step tableau") end @@ -73,23 +73,23 @@ typealias RKIntegratorAdaptive RKIntegrator{:adaptive} end -order(stepper::RKIntegrator) = minimum(order(stepper.tableau)) +order(integ::RKIntegrator) = minimum(order(integ.tableau)) -name(stepper::RKIntegrator) = stepper.tableau.name +name(integ::RKIntegrator) = integ.tableau.name -tdir(ivp::ExplicitODE, stepper::RKIntegrator) = sign(stepper.options.tstop - ivp.t0) +tdir(ode::ExplicitODE, integ::RKIntegrator) = sign(integ.opts.tstop - ode.t0) -solve{T,S<:RKIntegrator}(ivp::ExplicitODE{T}, stepper::Type{S}; options...) = - Problem(ivp,stepper{T}(;options...)) +solve{T,I<:RKIntegrator}(ode::ExplicitODE{T}, integ::Type{I}; opts...) 
= + Problem(ode,integ{T}(;opts...)) # lower level interface -# explicit RK stepper +# explicit RK integrator """ Pre allocated arrays to store temporary data. Used only by -Runge-Kutta stepper. +Runge-Kutta integrator. """ type RKWorkArrays{Y} @@ -101,7 +101,7 @@ end """ -State for the Runge-Kutta stepper. +State for the Runge-Kutta integrator. """ type RKState{T,Y} <: AbstractState{T,Y} step ::Step{T,Y} @@ -122,13 +122,13 @@ function show(io::IO, state::RKState) end -function init(ivp::ExplicitODE,stepper::RKIntegrator) - t0, dt0, y0 = ivp.t0, stepper.options.initstep, ivp.y0 +function init(ode::ExplicitODE,integ::RKIntegrator) + t0, dt0, y0 = ode.t0, integ.opts.initstep, ode.y0 # clip the dt0 if t0+dt0 exceeds tstop - dt0 = tdir(ivp,stepper)*min(abs(dt0),abs(stepper.options.tstop-t0)) + dt0 = tdir(ode,integ)*min(abs(dt0),abs(integ.opts.tstop-t0)) - lk = lengthks(stepper.tableau) + lk = lengthks(integ.tableau) work = RKWorkArrays(zero(y0), # y zero(y0), # ynew zero(y0), # yerr @@ -140,7 +140,7 @@ function init(ivp::ExplicitODE,stepper::RKIntegrator) end # pre-initialize work.ks[1] - ivp.F!(t0,y0,work.ks[1]) + ode.F!(t0,y0,work.ks[1]) step = Step(t0,copy(y0),copy(work.ks[1])) @@ -154,25 +154,25 @@ end ##################### -function onestep!(ivp::ExplicitODE, stepper::RKIntegratorFixed, state::RKState) +function onestep!(ode::ExplicitODE, integ::RKIntegratorFixed, state::RKState) step = state.step work = state.work - td = tdir(ivp,stepper) + td = tdir(ode,integ) - if td*step.t >= td*stepper.options.tstop + if td*step.t >= td*integ.opts.tstop # nothing left to integrate return finish end dof = length(step.y) - b = stepper.tableau.b - dt = td*min(abs(state.dt),abs(stepper.options.tstop-step.t)) + b = integ.tableau.b + dt = td*min(abs(state.dt),abs(integ.opts.tstop-step.t)) copy!(work.ynew,step.y) for k=1:length(b) - calc_next_k!(work, k, ivp, stepper.tableau, step, dt) + calc_next_k!(work, k, ode, integ.tableau, step, dt) for d=1:dof work.ynew[d] += dt * 
b[k]*work.ks[k][d] end @@ -193,52 +193,52 @@ const timeout_const = 5 # `trialstep!` ends with a step computed for the stepsize `state.dt` # and stores it in `work.y`, so `work.y` contains a candidate for # `y(t+dt)` with `dt=state.dt`. -function trialstep!(ivp::ExplicitODE, stepper::RKIntegratorAdaptive, state::RKState) +function trialstep!(ode::ExplicitODE, integ::RKIntegratorAdaptive, state::RKState) work = state.work step = state.step - tableau = stepper.tableau - options = stepper.options + tableau = integ.tableau + opts = integ.opts - td = tdir(ivp,stepper) + td = tdir(ode,integ) # use the proposed step size to perform the computations state.dt = state.newdt dt = state.dt - if td*step.t >= td*options.tstop + if td*step.t >= td*opts.tstop # nothing left to integrate return finish end - if abs(dt) < options.minstep + if abs(dt) < opts.minstep # TODO: use some sort of logging system warn("Minimum step size reached") return abort end # work.y and work.yerr and work.ks are updated after this step - rk_embedded_step!(work, ivp, tableau, step, dt) + rk_embedded_step!(work, ode, tableau, step, dt) return cont end # computes the error for the candidate solution `y(t+dt)` with # `dt=state.dt` and proposes a new time step -function errorcontrol!(ivp::ExplicitODE, - stepper::RKIntegratorAdaptive, +function errorcontrol!(ode::ExplicitODE, + integ::RKIntegratorAdaptive, state::RKState) work = state.work step = state.step - tableau = stepper.tableau + tableau = integ.tableau timeout = state.timeout - options = stepper.options + opts = integ.opts err, state.newdt, state.timeout = - stepsize_hw92!(work, step, tableau, state.dt, state.timeout, options) + stepsize_hw92!(work, step, tableau, state.dt, state.timeout, opts) - td = tdir(ivp,stepper) + td = tdir(ode,integ) # trim in case newdt > dt - state.newdt = td*min(abs(state.newdt), abs(options.tstop-(state.step.t+state.dt))) + state.newdt = td*min(abs(state.newdt), abs(opts.tstop-(state.step.t+state.dt))) if err > 1 # The 
error is too large, the step will be rejected. We reset @@ -252,18 +252,18 @@ end # Here we assume that trialstep! and errorcontrol! have already been # called, that is `work.y` holds `y(t+dt)` with `dt=state.dt`, and # error was small enough for us to keep `y(t+dt)` as the next step. -function accept!(ivp::ExplicitODE, - stepper::RKIntegratorAdaptive, +function accept!(ode::ExplicitODE, + integ::RKIntegratorAdaptive, state::RKState) work = state.work step = state.step - tableau = stepper.tableau + tableau = integ.tableau # preload ks[1] for the next step if tableau.isFSAL copy!(work.ks[1],work.ks[end]) else - ivp.F!(step.t+state.dt, work.ynew, work.ks[1]) + ode.F!(step.t+state.dt, work.ynew, work.ks[1]) end # Swap bindings of y and ytrial, avoids one copy @@ -280,7 +280,7 @@ end ########################## function rk_embedded_step!(work ::RKWorkArrays, - ivp ::ExplicitODE, + ode ::ExplicitODE, tableau ::Tableau, last_step ::Step, dt) @@ -299,7 +299,7 @@ function rk_embedded_step!(work ::RKWorkArrays, # we skip the first step beacause we assume that work.ks[1] is # already computed if s > 1 - calc_next_k!(work, s, ivp, tableau, last_step, dt) + calc_next_k!(work, s, ode, tableau, last_step, dt) end for d=1:dof work.ynew[d] += b[1,s]*work.ks[s][d] @@ -321,7 +321,7 @@ function stepsize_hw92!{T}(work, tableau ::TableauRKExplicit, dt ::T, timeout ::Int, - options ::Options{T}) + opts ::Options{T}) # Estimates the error and a new step size following Hairer & # Wanner 1992, p167 (with some modifications) # @@ -345,23 +345,23 @@ function stepsize_hw92!{T}(work, # in-place calculate yerr./tol for d=1:dof - # TODO: for some reason calling options.isoutofdomain + # TODO: for some reason calling opts.isoutofdomain # generates a lot of allocations - # if options.isoutofdomain(work.y[d])::Bool + # if opts.isoutofdomain(work.y[d])::Bool if isnan(work.y[d]) return T(10), dt*facmin, timout_after_nan end y0 = last_step.y[d] # TODO: is this supposed to be the last successful step? 
y1 = work.ynew[d] # the approximation to the next step - sci = (options.abstol + options.reltol*max(norm(y0),norm(y1))) + sci = (opts.abstol + opts.reltol*max(norm(y0),norm(y1))) work.yerr[d] ./= sci # Eq 4.10 end - # TOOD: should we use options.norm here as well? - err = options.norm(work.yerr) # Eq. 4.11 - newdt = sign(dt)*min(options.maxstep, abs(dt)*clamp(fac*(1/err)^(1/(ord+1)),facmin,facmax)) # Eq 4.13 modified + # TOOD: should we use opts.norm here as well? + err = opts.norm(work.yerr) # Eq. 4.11 + newdt = sign(dt)*min(opts.maxstep, abs(dt)*clamp(fac*(1/err)^(1/(ord+1)),facmin,facmax)) # Eq 4.13 modified if timeout > 0 newdt = sign(dt)*min(abs(newdt), abs(dt)) @@ -376,7 +376,7 @@ end # this is the only part of state that can be changed here function calc_next_k!(work ::RKWorkArrays, i ::Int, - ivp ::ExplicitODE, + ode ::ExplicitODE, tableau ::Tableau, last_step ::Step, dt) @@ -389,7 +389,7 @@ function calc_next_k!(work ::RKWorkArrays, work.y[d] += dt * work.ks[j][d] * a[i,j] end end - ivp.F!(t + c[i]*dt, work.y, work.ks[i]) + ode.F!(t + c[i]*dt, work.y, work.ks[i]) return nothing end diff --git a/src/types.jl b/src/types.jl index 8468d49b1..ab7948561 100644 --- a/src/types.jl +++ b/src/types.jl @@ -158,28 +158,27 @@ This is an iterable type, each call to next(...) produces a next step of a numerical solution to an IVP. - ivp: is the prescrived ivp, along with the initial data -- stepper: the algorithm used to produce subsequent steps - +- solver: the algorithm used to produce subsequent values from the ivp """ immutable Problem{O<:AbstractIVP,S<:AbstractSolver} - ivp ::O - stepper ::S + ivp ::O + solver ::S end Base.eltype{O}(::Type{Problem{O}}) = eltype(O) Base.eltype{O}(::Problem{O}) = eltype(O) # filter the wrong combinations of ivp and stepper -solve{O,S}(ivp::O, stepper::Type{S}, options...) = - error("The $S doesn't support $O") +solve{O,S}(ivp::O, solver::Type{S}, opts...) 
= + error("The solver $S doesn't support IVP of form $O") # In Julia 0.5 the collect needs length to be defined, we cannot do -# that for a solver but we can implement our own collect -function collect(s::Problem) - T,Y = eltype(s) +# that for a Problem but we can implement our own collect +function collect(prob::Problem) + T,Y = eltype(prob) pairs = Array(Tuple{T,Y},0) - for (t,y) in s + for (t,y) in prob push!(pairs,(t,copy(y))) end return pairs @@ -203,15 +202,13 @@ end # TODO: this implementation fails to return the zeroth step (t0,y0) # # TODO: store the current Step outside of the actual state -# Base.start(sol::Problem) = (init(sol), Step(ivp.sol)) -Base.start(sol::Problem) = init(sol.ivp,sol.stepper) +Base.start(prob::Problem) = init(prob.ivp, prob.solver) -function Base.done(s::Problem, st) +function Base.done(prob::Problem, st) # Determine whether the next step can be made by calling the # stepping routine. onestep! will take the step in-place. - status = onestep!(s.ivp, s.stepper, st) - # can't this be a function on a status? + status = onestep!(prob.ivp, prob.solver, st) if status==cont return false elseif status==finish @@ -224,21 +221,12 @@ function Base.done(s::Problem, st) end end -function Base.next(sol::Problem, st) +function Base.next(prob::Problem, st) # Output the step (we know that `done` allowed it, so we are safe # to do it) return output(st), st end -#m3: I don't think it makes sense to type-fy this. TODO: delete -# """ -# TODO: Holds the solver status after onestep. -# """ -# type Status{T} end -# successful(status::Status) = status == StatusContinue -# const StatusContinue = Status{:cont}() -# const StatusFailed = Status{:failed}() -# const StatusFinished = Status{:finished}() """ Holds the solver status, used inside of `onestep!`. 
@@ -280,7 +268,7 @@ to implement the sub-step functions `trialstep!`, `errorcontrol!` and Input: -- sol::Problem, state::AbstractState +- prob::Problem, state::AbstractState Output: @@ -288,15 +276,15 @@ Output: substeps. """ -function onestep!(ivp::IVP, stepper::AbstractIntegrator, state::AbstractState) - opt = stepper.options +function onestep!(ivp::IVP, integ::AbstractIntegrator, state::AbstractState) + opt = integ.opts while true - status = trialstep!(ivp, stepper, state) - err, status_err = errorcontrol!(ivp, stepper, state) + status = trialstep!(ivp, integ, state) + err, status_err = errorcontrol!(ivp, integ, state) status &= status_err if err<=1 # a successful step - status &= accept!(ivp, stepper, state) + status &= accept!(ivp, integ, state) return status elseif status==abort || status==finish return status @@ -331,8 +319,8 @@ compute the magnitude of its error. If the error is small enough Returns `Status`. """ -trialstep!{S<:AbstractIntegrator}(::IVP, ::S, ::AbstractState) = - error("Function `trialstep!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") +trialstep!{I<:AbstractIntegrator}(::IVP, ::I, ::AbstractState) = + error("Function `trialstep!` and companions (or alternatively `onestep!`) need to be implemented for adaptive integrator $I") """ @@ -346,8 +334,8 @@ If the `status==abort` then the integration is aborted, status values of `cont` and `finish` are ignored. """ -errorcontrol!{S<:AbstractIntegrator}(::IVP, ::S, ::AbstractState) = - error("Function `errorcontrol!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") +errorcontrol!{I<:AbstractIntegrator}(::IVP, ::I, ::AbstractState) = + error("Function `errorcontrol!` and companions (or alternatively `onestep!`) need to be implemented for adaptive integrator $I") """ @@ -357,5 +345,5 @@ a small enough error. Returns `Status`. 
""" -accept!{S<:AbstractIntegrator}(::IVP, ::S, ::AbstractState) = - error("Function `accept!` and companions (or alternatively `onestep!`) need to be implemented for adaptive solver $S") +accept!{I<:AbstractIntegrator}(::IVP, ::I, ::AbstractState) = + error("Function `accept!` and companions (or alternatively `onestep!`) need to be implemented for adaptive integrator $I") diff --git a/test/iterators.jl b/test/iterators.jl index f2f389833..5e1ac802c 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -48,7 +48,7 @@ const testsets = [ # Testing function ode -const steppers = [ODE.RKIntegratorFixed{:feuler}, +const integrators = [ODE.RKIntegratorFixed{:feuler}, ODE.RKIntegratorFixed{:midpoint}, ODE.RKIntegratorFixed{:heun}, ODE.RKIntegratorFixed{:rk4}, @@ -63,12 +63,12 @@ const steppers = [ODE.RKIntegratorFixed{:feuler}, function test_ode() tol = 0.002 - for rks in steppers - println("Testing $rks") + for integ in integrators + println("Testing $integ") for ts in testsets println("Testing problem $(ts[:name])") - tout, h0, stepper = ts[:tout], ts[:initstep], rks + tout, h0, stepper = ts[:tout], ts[:initstep], integ y0, F!, jac!, sol = ts[:y0], ts[:F!], ts[:jac], ts[:sol] From fd1ce43f993d60351652c853e99d827c0b66709a Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Tue, 26 Jul 2016 14:54:49 +0200 Subject: [PATCH 073/113] Fixed inconsistency in RK-tableau conversion --- src/tableaus.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tableaus.jl b/src/tableaus.jl index 1e6cd1562..097d59488 100644 --- a/src/tableaus.jl +++ b/src/tableaus.jl @@ -32,4 +32,4 @@ Base.eltype{T}(b::Tableau{T}) = T order(b::Tableau) = b.order # Subtypes need to define a convert method to convert to a different # eltype with signature: -Base.convert{Tnew<:Real}(::Type{Tnew}, tab::Tableau) = error("Define convert method for concrete Tableau types") +Base.convert{T<:Tableau}(::Type{T}, tab::Tableau) = error("Define convert method for concrete Tableau types") From 
f4c7557c5e40007219b1b221220c989a1c9c3f0b Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Tue, 26 Jul 2016 16:05:00 +0200 Subject: [PATCH 074/113] Updated dense output to wrap the Integrator --- examples/ex_iter.jl | 26 +++++++++++++++++++++++--- src/dense.jl | 40 ++++++++++++++++++++++------------------ src/interfaces.jl | 4 ++-- test/iterators.jl | 3 +-- 4 files changed, 48 insertions(+), 25 deletions(-) diff --git a/examples/ex_iter.jl b/examples/ex_iter.jl index dfd5a8085..3847d2a2b 100644 --- a/examples/ex_iter.jl +++ b/examples/ex_iter.jl @@ -1,18 +1,22 @@ using ODE +# pick your solver +integ = [ODE.RKIntegratorAdaptive{:rk45}, + ODE.ModifiedRosenbrockIntegrator][2] + # Define IVP-instance which holds the mathematical problem definition: t0 = 0.0 y0 = [1.0] ode = ODE.ExplicitODE(t0,y0,(t,y,dy)->dy[1]=y[1]) + +### Forward time integration + # options for the solver opts = Dict(:initstep=>0.1, :tstop=>1.0, :reltol=>1e-5, :abstol=>1e-5) -# pick your solver -integ = [ODE.RKIntegratorAdaptive{:rk45}, - ODE.ModifiedRosenbrockIntegrator][2] # create a Problem instance prob = ODE.solve(ode,integ;opts...) @@ -43,3 +47,19 @@ for (t,y) in prob # iterate over the solution end println(collect(prob)) + + +### Dense output +opts = Dict(:initstep=>0.1, + :tstop=>0.0, + :reltol=>1e-5, + :abstol=>1e-5, + :tout=>[t0:-0.1:0;]) + +prob = ODE.solve(ode,ODE.DenseOutput{integ};opts...) +println("t, y, err") +for (t,y) in prob # iterate over the solution + println((t,y[1],abs(y[1]-e.^(t-1)))) +end + +println(collect(prob)) diff --git a/src/dense.jl b/src/dense.jl index facbd1c10..1ed3a0ad0 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -13,7 +13,7 @@ Dense output options: """ immutable DenseOptions{T<:Number} <: Options{T} - tout::Vector{T} + tout::Vector{T} # TODO: this should an AbstractVector # points ::Symbol # stopevent::S # roottol ::T @@ -38,20 +38,22 @@ interpolates the results on request (currently this means at the output times stored in `opts.tout`). 
""" -immutable DenseOutput{P<:Problem,OP<:DenseOptions} <: AbstractSolver - prob::P +immutable DenseOutput{I<:AbstractIntegrator,OP<:DenseOptions} <: AbstractSolver + integ::I # TODO: Maybe this should be relaxed to a AbstractSolver? + # Then we could have a DenseOutput{DenseOutput{RK}}, say! opts::OP -end +End -function solve{S<:DenseOutput}(ivp::IVP, - ::Type{S}; - method = RKIntegratorAdaptive{:rk45}, - opts...) +function solve{I}(ivp::IVP, + ::Type{DenseOutput{I}}; + opts...) T = eltype(ivp)[1] - sol_orig = Problem(ivp,method{T}(; opts...)) + # create integrator + integ = I{T}(; opts...) + # create dense solver dense_opts = DenseOptions{T}(; opts...) - dense_solver = S(sol_orig,dense_opts) - return Problem(ivp,dense_solver) # TODO: this is where it is needed. + dense_solver = DenseOutput(integ, dense_opts) + return Problem(ivp, dense_solver) end """ @@ -70,8 +72,7 @@ output(ds::DenseState) = output(ds.step_out) function init(ivp::IVP, solver::DenseOutput) - ivp = solver.prob.ivp - integrator_state = init(solver.prob.ivp, solver.prob.solver) + integrator_state = init(ivp, solver.integ) dy0 = similar(ivp.y0) ivp.F!(ivp.t0,ivp.y0,dy0) step_prev = Step(ivp.t0,copy(ivp.y0),dy0) @@ -97,17 +98,20 @@ function onestep!(ivp::IVP, return finish end + # the underlying integrator + integ = solver.integ + # our next output time ti = solver.opts.tout[i] - prob = solver.prob # this looks weird istate = dstate.integrator_state + # try to get a new set of steps enclosing `ti`, if all goes # right we end up with t∈[t1,t2] with # t1,_=output(dstate.step_prev) # t2,_=output(dstate.integrator_state) - status = next_interval!(prob, istate, dstate.step_prev, ti) + status = next_interval!(ivp, integ, istate, dstate.step_prev, ti) if status == abort # we failed to get enough steps warn("Iterator was exhausted before the dense output could produce the output.") @@ -136,7 +140,7 @@ other words `tout∈[t1,t2]`. 
TODO: tdir """ -function next_interval!(prob, istate, step_prev, tout) +function next_interval!(ivp, integ, istate, step_prev, tout) while true # get the current time @@ -154,8 +158,8 @@ function next_interval!(prob, istate, step_prev, tout) copy!(step_prev.y,y) copy!(step_prev.dy,dy) - # try to perform a single step with the prob - status = onestep!(prob.ivp, prob.solver, istate) + # try to perform a single step: + status = onestep!(ivp, integ, istate) if status != cont return status diff --git a/src/interfaces.jl b/src/interfaces.jl index 92df0801e..7fa3e52bc 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -19,8 +19,8 @@ function ode{T,Y,I<:AbstractIntegrator}(F, y0::Y, tout = tout, kargs...) elseif points == :specified - prob = solve(equation, DenseOutput; - method = integ, + prob = solve(equation, + DenseOutput{integ}; tout = tout, kargs...) else diff --git a/test/iterators.jl b/test/iterators.jl index 5e1ac802c..844e62168 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -126,8 +126,7 @@ function test_ode() @test_approx_eq_eps y sol(t) tol end - for (t,y) in ODE.solve(equation,ODE.DenseOutput; - method = stepper, opts...) + for (t,y) in ODE.solve(equation,ODE.DenseOutput{stepper}; opts...) @test_approx_eq_eps y sol(t) tol end From 0a55d991d06640c93ef9a0f0d2f1b02e980fa093 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Tue, 26 Jul 2016 16:11:59 +0200 Subject: [PATCH 075/113] Added tdir to dense --- src/dense.jl | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 1ed3a0ad0..c551b3568 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -42,7 +42,7 @@ immutable DenseOutput{I<:AbstractIntegrator,OP<:DenseOptions} <: AbstractSolver integ::I # TODO: Maybe this should be relaxed to a AbstractSolver? # Then we could have a DenseOutput{DenseOutput{RK}}, say! 
opts::OP -End +end function solve{I}(ivp::IVP, ::Type{DenseOutput{I}}; @@ -131,23 +131,20 @@ end """ -Pulls the results from the `(prob,istate)` pair using `onestep!` until -we reach a first step such that `t>=tout`. It fills the `steps` -variable with (Step(t1,y(t1),dy(t1)),Step(t2,y(t2),dy(t2))), where -`t1` is is the step before `tout` and `t2` is `>=tout`. In -other words `tout∈[t1,t2]`. - -TODO: tdir - +Takes steps using the underlying integrator until it reaches a first +step such that `t>=tout`. It fills the `steps` variable with +(Step(t1,y(t1),dy(t1)),Step(t2,y(t2),dy(t2))), where `t1` is is the +step before `tout` and `t2` is `>=tout`. In other words +`tout∈[t1,t2]`. """ function next_interval!(ivp, integ, istate, step_prev, tout) - + td = tdir(ivp, integ) while true # get the current time t1 = step_prev.t t2,_ = output(istate) - t1, t2 = sort([t1,t2]) - if t1 <= tout <= t2 + + if td*t1 <= td*tout <= td*t2 # we found the enclosing times return cont end From eb4af91e24726355dd5198e8acda54e2044f903c Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Tue, 26 Jul 2016 16:14:48 +0200 Subject: [PATCH 076/113] Big file rename. 
Fixes #10 --- src/ODE.jl | 12 ++++++------ src/{types.jl => base.jl} | 0 src/{ => integrators}/adams-bashford-moulton.jl | 0 src/{ => integrators}/ode23s.jl | 0 src/{ => integrators}/rosenbrock.jl | 0 src/{ => integrators}/runge-kutta.jl | 0 src/{interfaces.jl => top-interface.jl} | 0 7 files changed, 6 insertions(+), 6 deletions(-) rename src/{types.jl => base.jl} (100%) rename src/{ => integrators}/adams-bashford-moulton.jl (100%) rename src/{ => integrators}/ode23s.jl (100%) rename src/{ => integrators}/rosenbrock.jl (100%) rename src/{ => integrators}/runge-kutta.jl (100%) rename src/{interfaces.jl => top-interface.jl} (100%) diff --git a/src/ODE.jl b/src/ODE.jl index eedcdfd28..046940908 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -32,7 +32,7 @@ import Base: start, next, done, collect, show, convert # # Note, if we go the MathProgBase.jl route, then these files would go # into ODEBase.jl. -include("types.jl") +include("base.jl") include("tableaus.jl") include("options.jl") include("helpers.jl") @@ -41,12 +41,12 @@ include("helpers.jl") include("dense.jl") # Particular solvers -include("ode23s.jl") -include("runge-kutta.jl") -include("adams-bashford-moulton.jl") -include("rosenbrock.jl") +include("integrators/ode23s.jl") +include("integrators/runge-kutta.jl") +include("integrators/adams-bashford-moulton.jl") +include("integrators/rosenbrock.jl") # User interface to solvers -include("interfaces.jl") +include("top-interface.jl") end # module ODE diff --git a/src/types.jl b/src/base.jl similarity index 100% rename from src/types.jl rename to src/base.jl diff --git a/src/adams-bashford-moulton.jl b/src/integrators/adams-bashford-moulton.jl similarity index 100% rename from src/adams-bashford-moulton.jl rename to src/integrators/adams-bashford-moulton.jl diff --git a/src/ode23s.jl b/src/integrators/ode23s.jl similarity index 100% rename from src/ode23s.jl rename to src/integrators/ode23s.jl diff --git a/src/rosenbrock.jl b/src/integrators/rosenbrock.jl similarity 
index 100% rename from src/rosenbrock.jl rename to src/integrators/rosenbrock.jl diff --git a/src/runge-kutta.jl b/src/integrators/runge-kutta.jl similarity index 100% rename from src/runge-kutta.jl rename to src/integrators/runge-kutta.jl diff --git a/src/interfaces.jl b/src/top-interface.jl similarity index 100% rename from src/interfaces.jl rename to src/top-interface.jl From 3227dee133d0d744fc749eed981d1de9f757e92e Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Tue, 26 Jul 2016 16:47:02 +0200 Subject: [PATCH 077/113] Update interpolate! --- src/dense.jl | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index c551b3568..9401cb6de 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -121,7 +121,7 @@ function onestep!(ivp::IVP, # the dstate.step_out with y(ti) and y'(ti) according to an # interpolation algorithm specific for a method (defaults to # hermite O(3)). - interpolate!(istate, dstate.step_prev, ti, dstate.step_out) + interpolate!(dstate, ti, dstate.step_out) # increase the counter dstate.tout_i += 1 @@ -167,26 +167,38 @@ function next_interval!(ivp, integ, istate, step_prev, tout) return abort end + """ +Makes dense output + +interpolate!{ST}(dstate::DenseState{ST}, tout, step_out) + +Input: -Make dense output using Hermite interpolation of order O(3). Updates -yout in-place. Only needs y and dy at t1 and t2. -Input -- istate::AbstractState -- istate of the integrator at time t2 -- step_prev::Step -- solution at time t1 respectively +- `dstate::DenseState{ST}` + - `ST` is the `AbstractIntegrator` state which can be used for dispatch - tout -- time of requested output -- yout -- inplace y output -Ref: Hairer & Wanner p.190 +- step_out::Step -- inplace output step + +Output: nothing -TODO: tdir (I think this works for any order of t1 and t2 but needs -verifying. +TODO: output dy too -TODO: fill dy +TOOD: provide arbitrary order dense output. 
Maybe use work of @obiajulu on A-B-M methods. +""" +function interpolate! end -TODO: arbitrary order method (change step_prev::Step to step_prevs::Tuple{Step,N}) +""" +Make dense output using Hermite interpolation of order O(3), should +work for most integrators and is used as default. This only needs y +and dy at t1 and t2. +Ref: Hairer & Wanner p.190 """ -function interpolate!{T,Y}(istate::AbstractState,step_prev::Step{T,Y},tout::T,step_out::Step{T,Y}) +function interpolate!(dstate::DenseState, tout, step_out::Step) + step_prev = dstate.step_prev + istate = dstate.integrator_state + t1,y1,dy1 = output(step_prev) t2,y2,dy2 = output(istate) if tout==t1 From 2769081d8d52cb24f490bde512003ee1ba5e6b46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Tue, 26 Jul 2016 18:13:19 +0200 Subject: [PATCH 078/113] Implementing integrator, tutorial --- examples/iterator-implementation.jl | 173 ++++++++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 examples/iterator-implementation.jl diff --git a/examples/iterator-implementation.jl b/examples/iterator-implementation.jl new file mode 100644 index 000000000..68c469d66 --- /dev/null +++ b/examples/iterator-implementation.jl @@ -0,0 +1,173 @@ +type EulerIntegrator <: AbstractIntegrator + # nothing in here yet +end + +type EulerState + t + y + dy +end + +output(state::EulerState) = state.t, state.y, state.dy + +# see we don't need a fancy constructor +function solver(ode::ExplicitODE, + ::Type{EulerIntegrator}; + options...) 
+ return Problem(ode,EulerIntegrator()) +end + +function init(ode::ExplicitODE, integ::EulerIntegrator) + t0, y0 = ode.t0, ode.y0 + dy0 = similar(ode.dy0) + ode.F!(t0,y0,dy0) # fill in the values of the derivative + EulerState(t0,y0,dy0) +end + +function onestep!(ode::ExplicitODE, integ::EulerIntegrator, state::EulerState) + t, y, dy = output(state) + dt = 0.1 + y += dt*dy + t += dt + ode.F!(t0,y0,dy0) # update the derivative + return cont +end + + +""" + +There are several problems with the above implementation. First of +all, it has a constant prescribed step size. This could easily be +fixed by changing the type definition and the `solver` to + +""" + +type EulerIntegrator <: AbstractIntegrator + initstep +end + +function solver(ode::ExplicitODE, + ::Type{EulerIntegrator}; + initstep = 0.1, + options...) + return Problem(ode,EulerIntegrator(initstep)) +end + +""" + +we should also change the line `dt = 0.1` in the `onestep!` function +to `dt = stepper.initstep`. Now we can run our integrator with a +custom step size! + +""" + +sol = solver(ode,EulerIntegrator,initstep = 0.01) +for (t,y) in sol + if t > 1 + print(t,y) + break + end +end + +""" + +Another issue is type stability, to make `EulerIntegrator` perform +better we should explicitly annotate the fields in both +`EulerIntegrator` and `EulerState` like this + +""" + +type EulerIntegrator{T,Y} <: AbstractIntegrator{T,Y} + initstep::T +end + +type EulerState{T,Y} + t::T + y::Y + dy::Y +end + +function solver(ode::ExplicitODE{T,Y}, + ::Type{EulerIntegrator}; + initstep::T = T(0.1), + options...) 
+ return Problem(ode,EulerIntegrator{T,Y}(initstep)) +end + + +""" + +But the `EulerState{T,Y}` is exactly the same as `Step` from +`base.jl`, so we can simplify it a bit more + +""" + +type EulerState{T,Y} + step::Step{T,Y} +end + +""" + +Once we do that, in the `init` we should change +`EulerState(t0,y0,dy0)` to `EulerState(Step(t0,y0,dy0))` and redefine +`output` to `output(state::EulerState)=output(state.step)` +(`output(::Step)` is already defined in `base.jl`). + +One could even replace `EulerState` with `Step` completely, but this +won't allow us to extend the state with some additional variables and +storage space in the future. + +The last thing is that our stepper will continue the integration +forever: it doesn't have a stopping condition. We could add one as an +option. + +""" + +type EulerIntegrator{T,Y} <: AbstractIntegrator{T,Y} + initstep::T + tstop::T +end + +function solver(ode::ExplicitODE{T,Y}, + ::Type{EulerIntegrator}; + initstep::T = T(0.1), + tstop::T = T(Inf) + options...) + return Problem(ode,EulerIntegrator{T,Y}(initstep,tstop)) +end + +function onestep!(ode::ExplicitODE, integ::EulerIntegrator, state::EulerState) + t, y, dy = output(state) + + if t > integ.tstop + return finished + end + + dt = integ.initstep + y += dt*dy + t += dt + ode.F!(t0,y0,dy0) # update the derivative + return cont +end + +""" + +As a final improvement, we can (although this is not necessary) use a +structure `FixedOptions` from `options.jl` to keep our options in one +structure. A corresponding options type for adaptive solver is +`AdaptiveOptions`. This way we can use the standarized defaults for +most options and keep our solver in line with the standard +keywords. Naturally, we have to update `onestep!` to use the subtype. + +""" + +type EulerIntegrator{T,Y} <: AbstractIntegrator{T,Y} + options::FixedOptions{T,Y} +end + +function solver(ode::ExplicitODE{T,Y}, + ::Type{EulerIntegrator}; + options...) + options = FixedOptions{T}(;options...) 
+ return Problem(ode,EulerIntegrator{T,Y}(options)) +end From 0d5141e42e37ccfac7503543a5b1612438f480b7 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Tue, 26 Jul 2016 20:52:05 +0200 Subject: [PATCH 079/113] Changed interpolate! signature back --- src/dense.jl | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 9401cb6de..1f86a78d2 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -4,11 +4,14 @@ Dense output options: -- tspan ::Vector{T} output times +- tout ::Vector{T} output times + +TODO options: + - points ::Symbol which points are returned: `:specified` only the ones in tspan or `:all` which includes also the step-points of the solver. -- stopevent Stop integration at a zero of this function -- roottol TODO +- stopevent Stop integration at a zero of this function +- roottol """ @@ -32,10 +35,10 @@ end """ -A solver specialized in dense output. It wraps around a -`Problem` and stores the subsequent steps generated by `Problem` and -interpolates the results on request (currently this means at the -output times stored in `opts.tout`). +A solver specialized in dense output; it wraps an integrator. It +stores the subsequent steps generated by `Problem` and interpolates +the results (currently this means at the output times stored in +`opts.tout`). """ immutable DenseOutput{I<:AbstractIntegrator,OP<:DenseOptions} <: AbstractSolver @@ -58,7 +61,7 @@ end """ -The state of the dense solver +The state of the dense solver `DenseOutput`. """ type DenseState{St<:AbstractState,T,Y} <: AbstractState{T,Y} @@ -121,7 +124,7 @@ function onestep!(ivp::IVP, # the dstate.step_out with y(ti) and y'(ti) according to an # interpolation algorithm specific for a method (defaults to # hermite O(3)). 
- interpolate!(dstate, ti, dstate.step_out) + interpolate!(istate, dstate.step_prev, ti, dstate.step_out) # increase the counter dstate.tout_i += 1 @@ -171,12 +174,12 @@ end """ Makes dense output -interpolate!{ST}(dstate::DenseState{ST}, tout, step_out) +interpolate!(istate::AbstractState, step_prev::Step, tout, step_out::Step) Input: -- `dstate::DenseState{ST}` - - `ST` is the `AbstractIntegrator` state which can be used for dispatch +- `istate::AbstractState` state of the integrator +- `step_prev` the previous step, part of `dstate` - tout -- time of requested output - step_out::Step -- inplace output step @@ -195,10 +198,10 @@ and dy at t1 and t2. Ref: Hairer & Wanner p.190 """ -function interpolate!(dstate::DenseState, tout, step_out::Step) - step_prev = dstate.step_prev - istate = dstate.integrator_state - +function interpolate!(istate::AbstractState, + step_prev::Step, + tout, + step_out::Step) t1,y1,dy1 = output(step_prev) t2,y2,dy2 = output(istate) if tout==t1 From 8fda07ce93b7848acc8df57ec84df645a89a4e42 Mon Sep 17 00:00:00 2001 From: Mauro Werder Date: Tue, 26 Jul 2016 21:24:41 +0200 Subject: [PATCH 080/113] doc-string for [ci skip] --- src/base.jl | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/src/base.jl b/src/base.jl index ab7948561..9c9f1355a 100644 --- a/src/base.jl +++ b/src/base.jl @@ -170,7 +170,31 @@ Base.eltype{O}(::Type{Problem{O}}) = eltype(O) Base.eltype{O}(::Problem{O}) = eltype(O) # filter the wrong combinations of ivp and stepper -solve{O,S}(ivp::O, solver::Type{S}, opts...) = +""" + +Solve creates an iterable `Problem` instance from an `IVP` instance +(specifying the math) and from a `Type{AbstractSolver}` (the numerical +integrator). + +Notes: + +- usually a solvers requires the ivp to be in a certain form, say an + `ExplicitODE`. +- the second argument it the *Type* of the solver and not an instance. 
+ The instance of the solve can only be created together with the + `ivp` as their type parameters need to match. + +Input: + +- `ivp::IVP` +- `S::Type{AbstractSolver}` + +Output: + +- `::Problem` + +""" +solve{O,S}(ivp::O, ::Type{S}, opts...) = error("The solver $S doesn't support IVP of form $O") # In Julia 0.5 the collect needs length to be defined, we cannot do From 0c0a6c1addc2f3240967861777cb85e62475fb70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 27 Jul 2016 16:43:53 +0200 Subject: [PATCH 081/113] Minor changes, added `integ` as a keyword argument --- src/base.jl | 34 ++++++++++++++++++++++++++-------- src/dense.jl | 5 +++++ src/options.jl | 4 ++-- src/top-interface.jl | 32 ++++++++++++++++---------------- test/iterators.jl | 22 ++++++++++++++++++---- test/runtests.jl | 2 +- 6 files changed, 68 insertions(+), 31 deletions(-) diff --git a/src/base.jl b/src/base.jl index 9c9f1355a..d989f9949 100644 --- a/src/base.jl +++ b/src/base.jl @@ -8,7 +8,7 @@ abstract AbstractIVP{T,Y} -Base.eltype{T,Y}(::Type{AbstractIVP{T,Y}}) = T,Y +Base.eltype{T,Y}(::Type{AbstractIVP{T,Y}}) = T,Y,Y """ @@ -169,12 +169,24 @@ end Base.eltype{O}(::Type{Problem{O}}) = eltype(O) Base.eltype{O}(::Problem{O}) = eltype(O) -# filter the wrong combinations of ivp and stepper """ + solve(ivp::IVP, solver::Type{AbstractSolver}, opts...) + solve(ivp::IVP; solver=RKIntegratorAdaptive{:rk45}, opts...) Solve creates an iterable `Problem` instance from an `IVP` instance (specifying the math) and from a `Type{AbstractSolver}` (the numerical -integrator). +integrator). The simplest use case is + +```julia +for (t,y,dy) in solver(...) + # do something with t, y an dy +end +``` + +If the integration interval, defined by the keyword argument `tstop`, +is finite you can request all the results at once by calling +``` +collect(solver(...)) # => Vector{Tuple{T,Y,Y}} Notes: @@ -194,16 +206,22 @@ Output: - `::Problem` """ -solve{O,S}(ivp::O, ::Type{S}, opts...) 
= - error("The solver $S doesn't support IVP of form $O") +solve(ivp::IVP, solver; opts...) = + error("The solver $(typeof(solver)) doesn't support IVP of form $(typeof(ivp))") + +function solve{S<:AbstractSolver}(ivp::IVP; + solver::Type{S} = RKIntegratorAdaptive{:rk45}, + opts...) + solve(ivp, solver; opts...) +end # In Julia 0.5 the collect needs length to be defined, we cannot do # that for a Problem but we can implement our own collect function collect(prob::Problem) T,Y = eltype(prob) - pairs = Array(Tuple{T,Y},0) - for (t,y) in prob - push!(pairs,(t,copy(y))) + pairs = Array(Tuple{T,Y,Y},0) + for (t,y,dy) in prob + push!(pairs,(t,copy(y),copy(dy))) end return pairs end diff --git a/src/dense.jl b/src/dense.jl index 1f86a78d2..0eb9b7522 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -44,9 +44,14 @@ the results (currently this means at the output times stored in immutable DenseOutput{I<:AbstractIntegrator,OP<:DenseOptions} <: AbstractSolver integ::I # TODO: Maybe this should be relaxed to a AbstractSolver? # Then we could have a DenseOutput{DenseOutput{RK}}, say! + # pwl: Or DenseOutput{StiffnessSwitching{whatever}} opts::OP end +# TODO: this is confusing, firs you call `solve` with `DenseOutput{I}` +# and then you call construct it as `DenseOutput{T}`. Also this goes +# against the convention that we pass as much as possible as +# options. What if a Solver takes more than one parameter? function solve{I}(ivp::IVP, ::Type{DenseOutput{I}}; opts...) diff --git a/src/options.jl b/src/options.jl index 26d00a960..346f79a6d 100644 --- a/src/options.jl +++ b/src/options.jl @@ -37,8 +37,8 @@ end abstol = eps(T)^T(1//2)/10, minstep = 10*eps(T), maxstep = 1/minstep, - initstep = minstep, - norm::N = Base.norm, + initstep = eps(T)^T(1//3), + norm::N = y->Base.norm(y,Inf), maxiters = T(Inf), isoutofdomain::O = Base.isnan, kargs...) 
diff --git a/src/top-interface.jl b/src/top-interface.jl index 7fa3e52bc..127bc08b5 100644 --- a/src/top-interface.jl +++ b/src/top-interface.jl @@ -5,10 +5,10 @@ tspan[end] is the last integration time. """ function ode{T,Y,I<:AbstractIntegrator}(F, y0::Y, - tout::AbstractVector{T}, - integ::Type{I}; - points = :all, - kargs...) + tout::AbstractVector{T}; + integ::Type{I} = RKIntegratorAdaptive{:rk45}, + points = :all, + kargs...) t0 = tout[1] @@ -46,20 +46,20 @@ end Solves an ODE `y'=F(t,y)` with initial conditions `y0` and `t0`. """ -ode23s(F,y0,t0;kargs...) = ode_conv(F,y0,t0,ModifiedRosenbrockIntegrator; kargs...) -ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorFixed{:feuler}; kargs...) -ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorFixed{:midpoint}; kargs...) -ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorFixed{:heun}; kargs...) -ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorFixed{:rk4}; kargs...) -ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorAdaptive{:rk21}; kargs...) -ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorAdaptive{:rk23}; kargs...) -ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorAdaptive{:rk45}; kargs...) -ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorAdaptive{:dopri5}; kargs...) +ode23s(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = ModifiedRosenbrockIntegrator, kargs...) +ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorFixed{:feuler}, kargs...) +ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorFixed{:midpoint}, kargs...) +ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorFixed{:heun}, kargs...) +ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorFixed{:rk4}, kargs...) +ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorAdaptive{:rk21}, kargs...) +ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorAdaptive{:rk23}, kargs...) +ode45_fe(F,y0,t0;kargs...) 
= ode_conv(F,y0,t0;integ = RKIntegratorAdaptive{:rk45}, kargs...) +ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorAdaptive{:dopri5}, kargs...) const ode45 = ode45_dp -ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0,RKIntegratorAdaptive{:feh78}; kargs...) +ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorAdaptive{:feh78}, kargs...) -function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},integ;kargs...) +function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T};kargs...) if !isleaftype(T) error("The output times have to be of a concrete type.") @@ -71,7 +71,7 @@ function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T},integ;kargs...) error("The initial data has to be of a concrete type (or an array)") end - ode(F,y0,t0,integ;kargs...) + ode(F,y0,t0;kargs...) end diff --git a/test/iterators.jl b/test/iterators.jl index 844e62168..9b81f50c1 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -80,10 +80,17 @@ function test_ode() Fscal = (t,y)->F(t,[y])[1] y0scal = y0[1] # with jacobian - tj,yj = ODE.ode(Fscal,y0scal,tout,stepper,points=points,initstep = h0,J! = jac!) + tj,yj = ODE.ode(Fscal,y0scal,tout, + integ = stepper, + points = points, + initstep = h0, + J! = jac!) @test_approx_eq_eps yj map(x->sol(x)[1],tj) tol # without jacobian - t,y = ODE.ode(Fscal,y0scal,tout,stepper,points=points,initstep = h0) + t,y = ODE.ode(Fscal,y0scal,tout, + integ = stepper, + points = points, + initstep = h0) @test_approx_eq_eps y map(x->sol(x)[1],tj) tol # results with and without jacobian should be exactly the same @@ -99,10 +106,17 @@ function test_ode() # ODE.odeXX vector interface # with jacobian - tj,yj = ODE.ode(F,y0,tout,stepper,points=points,initstep = h0,J! = jac!) + tj,yj = ODE.ode(F,y0,tout, + integ = stepper, + points = points, + initstep = h0, + J! = jac!) @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) 
tol # without jacobian - t,y = ODE.ode(F,y0,tout,stepper,points=points,initstep = h0) + t,y = ODE.ode(F,y0,tout, + integ = stepper, + points = points, + initstep = h0) @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol @test_approx_eq hcat(yj...) hcat(y...) diff --git a/test/runtests.jl b/test/runtests.jl index 9f75e3898..a890f442c 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -107,7 +107,7 @@ let refsol = [0.2083340149701255e-07, 0.8333360770334713e-13, 0.9999999791665050] # reference solution at tspan[2] - @test norm(refsol-y[end], Inf) < 2e-10 + @test norm(refsol-y[end], Inf) < 2.1e-10 end include("interface-tests.jl") From 3ad4c42a26bf2c43bd28d5df656968613d2e1cb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 27 Jul 2016 17:33:52 +0200 Subject: [PATCH 082/113] Docs --- .gitignore | 3 + .travis.yml | 4 + docs/make.jl | 3 + docs/mkdocs.yml | 31 +++++ docs/src/index.md | 158 +++++++++++++++++++++++ docs/src/man/base.md | 4 + docs/src/man/basics.md | 3 + docs/src/tutorials/euler_integrator.md | 165 +++++++++++++++++++++++++ 8 files changed, 371 insertions(+) create mode 100644 .gitignore create mode 100644 docs/make.jl create mode 100644 docs/mkdocs.yml create mode 100644 docs/src/index.md create mode 100644 docs/src/man/base.md create mode 100644 docs/src/man/basics.md create mode 100644 docs/src/tutorials/euler_integrator.md diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..a6b2f497b --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +build +docs/build +docs/site diff --git a/.travis.yml b/.travis.yml index 7ef74c555..94720fd6a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,4 +12,8 @@ notifications: script: - if [[ -a .git/shallow ]]; then git fetch --unshallow; fi - julia -e 'Pkg.clone("https://github.com/JuliaODE/ODE.jl.git"); Pkg.build("ODE"); Pkg.test("ODE"; coverage=true)'; + +after_success: - julia -e 'cd(Pkg.dir("ODE")); Pkg.add("Coverage"); using Coverage; 
Coveralls.submit(Coveralls.process_folder()); Codecov.submit(process_folder())'; + - julia -e 'Pkg.add("Documenter")' + - julia -e 'cd(Pkg.dir("ODE")); include(joinpath("docs", "make.jl"))' diff --git a/docs/make.jl b/docs/make.jl new file mode 100644 index 000000000..6af21d2b6 --- /dev/null +++ b/docs/make.jl @@ -0,0 +1,3 @@ +using Documenter, ODE + +makedocs() diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml new file mode 100644 index 000000000..2150fbbd6 --- /dev/null +++ b/docs/mkdocs.yml @@ -0,0 +1,31 @@ +site_name: ODE.jl +repo_url: https://github.com/JuliaODE/ODE.jl +site_description: Julia package for solving differential equations + +extra_css: + - assets/Documenter.css + +markdown_extensions: + - codehilite + - extra + - tables + - fenced_code + - admonition + +extra_javascript: + - https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML + - assets/mathjaxhelper.js + +docs_dir: 'build' + +pages: +- Home: index.md +- Manual: + - Basics: man/basics.md + - Base: man/base.md + # - Integrators: + # - Solvers: + # - Options: +- Tutorials: + - Fixed step integrator: tutorials/euler_integrator.md + # - Adaptive step integrator: diff --git a/docs/src/index.md b/docs/src/index.md new file mode 100644 index 000000000..99641f21f --- /dev/null +++ b/docs/src/index.md @@ -0,0 +1,158 @@ +```@contents +Pages = [ + "tutorials/euler_integrator.md", + "man/basics.md", + "man/base.md" + ] +``` + +# ODE.jl + +## Top level interface + +If you are looking to getting a solution without the additional hussle +of handling an iterator we provide the wrappers `ODE.odeXX`. They +provide a simplistic way of handling explicit differential equations +of the form `y'=F(t,y)` with `y` being either a `Number` or an +`AbstractArray` of any dimension. Below we solve a simple initial +value problem given by `y'=y` with initial data at `t0=0.0` and +`y0=1.0` on the interval `[t0,1]`. 
+ +```@example ode +using ODE +tspan = [0.0,1.0] +y0 = 1.0 +F(t,y) = y +(t,y) = ODE.ode(F,y0,tspan) +``` + +The vectors `t` and `y` store the time and solution values at the +corresponding times. + +You might find the basic interface limiting. First of all, it stores +all the results, so if you are only interested in the final value of +`y` it still stores all the intermediate steps. Secondly, you cannot +process the results on the fly (e.g. plot the current state of a +solution). If you need more control you should consider using the +iterator interface. + +## Iterator interface + +To offset the limitations of the `ODE.ode` interface we implemented a +general iterator interface. First we define an initial value problem, in our case this is +an explicit differential equation `y'=y` with initial data `y0=[1.0]` +given at the time `t0=0.0`. + +```@example iterator +using ODE +t0 = 0.0 +y0 = [1.0] +F!(t,y,dy) = dy[1]=y[1] +ode = ODE.ExplicitODE(t0,y0,F!) +``` + +Note that unlike in `ODE.ode` we now have to supply an in place +function `F!` instead of an explicit function `F`. Now we are ready +to produce the iterator that solves our problem. + +```@example iterator +sol = ODE.solve(ode) +for (t,y) in sol + @show (t,y) + if t > 1 + break + end +end +``` + +Note that we had to interrupt the loop because `sol` would be +producing solutions ad infinitum (in theory, in practice we will get +to the point where the solver won't be able to produce reasonable +solution anymore). To set the final integration time and other +parameters of the integrator `integ` we can pass optional arguments to +`ODE.solve`. + +```@example iterator +sol = ODE.solve(ode; tstop = 1) +for (t,y) in sol + @show (t,y) +end +``` + +This approach has the added benefit of the solution never exceeding +the final time. Apart from the time and value `(t,y)` the `ODE.solve` +returns also the derivative, you can retrieve it as the third argument +in the returned tuple.
In the following example we use it to compute the +absolute error. + +```@example iterator +sol = ODE.solve(ode; tstop = 1) +for (t,y,dy) in sol + err = norm(y-dy) + @show err +end +``` + +With `tstop` specified we can also get all results at once using +`collect`. + +```@example iterator +res = collect(sol) +``` + +Note that `collect` returns a vector of triples `(t,y,dy)`. + +## Options + +Both `ODE.ode` and `ODE.solve` accept the following keyword arguments. + +- `integ`: the type of integrator to use, defaults to an adaptive + Runge-Kutta method of order 4/5. To see the list of available + integrators see [`Integrators`](@ref). + +- `initstep`: The initial step size, defaults to `eps(T)^(1/3)`. + +- `tstop`: The final integration time, never exceeded by the + integrator. In case of `ODE.ode(F,y0,tspan)` this option defaults + to the last element of `tspan` if it is a vector. In `ODE.solve` + the default is `tstop=Inf`. If `tstop` is smaller than `t0` the + integration runs backwards in time. + +Apart from these general options, each integrator has its own keyword +arguments. In particular all integrators with adaptive step size +can be controlled with + +- `reltol`, `abstol`: The relative and absolute error tolerances. The + solution guarantees that at each step we have + `norm((y-yc)*reltol.+abstol)<=1`, where `yc` is a true solution to + an IVP. Defaults are `reltol=eps(T)^(1/3)/10`, + `abstol=eps(T)^(1/2)/10`. + +- `norm`: The norm used to measure error in the formula above, + defaults to `y->Base.norm(y,Inf)`. You can specify it to assign + different weights to different components of `y`. + +- `minstep`, `maxstep`: Minimal and maximal stepsize for the + integrator. If at any point the stepsize exceeds these limits the + integrator will yield an error and cease producing + results. Defaults are `minstep=10*eps(T)` and `maxstep=1/minstep`. + +- `maxiters`: The number of iterations before the integrator ceases to + work, defaults to `Inf`.
Useful as a safeguard from iterator + continuing ad infinitum. + +- `isoutofdomain`: Applied to each component of `y`, if + `isoutofdomain(y[i])==true` the integrator stops. Defaults to + `Base.isnan`. + +Apart from these, each integrator may support additional options. + +## Integrators + +### Explicit Runge-Kutta integrators + +### Rosenbrock methods + +### Backwards differential formula (BDF) methods + +### ??? diff --git a/docs/src/man/base.md b/docs/src/man/base.md new file mode 100644 index 000000000..9eddc3585 --- /dev/null +++ b/docs/src/man/base.md @@ -0,0 +1,4 @@ +```@autodocs +Modules = [ODE] +Order = [:function, :type] +``` diff --git a/docs/src/man/basics.md b/docs/src/man/basics.md new file mode 100644 index 000000000..18248a870 --- /dev/null +++ b/docs/src/man/basics.md @@ -0,0 +1,3 @@ +# Basic usage + +Consider an ODE $y'=y$ diff --git a/docs/src/tutorials/euler_integrator.md b/docs/src/tutorials/euler_integrator.md new file mode 100644 index 000000000..09d1d5214 --- /dev/null +++ b/docs/src/tutorials/euler_integrator.md @@ -0,0 +1,165 @@ +Below you will find the simplest implementation of a reasonable +generic solver that finds solutions to explicit ODE equations. + +```julia +type EulerIntegrator <: AbstractIntegrator + # nothing in here yet +end + +type EulerState + t + y + dy +end + +output(state::EulerState) = state.t, state.y, state.dy + +# see we don't need a fancy constructor +function solver(ode::ExplicitODE, + ::Type{EulerIntegrator}; + options...) 
+ return Problem(ode,EulerIntegrator()) +end + +function init(ode::ExplicitODE, integ::EulerIntegrator) + t0, y0 = ode.t0, ode.y0 + dy0 = similar(ode.dy0) + ode.F!(t0,y0,dy0) # fill in the values of the derivative + EulerState(t0,y0,dy0) +end + +function onestep!(ode::ExplicitODE, integ::EulerIntegrator, state::EulerState) + t, y, dy = output(state) + dt = 0.1 + y += dt*dy + t += dt + ode.F!(t0,y0,dy0) # update the derivative + return cont +end +``` + +There are several problems with the above implementation. First of +all, it has a constant prescribed step size. This could easily be +fixed by changing the type definition and the `solver` to + + +```julia +type EulerIntegrator <: AbstractIntegrator + initstep +end + +function solver(ode::ExplicitODE, + ::Type{EulerIntegrator}; + initstep = 0.1, + options...) + return Problem(ode,EulerIntegrator(initstep)) +end +``` + +we should also change the line `dt = 0.1` in the `onestep!` function +to `dt = stepper.initstep`. Now we can run our integrator with a +custom step size! + +```julia +sol = solver(ode,EulerIntegrator,initstep = 0.01) +for (t,y) in sol + if t > 1 + print(t,y) + break + end +end +``` + +Another issue is type stability, to make `EulerIntegrator` perform +better we should explicitly annotate the fields in both +`EulerIntegrator` and `EulerState` like this + +```julia +type EulerIntegrator{T,Y} <: AbstractIntegrator{T,Y} + initstep::T +end + +type EulerState{T,Y} + t::T + y::Y + dy::Y +end + +function solver(ode::ExplicitODE{T,Y}, + ::Type{EulerIntegrator}; + initstep::T = T(0.1), + options...) 
+ return Problem(ode,EulerIntegrator{T,Y}(initstep)) +end +``` + +But the `EulerState{T,Y}` is exactly the same as `Step` from +`base.jl`, so we can simplify it a bit more + +```julia +type EulerState{T,Y} + step::Step{T,Y} +end +``` + +Once we do that, in the `init` we should change +`EulerState(t0,y0,dy0)` to `EulerState(Step(t0,y0,dy0))` and redefine +`output` to `output(state::EulerState)=output(state.step)` +(`output(::Step)` is already defined in `base.jl`). + +One could even replace `EulerState` with `Step` completely, but this +won't allow us to extend the state with some additional variables and +storage space in the future. + +The last thing is that our stepper will continue the integration +forever: it doesn't have a stopping condition. We could add one as an +option. + +```julia +type EulerIntegrator{T,Y} <: AbstractIntegrator{T,Y} + initstep::T + tstop::T +end + +function solver(ode::ExplicitODE{T,Y}, + ::Type{EulerIntegrator}; + initstep::T = T(0.1), + tstop::T = T(Inf) + options...) + return Problem(ode,EulerIntegrator{T,Y}(initstep,tstop)) +end + +function onestep!(ode::ExplicitODE, integ::EulerIntegrator, state::EulerState) + t, y, dy = output(state) + + if t > integ.tstop + return finished + end + + dt = integ.initstep + y += dt*dy + t += dt + ode.F!(t0,y0,dy0) # update the derivative + return cont +end +``` + +As a final improvement, we can (although this is not necessary) use a +structure `FixedOptions` from `options.jl` to keep our options in one +structure. A corresponding options type for adaptive solver is +`AdaptiveOptions`. This way we can use the standarized defaults for +most options and keep our solver in line with the standard +keywords. Naturally, we have to update `onestep!` to use the subtype. + +```julia +type EulerIntegrator{T,Y} <: AbstractIntegrator{T,Y} + options::FixedOptions{T,Y} +end + +function solver(ode::ExplicitODE{T,Y}, + ::Type{EulerIntegrator}; + options...) + options = FixedOptions{T}(;options...) 
+ return Problem(ode,EulerIntegrator{T,Y}(options)) +end +``` From f6b7c9c1e0ea217ab76e5e42ba272f4ffd9f5353 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 27 Jul 2016 18:13:03 +0200 Subject: [PATCH 083/113] Removed the old docs --- doc/api.md | 35 ----------------------------------- doc/solvers.md | 10 ---------- 2 files changed, 45 deletions(-) delete mode 100644 doc/api.md delete mode 100644 doc/solvers.md diff --git a/doc/api.md b/doc/api.md deleted file mode 100644 index ae94aad44..000000000 --- a/doc/api.md +++ /dev/null @@ -1,35 +0,0 @@ -#General API for all solvers -**This is a working draft for how the API might look like in the future** - -Please open pull requests or issues to propose changes or clarifications. - -##Basic interface -The general interface for all ODE solvers is: - -```julia -t_out, y_out = odeXX(F, y0, tspan; kwargs...) -``` - -Each (adaptive) solver accepts 3 arguments - -- `F`: the RHS of the ODE `dy/dt = F(t,y)`, which is a function of `t` and `y(t)` and returns `dy/dt::typeof(y/t)` -- `y0`: initial value for `y`. The type of `y0`, promoted as necessary according to the numeric type used for the times, determines the element type of the `yout` vector (`yout::Vector{typeof(y0*one(t))}`) -- `tspan`: Any iterable of sorted `t` values at which the solution (`y`) is requested. Most solvers will only consider `tspan[1]` and `tspan[end]`, and intermediary points will be interpolated. If `tspan[1] > tspan[end]` the integration is performed backwards. The times are promoted as necessary to a common floating-point type. - -The solver returns two arrays - -- `tout`: Vector of points at which solutions were obtained (also see keyword `points`) -- `yout`: solutions at times `tout`, stored as a vector `yout` as described above. Note that if `y0` is a vector, you can get a matlab-like matrix with `hcat(yout...)`. 
- -Each solver might implement its own keywords, but the following keywords have a standardized interpretation across all solvers. Solvers should raise an error if a unrecognized keyword argument is used. - -- `norm`: user-supplied norm for determining the error `E` (default `Base.vecnorm`) -- `abstol` and/or `reltol`: an integration step is accepted if `E <= abstol || E <= reltol*abs(y)` (ideally we want both criteria for all solvers, **done** in #13) -- `points=:all | :specified`: controls the type of output according to - * `points==:all` (default) output is given for each value in `tspan` as well as for each intermediate point the solver used. - * `points==:specified` output is given only for each value in `tspan`. -- `maxstep`, `minstep` and `initstep`: determine the maximal, minimal and initial integration step. -- `retries = 0` Sometimes an integration step takes you out of the region where `F(t,y)` has a valid solution and F might throw `DomainError` or other exceptions. `retries` sets a limit to the number of times the solver might try with a smaller step. - -##Iterator interface -Under construction #27 diff --git a/doc/solvers.md b/doc/solvers.md deleted file mode 100644 index 735018de8..000000000 --- a/doc/solvers.md +++ /dev/null @@ -1,10 +0,0 @@ -#Currently implemented ODE solvers - -##ODE45 -* Status? -* Special considerations - - -##ODE23 -* Status -* Special considerations \ No newline at end of file From 31a1ec3b9359ede7b10b00baec273e6240404d6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 27 Jul 2016 19:00:30 +0200 Subject: [PATCH 084/113] Travis docs --- README.md | 2 ++ docs/make.jl | 5 +++++ docs/mkdocs.yml | 7 ++++--- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index e3c894341..3af94c65f 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,8 @@ Various basic Ordinary Differential Equation solvers implemented in Julia. 
[![Coverage Status](https://img.shields.io/coveralls/JuliaODE/ODE.jl.svg)](https://coveralls.io/r/JuliaODE/ODE.jl) [![ODE](http://pkg.julialang.org/badges/ODE_0.4.svg)](http://pkg.julialang.org/?pkg=ODE&ver=0.4) [![ODE](http://pkg.julialang.org/badges/ODE_0.5.svg)](http://pkg.julialang.org/?pkg=ODE&ver=0.5) +[![](https://img.shields.io/badge/docs-stable-blue.svg)](https://JuliaODE.github.io/ODE.jl/stable) +[![](https://img.shields.io/badge/docs-latest-blue.svg)](https://JuliaODE.github.io/ODE.jl/latest) Pull requests are always highly welcome to fix bugs, add solvers, or anything else! diff --git a/docs/make.jl b/docs/make.jl index 6af21d2b6..4b09c9b45 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,3 +1,8 @@ using Documenter, ODE makedocs() + +deploydocs( + repo = "github.com/JuliaODE/ODE.jl.git", + deps = Deps.pip("pygments", "mkdocs", "mkdocs-material", "python-markdown-math") + ) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 2150fbbd6..81dcbad65 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -2,18 +2,19 @@ site_name: ODE.jl repo_url: https://github.com/JuliaODE/ODE.jl site_description: Julia package for solving differential equations +theme: readthedocs + extra_css: - assets/Documenter.css markdown_extensions: - - codehilite - extra - tables - fenced_code - - admonition + - mdx_math extra_javascript: - - https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML + - https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML - assets/mathjaxhelper.js docs_dir: 'build' From 8f38391db1d60db896a50ee5309d83a4be1e8e6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 28 Jul 2016 11:55:01 +0200 Subject: [PATCH 085/113] Minor fixes and docs --- docs/src/index.md | 2 +- src/base.jl | 2 ++ src/dense.jl | 28 +++++++++++++-------------- src/integrators/ode23s.jl | 38 +++++++++++++++++++------------------ src/options.jl | 2 +- src/top-interface.jl | 40 ++++++++++++++++++++------------------- 
test/iterators.jl | 26 +++++++++++++++++++++---- 7 files changed, 81 insertions(+), 57 deletions(-) diff --git a/docs/src/index.md b/docs/src/index.md index 99641f21f..4eaa20a7a 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -129,7 +129,7 @@ can be cotrolled with `abstol=eps(T)^(1/2)/10`. - `norm`: The norm used to measure error in the formula above, - defaults to `y->Base.norm(y,Inf)`. You can specify it to assign + defaults to `y->Base.vecnorm(y,Inf)`. You can specify it to assign different weights to different components of `y`. - `minstep`, `maxstep`: Minimal and maximal stepsize for the diff --git a/src/base.jl b/src/base.jl index d989f9949..e6620c7af 100644 --- a/src/base.jl +++ b/src/base.jl @@ -48,6 +48,8 @@ Base.eltype(t::IVP) = eltype(typeof(t)) """ + ODE.ExplicitODE(t0,y0,F!;J!=jacobian)) + Explicit ODE representing the problem `dy = F(t,y)` with `y(t0)=y0` diff --git a/src/dense.jl b/src/dense.jl index 0eb9b7522..6f4d672fa 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -15,21 +15,21 @@ TODO options: """ -immutable DenseOptions{T<:Number} <: Options{T} - tout::Vector{T} # TODO: this should an AbstractVector +immutable DenseOptions{T<:Number,TO<:AbstractVector} <: Options{T} + tout::TO # points ::Symbol # stopevent::S # roottol ::T end @compat function (::Type{DenseOptions{T}}){T}(; - tstop = T(Inf), - tout::Vector = T[tstop], + tstop = T(Inf), + tout::AbstractVector{T} = T[tstop], # points::Symbol= :all, # stopevent::S = (t,y)->false, # roottol = eps(T)^T(1//3), kargs...) - DenseOptions{T}(tout) + DenseOptions{T,typeof(tout)}(tout) end @@ -41,23 +41,23 @@ the results (currently this means at the output times stored in `opts.tout`). """ -immutable DenseOutput{I<:AbstractIntegrator,OP<:DenseOptions} <: AbstractSolver - integ::I # TODO: Maybe this should be relaxed to a AbstractSolver? - # Then we could have a DenseOutput{DenseOutput{RK}}, say! 
- # pwl: Or DenseOutput{StiffnessSwitching{whatever}} +immutable DenseOutput{I<:AbstractSolver,OP<:DenseOptions} <: AbstractSolver + integ::I opts::OP end # TODO: this is confusing, firs you call `solve` with `DenseOutput{I}` -# and then you call construct it as `DenseOutput{T}`. Also this goes +# and then you call construct it as `DenseOutput{T,OP}`. Also this goes # against the convention that we pass as much as possible as # options. What if a Solver takes more than one parameter? -function solve{I}(ivp::IVP, - ::Type{DenseOutput{I}}; - opts...) - T = eltype(ivp)[1] +function solve{I,T}(ivp::IVP{T}, + ::Type{DenseOutput{I}}; + opts...) # create integrator integ = I{T}(; opts...) + # TODO: a nasty workaround: this triggers an error if the method + # is not registered supported, we ignore the output + solver = solve(ivp,I;opts...) # create dense solver dense_opts = DenseOptions{T}(; opts...) dense_solver = DenseOutput(integ, dense_opts) diff --git a/src/integrators/ode23s.jl b/src/integrators/ode23s.jl index 5dfc85bf7..d2d96b7a7 100644 --- a/src/integrators/ode23s.jl +++ b/src/integrators/ode23s.jl @@ -22,8 +22,10 @@ isadaptive(::ModifiedRosenbrockIntegrator) = true tdir(ode::ExplicitODE, integ::ModifiedRosenbrockIntegrator) = sign(integ.opts.tstop - ode.t0) # define the set of ODE problems with which this integrator can work -solve{T,I<:ModifiedRosenbrockIntegrator}(ode::ExplicitODE{T}, integ::Type{I}; opts...) = - Problem(ode, integ{T}(;opts...)) +solve{T,Y<:AbstractVector,I<:ModifiedRosenbrockIntegrator}(ode::ExplicitODE{T,Y}, + integ::Type{I}; + opts...) 
= + Problem(ode, integ{T}(;opts...)) """ The state for the Rosenbrock integrator @@ -34,17 +36,17 @@ The state for the Rosenbrock integrator - iters: Number of successful steps made """ -type RosenbrockState{T,Y} <: AbstractState - step ::Step{T,Vector{Y}} +type RosenbrockState{T,Y,J<:AbstractMatrix} <: AbstractState + step ::Step{T,Y} dt ::T - F1 ::Vector{Y} - F2 ::Vector{Y} - k1 ::Vector{Y} - k2 ::Vector{Y} - k3 ::Vector{Y} - ynew ::Vector{Y} + F1 ::Y + F2 ::Y + k1 ::Y + k2 ::Y + k3 ::Y + ynew ::Y dtold::T - J ::Matrix{Y} + jac ::J iters::Int end @@ -55,7 +57,7 @@ function show(io::IO, state::RosenbrockState) println("dt =$(state.dt)") println("F1 =$(state.F1)") println("F2 =$(state.F2)") - println("J =$(state.J)") + println("jac =$(state.jac)") end @@ -66,7 +68,7 @@ function init{T}(ode::ExplicitODE{T}, y = ode.y0 dy = zero(y) - J = Array(eltype(y),length(y),length(y)) + jac = Array(eltype(y),length(y),length(y)) step = Step(t,copy(y),copy(dy)) state = RosenbrockState(step, @@ -78,12 +80,12 @@ function init{T}(ode::ExplicitODE{T}, zero(y), # k3 zero(y), # ynew dt*0, # dtnew - J, # J + jac, # jac 0) # iters # initialize the derivative and the Jacobian ode.F!(t,y,step.dy) - ode.J!(t,y,state.J) + ode.J!(t,y,state.jac) return state end @@ -95,7 +97,7 @@ function trialstep!(ode::ExplicitODE, # unpack step = state.step opts = integ.opts - F1, F2, J = state.F1, state.F2, state.J + F1, F2, jac = state.F1, state.F2, state.jac k1,k2,k3,ynew = state.k1, state.k2, state.k3, state.ynew t, dt, y, dy = step.t, state.dt, step.y, step.dy F! = ode.F! 
@@ -116,7 +118,7 @@ function trialstep!(ode::ExplicitODE, return abort end - W = lufact!( eye(J) - dt*integ.const_d*J ) + W = lufact!( eye(jac) - dt*integ.const_d*jac ) # Approximate time-derivative of F, we are using F1 as a # temporary array @@ -174,7 +176,7 @@ function accept!(ode::ExplicitODE, step.t = step.t+state.dtold copy!(step.y, state.ynew) copy!(step.dy, state.F2) - ode.J!(step.t,step.y,state.J) + ode.J!(step.t,step.y,state.jac) return cont end diff --git a/src/options.jl b/src/options.jl index 346f79a6d..4bf6c442f 100644 --- a/src/options.jl +++ b/src/options.jl @@ -38,7 +38,7 @@ end minstep = 10*eps(T), maxstep = 1/minstep, initstep = eps(T)^T(1//3), - norm::N = y->Base.norm(y,Inf), + norm::N = y->vecnorm(y,Inf), maxiters = T(Inf), isoutofdomain::O = Base.isnan, kargs...) diff --git a/src/top-interface.jl b/src/top-interface.jl index 127bc08b5..3e9e85ef4 100644 --- a/src/top-interface.jl +++ b/src/top-interface.jl @@ -4,23 +4,23 @@ We assume that the initial data y0 is given at tspan[1], and that tspan[end] is the last integration time. """ -function ode{T,Y,I<:AbstractIntegrator}(F, y0::Y, - tout::AbstractVector{T}; - integ::Type{I} = RKIntegratorAdaptive{:rk45}, - points = :all, - kargs...) +function ode{T,Y,M<:AbstractSolver}(F, y0::Y, + tout::AbstractVector{T}; + solver::Type{M} = RKIntegratorAdaptive{:rk45}, + points = :all, + kargs...) t0 = tout[1] # construct a Problem equation = explicit_ineff(t0,y0,F;kargs...) if points == :all - prob = solve(equation, integ; + prob = solve(equation, solver; tout = tout, kargs...) elseif points == :specified prob = solve(equation, - DenseOutput{integ}; + DenseOutput{solver}; tout = tout, kargs...) else @@ -46,17 +46,17 @@ end Solves an ODE `y'=F(t,y)` with initial conditions `y0` and `t0`. """ -ode23s(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = ModifiedRosenbrockIntegrator, kargs...) -ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorFixed{:feuler}, kargs...) -ode2_midpoint(F,y0,t0;kargs...) 
= ode_conv(F,y0,t0;integ = RKIntegratorFixed{:midpoint}, kargs...) -ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorFixed{:heun}, kargs...) -ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorFixed{:rk4}, kargs...) -ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorAdaptive{:rk21}, kargs...) -ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorAdaptive{:rk23}, kargs...) -ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorAdaptive{:rk45}, kargs...) -ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorAdaptive{:dopri5}, kargs...) +ode23s(F,y0,t0;kargs...) = ode_conv(F,y0,t0;solver = ModifiedRosenbrockIntegrator, kargs...) +ode1(F,y0,t0;kargs...) = ode_conv(F,y0,t0;solver = RKIntegratorFixed{:feuler}, kargs...) +ode2_midpoint(F,y0,t0;kargs...) = ode_conv(F,y0,t0;solver = RKIntegratorFixed{:midpoint}, kargs...) +ode2_heun(F,y0,t0;kargs...) = ode_conv(F,y0,t0;solver = RKIntegratorFixed{:heun}, kargs...) +ode4(F,y0,t0;kargs...) = ode_conv(F,y0,t0;solver = RKIntegratorFixed{:rk4}, kargs...) +ode21(F,y0,t0;kargs...) = ode_conv(F,y0,t0;solver = RKIntegratorAdaptive{:rk21}, kargs...) +ode23(F,y0,t0;kargs...) = ode_conv(F,y0,t0;solver = RKIntegratorAdaptive{:rk23}, kargs...) +ode45_fe(F,y0,t0;kargs...) = ode_conv(F,y0,t0;solver = RKIntegratorAdaptive{:rk45}, kargs...) +ode45_dp(F,y0,t0;kargs...) = ode_conv(F,y0,t0;solver = RKIntegratorAdaptive{:dopri5}, kargs...) const ode45 = ode45_dp -ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0;integ = RKIntegratorAdaptive{:feh78}, kargs...) +ode78(F,y0,t0;kargs...) = ode_conv(F,y0,t0;solver = RKIntegratorAdaptive{:feh78}, kargs...) function ode_conv{Ty,T}(F,y0::Ty,t0::AbstractVector{T};kargs...) @@ -84,7 +84,7 @@ ExplicitODE. As the name suggests, the result is not going to be very efficient. """ -function explicit_ineff{T,Y}(t0::T, y0::AbstractVector{Y}, F::Function; kargs...) +function explicit_ineff{T,Y}(t0::T, y0::AbstractArray{Y}, F::Function; kargs...) 
F!(t,y,dy) =copy!(dy,F(t,y)) return ExplicitODE(t0,y0,F!; kargs...) end @@ -97,5 +97,7 @@ end # conversion is necessary. function explicit_ineff{T,Y}(t0::T, y0::Y, F::Function; kargs...) F!(t,y,dy) =(dy[1]=F(t,y[1])) - return ExplicitODE(t0,[y0],F!; kargs...) + new_y0 = Array(Y,1) + new_y0[1] = y0 + return ExplicitODE(t0,new_y0,F!; kargs...) end diff --git a/test/iterators.jl b/test/iterators.jl index 9b81f50c1..7ee8ee316 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -81,14 +81,14 @@ function test_ode() y0scal = y0[1] # with jacobian tj,yj = ODE.ode(Fscal,y0scal,tout, - integ = stepper, + solver = stepper, points = points, initstep = h0, J! = jac!) @test_approx_eq_eps yj map(x->sol(x)[1],tj) tol # without jacobian t,y = ODE.ode(Fscal,y0scal,tout, - integ = stepper, + solver = stepper, points = points, initstep = h0) @test_approx_eq_eps y map(x->sol(x)[1],tj) tol @@ -107,20 +107,38 @@ function test_ode() # ODE.odeXX vector interface # with jacobian tj,yj = ODE.ode(F,y0,tout, - integ = stepper, + solver = stepper, points = points, initstep = h0, J! = jac!) @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol # without jacobian t,y = ODE.ode(F,y0,tout, - integ = stepper, + solver = stepper, points = points, initstep = h0) @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol @test_approx_eq hcat(yj...) hcat(y...) + # TODO: tests for `y::AbstractArray` + # # ODE.odeXX array interface for arrays + # # with jacobian + # tj,yj = ODE.ode(F,reshape(y0,length(y0),1,1),tout, + # solver = stepper, + # points = points, + # initstep = h0, + # J! = jac!) + # @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol + # # without jacobian + # t,y = ODE.ode(F,reshape(y0,length(y0),1,1),tout, + # solver = stepper, + # points = points, + # initstep = h0) + # @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol + + # @test_approx_eq hcat(yj...) hcat(y...) 
+ if points == :specified # test if we covered the whole timespan @test length(tout) == length(t) == length(tj) From 588d2e52fefee191ba7e87caf1394d2c70c2e3b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 28 Jul 2016 14:27:29 +0200 Subject: [PATCH 086/113] Got rid of the specialized `solve` --- src/base.jl | 9 +++++++-- src/dense.jl | 9 ++++----- src/integrators/ode23s.jl | 8 +------- src/integrators/runge-kutta.jl | 5 +---- 4 files changed, 13 insertions(+), 18 deletions(-) diff --git a/src/base.jl b/src/base.jl index e6620c7af..81d9eddba 100644 --- a/src/base.jl +++ b/src/base.jl @@ -99,6 +99,10 @@ Subtypes include: `AbstractIntegrator`s but also `DenseOutput` """ abstract AbstractSolver +@compat (::Type{S}){S<:AbstractSolver}(ivp;opts...) = + error("The solver $S doesn't support IVP of form $(typeof(ivp))") + + """ The abstract type of the actual algorithm to solve an IVP. @@ -208,8 +212,9 @@ Output: - `::Problem` """ -solve(ivp::IVP, solver; opts...) = - error("The solver $(typeof(solver)) doesn't support IVP of form $(typeof(ivp))") +function solve(ivp::IVP, solver; opts...) + Problem(ivp,solver(ivp;opts...)) +end function solve{S<:AbstractSolver}(ivp::IVP; solver::Type{S} = RKIntegratorAdaptive{:rk45}, diff --git a/src/dense.jl b/src/dense.jl index 6f4d672fa..034757dde 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -50,18 +50,17 @@ end # and then you call construct it as `DenseOutput{T,OP}`. Also this goes # against the convention that we pass as much as possible as # options. What if a Solver takes more than one parameter? -function solve{I,T}(ivp::IVP{T}, - ::Type{DenseOutput{I}}; - opts...) +@compat function (::Type{DenseOutput{I}}){T,I}(ivp::IVP{T}; + opts...) # create integrator - integ = I{T}(; opts...) + integ = I(ivp; opts...) # TODO: a nasty workaround: this triggers an error if the method # is not registered supported, we ignore the output solver = solve(ivp,I;opts...) 
# create dense solver dense_opts = DenseOptions{T}(; opts...) dense_solver = DenseOutput(integ, dense_opts) - return Problem(ivp, dense_solver) + return dense_solver end """ diff --git a/src/integrators/ode23s.jl b/src/integrators/ode23s.jl index d2d96b7a7..ee2f9fe75 100644 --- a/src/integrators/ode23s.jl +++ b/src/integrators/ode23s.jl @@ -9,7 +9,7 @@ immutable ModifiedRosenbrockIntegrator{T<:Number} <: AbstractIntegrator const_e::T end -@compat function (::Type{ModifiedRosenbrockIntegrator{T}}){T}(;opts...) +function ModifiedRosenbrockIntegrator{T}(ode::ExplicitODE{T};opts...) const_d = 1/(2+sqrt(T(2))) const_e = 6+sqrt(T(2)) @@ -21,12 +21,6 @@ name(::ModifiedRosenbrockIntegrator) = "Modified Rosenbrock Integrator" isadaptive(::ModifiedRosenbrockIntegrator) = true tdir(ode::ExplicitODE, integ::ModifiedRosenbrockIntegrator) = sign(integ.opts.tstop - ode.t0) -# define the set of ODE problems with which this integrator can work -solve{T,Y<:AbstractVector,I<:ModifiedRosenbrockIntegrator}(ode::ExplicitODE{T,Y}, - integ::Type{I}; - opts...) = - Problem(ode, integ{T}(;opts...)) - """ The state for the Rosenbrock integrator diff --git a/src/integrators/runge-kutta.jl b/src/integrators/runge-kutta.jl index b0548eba2..1fa29e7e7 100644 --- a/src/integrators/runge-kutta.jl +++ b/src/integrators/runge-kutta.jl @@ -56,7 +56,7 @@ typealias RKIntegratorFixed RKIntegrator{:fixed} typealias RKIntegratorAdaptive RKIntegrator{:adaptive} -@compat function (::Type{RKIntegrator{Kind,Name,T}}){Kind,Name,T}(;opts...) +@compat function (::Type{RKIntegrator{Kind,Name}}){Kind,Name,T}(ode::ExplicitODE{T};opts...) tab = convert(TableauRKExplicit{T},tableaus_rk_explicit[Name]) if Kind == :fixed opts = FixedOptions{T}(;opts...) @@ -79,9 +79,6 @@ name(integ::RKIntegrator) = integ.tableau.name tdir(ode::ExplicitODE, integ::RKIntegrator) = sign(integ.opts.tstop - ode.t0) -solve{T,I<:RKIntegrator}(ode::ExplicitODE{T}, integ::Type{I}; opts...) 
= - Problem(ode,integ{T}(;opts...)) - # lower level interface # explicit RK integrator From 65acc9456c8e19c46436e17b309c99b7ab3c2c99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sun, 31 Jul 2016 10:53:34 +0200 Subject: [PATCH 087/113] New example of a custom integrator --- examples/custom_integrator.jl | 109 ++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 examples/custom_integrator.jl diff --git a/examples/custom_integrator.jl b/examples/custom_integrator.jl new file mode 100644 index 000000000..c4f087abb --- /dev/null +++ b/examples/custom_integrator.jl @@ -0,0 +1,109 @@ +""" + +Here we demonstrate how to implement an integrator so that it is +compatible with `ODE.jl`. The implementation can be contained in an +external package, but if the API conforms to our backend one could use +this integrator to solve an explicit ODE with `solve(...)`. + +""" + +module MyIntegrator + +using ODE +# We have to define these methods on our integrator +import ODE: init, onestep! +import ODE: AbstractIntegrator, AbstractState, ExplicitODE, Step + +# The options are stored in the integrator type. Integrator is +# immutable: we can't change the options once we start the +# integration. +immutable EulerIntegrator{T} <: AbstractIntegrator{T} + tstop::T + initstep::T +end + +# This is necessary for the `solve` to construct the iterator. Every +# iterator has to have a constructor of the form +# `Iterator(::IVP;opts...)`, here we implement an interator that only +# works with explicit differential equations, hence we restrict the +# constructor to `ExplicitODE`. If someone tries to use our iterator +# to solve an equation of unsupported type `solve` will throw an error +# (there is a fallback constructor for `AbstractIntegrator` that +# throws an error). +function EulerIntegrator{T}(ode::ExplicitODE{T}; + tstop = T(Inf), + initstep = T(1//10), + opts...) 
+ EulerIntegrator(tstop,initstep) +end + +# The state of the integrator, it stores the current values for time +# `t` and the solution `y` itself, along with its derivative, +# `dy`. For convenience we used the already available type to store +# this kind of information, `Step`, that has three fields: `t`, `y` +# and `dy`. See the end of the module for an alternative definition +# of a state. +type EulerState{T,Y} <: AbstractState{T,Y} + step::ODE.Step{T,Y} +end + +# Generates the state given an ODE and an integrator, this method has +# to be specialized for `EulerIntegrator`, we don't have to specialize +# it for `ExplicitODE` as the type of an ODE is already filtered by +# the specialized the construtor, but we do it here for clarity. +function init(ode::ExplicitODE, integ::EulerIntegrator) + t0, y0 = ode.t0, ode.y0 + dy0 = similar(ode.dy0) + ode.F!(t0,y0,dy0) # fill in the values of the derivative + EulerState(ODE.Step(t0,y0,dy0)) +end + +function onestep!(ode::ExplicitODE, integ::EulerIntegrator, state::EulerState) + # as mentioned before, this function unpacks the variables + # `t,y,dy` from the current state. It is not necessary, you could + # access the fields directly but we use it here for convenience. 
+ t, y, dy = ODE.output(state) + + # the only stop condition our solver has + if t >= integ.tstop + # this flag finalizes the iterator + return ODE.finish + else + # trim the stepsize to match the `tstop`, prevents + # overshooting + dt = min(integ.initstep,integ.tstop-t) + + # update the time, + state.step.t += dt + # the function (this is the `dy` from the previous step) + state.step.y .+= dt*dy + # and its derivative + ode.F!(t,y,dy) + # return a flag to continue the integration + return ODE.cont + end +end + +# Another possiblity to implement state would be to declare +type EulerState2{T,Y} <: ODE.AbstractState{T,Y} + t::T + y::Y + dy::Y +end +# but then we have to define the method `output` +output(state::EulerState2) = (state.t, state.y, state.dy) +# normally `output` falls back to `output(state)=output(state.step)` +# with `output(step)=step.t,step.y,step.dy`. + +end + +# Usage example +using ODE +import MyIntegrator: EulerIntegrator + +# declare the ODE as usual +ode =ODE.ExplicitODE(0.0,[1.0],(t,y,dy)->copy!(dy,y)) +# solve the `ode` with our integrator, note that we can pass options to `solve` +sol =ODE.solve(ode,EulerIntegrator;tstop=1.0,initstep=0.001) +# print the last step of the solution +collect(sol)[end] From 280ba4765b7237170f22ad331a6925f6f3a303a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sun, 31 Jul 2016 11:54:14 +0200 Subject: [PATCH 088/113] Minor fixes --- src/base.jl | 52 +++++++++++++++++++++++++++++----- src/dense.jl | 20 +++---------- src/integrators/runge-kutta.jl | 27 +++++++++++++++++- 3 files changed, 75 insertions(+), 24 deletions(-) diff --git a/src/base.jl b/src/base.jl index 81d9eddba..99aa2293d 100644 --- a/src/base.jl +++ b/src/base.jl @@ -99,6 +99,8 @@ Subtypes include: `AbstractIntegrator`s but also `DenseOutput` """ abstract AbstractSolver +legnth(s::AbstractSolver) = error("`length` is not defined for $(typeof(s)).") + @compat (::Type{S}){S<:AbstractSolver}(ivp;opts...) 
= error("The solver $S doesn't support IVP of form $(typeof(ivp))") @@ -172,6 +174,8 @@ immutable Problem{O<:AbstractIVP,S<:AbstractSolver} solver ::S end +Base.length(prob::Problem) = length(prob.solver) + Base.eltype{O}(::Type{Problem{O}}) = eltype(O) Base.eltype{O}(::Problem{O}) = eltype(O) @@ -183,16 +187,14 @@ Solve creates an iterable `Problem` instance from an `IVP` instance (specifying the math) and from a `Type{AbstractSolver}` (the numerical integrator). The simplest use case is -```julia -for (t,y,dy) in solver(...) - # do something with t, y an dy -end -``` + for (t,y,dy) in solver(...) + # do something with t, y an dy + end If the integration interval, defined by the keyword argument `tstop`, is finite you can request all the results at once by calling -``` -collect(solver(...)) # => Vector{Tuple{T,Y,Y}} + + collect(solver(...)) # => Vector{Tuple{T,Y,Y}} Notes: @@ -234,6 +236,39 @@ function collect(prob::Problem) end +""" + + collect_vectors(prob::Problem) + +Input: + +- iterator constructed by `solve` + +Output: + +- `(tout,yout,dyout)` with `tout::Array{T}` containing subsequent + times, `yout::Vector{Y}` and `dyout::Vector{Y}` containig the vector + of solution and derivative respectively at corresponding `tout` + times. In other words `yout[i]` approximates `y(tout[i])` where `y` + is the true solution to an ODE. It could be interpreted as a + transpose of "`collect(prob)`". + +""" +function collect_vectors(prob::Problem) + T,Y = eltype(prob) + tout = Array(T,0) + yout = Array(Y,0) + dyout = Array(Y,0) + for (t,y,dy) in prob + push!(tout,t) + push!(yout,copy(y)) + push!(dyout,copy(dy)) + end + return (tout,yout,dyout) +end + + + # Iteration: take one step on a IVP `Problem` # # Defines: @@ -277,6 +312,7 @@ function Base.next(prob::Problem, st) end """ + Holds the solver status, used inside of `onestep!`. 
Values: @@ -290,6 +326,7 @@ Statuses can be combined with &: - finish&cont == finish - abort&cont == abort - abort&finish = abort + """ @enum Status cont=1 abort=0 finish=-1 # The values of Statuses are chose to turn & into a *: @@ -324,6 +361,7 @@ Output: - Bool: `false`: continue iteration, `true`: terminate iteration. substeps. + """ function onestep!(ivp::IVP, integ::AbstractIntegrator, state::AbstractState) opt = integ.opts diff --git a/src/dense.jl b/src/dense.jl index 034757dde..b1005f1ab 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -46,17 +46,12 @@ immutable DenseOutput{I<:AbstractSolver,OP<:DenseOptions} <: AbstractSolver opts::OP end -# TODO: this is confusing, firs you call `solve` with `DenseOutput{I}` -# and then you call construct it as `DenseOutput{T,OP}`. Also this goes -# against the convention that we pass as much as possible as -# options. What if a Solver takes more than one parameter? +Base.length(dense::DenseOutput) = length(dense.opts.tout) + @compat function (::Type{DenseOutput{I}}){T,I}(ivp::IVP{T}; opts...) # create integrator integ = I(ivp; opts...) - # TODO: a nasty workaround: this triggers an error if the method - # is not registered supported, we ignore the output - solver = solve(ivp,I;opts...) # create dense solver dense_opts = DenseOptions{T}(; opts...) dense_solver = DenseOutput(integ, dense_opts) @@ -75,8 +70,10 @@ type DenseState{St<:AbstractState,T,Y} <: AbstractState{T,Y} integrator_state::St end + output(ds::DenseState) = output(ds.step_out) + function init(ivp::IVP, solver::DenseOutput) integrator_state = init(ivp, solver.integ) @@ -88,15 +85,6 @@ function init(ivp::IVP, end -""" - -TODO: rename `tout` to `tout` and drop the support for -`points=:all` outside of the `odeXX`? Maybe even -`odeXX(;tout=[...])` would use dense output while `odeXX(;)` -wouldn't. 
- -""" - function onestep!(ivp::IVP, solver::DenseOutput, dstate::DenseState) diff --git a/src/integrators/runge-kutta.jl b/src/integrators/runge-kutta.jl index 1fa29e7e7..d6f5905aa 100644 --- a/src/integrators/runge-kutta.jl +++ b/src/integrators/runge-kutta.jl @@ -51,11 +51,36 @@ immutable RKIntegrator{Kind,Name,T,OP<:Options} <: AbstractIntegrator{T} opts::OP end - typealias RKIntegratorFixed RKIntegrator{:fixed} typealias RKIntegratorAdaptive RKIntegrator{:adaptive} +""" + +A constructor for an explicit Runge-Kutta method. Works only for +explicit differential equations. + +Notes: + +- `Kind` is either `:adaptive` or `:fixed`, corresponding to adaptive + step size method or a fixed step size method + +- `Name` is the name of a Butcher tableau based on which the method is + constructed. The kind (adaptive or fixed) of the Butcher tableau + has to correspond to `Kind` (`:adaptive` or `:fixed`). +Input: + +- `ode::ExplicitODE` + +- `opts` options for the method, supports the same basic options as + other adaptive steppers (see `AdaptiveOptions` for the complete + list). + +Output: + +- `::RKIntegrator{Kind,Name}` + +""" @compat function (::Type{RKIntegrator{Kind,Name}}){Kind,Name,T}(ode::ExplicitODE{T};opts...) 
tab = convert(TableauRKExplicit{T},tableaus_rk_explicit[Name]) if Kind == :fixed From 123901414253ff8428c6d47a02528f0c5095f617 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sun, 31 Jul 2016 11:56:43 +0200 Subject: [PATCH 089/113] Added .documenter.enc --- docs/.documenter.enc | Bin 0 -> 1680 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 docs/.documenter.enc diff --git a/docs/.documenter.enc b/docs/.documenter.enc new file mode 100644 index 0000000000000000000000000000000000000000..12d46f201efea1c33f7b8b1f6cd3e2289bba4e49 GIT binary patch literal 1680 zcmV;B25(6e@Cz{KOgLC0 zU7F$d6;r#?NP>ifAD~^&sF+xvB!2tAJFzY?%YaO*p2vs?ch@y|;00_CYOsAA3EdOk zCnl7?OnO^eyexQCBC$ksa(7O2Qj$2bz0Rc|NI4b$!LPJqVJ_wBM z(+@#z98PPiMQ{HI%@wyR3J{?OD()KV2Q32@Vsy+uNcbW7$04rc^$!1W~I5nRT@3az(4-=dBl?IMog$*m%6GVW9<+{A+% z$*I&4+(s?jQQ_yLIBK@xHxi)+3iJ5Fqf7q;>m}M5s(!^r+RZm7SlAh8yI$yK%?jR$ zxL42*ghM;z^i8oGK>c)WN#(=BOQVua$?9h*E+7@Al9^RtPP+zj@<@$Mr~w9&xkwhLshesA>%DET!kq6wSoD7`Bb-EdjhMzWQtyP1s>9>fUZhft|inkG25% z2Yh@!e{P!)D4%^Y4zVBkB!K`fU)kj^7Pu8V(Pm*S+8A_jKqCx6hzc=?TG8qgifuK* zsnHY}9E({HfV8-BnL!-D-Y<)+^>#~pf}ce!w}jOaYCl31;j|WXzS0>Ix4?K|yN~`$ zhPSJ+w)0a3Ya5lKVJzt=55Yz$5{-PG6MF7ulNOQ>dn|!9V88Ake3I5?^syIg;+_qk zdV+aa`b$|1Da`b?Y{7wDM79{KXh>7jI>er1qCOw=fr8jUG&v>a00me7>DhDTMtJTL3@AvA1Vj^=;$x`tF zgZxotCnmejlhYap$oMmdhY`61O5VVW`FZ%T)3Q)N!s1}jP-7X?R^NQxjd?62R%{&g zhpOH?HpkI}Bzv;}Fn6ami4yxF1k%IyVWy3ShN3 za`lx3MiE7I2KL*(XM4*Z=~xA4@P0-&xWmZg1X^Vvs4in3)U)#~P54Zbn$5S}4YmaQ zpp{arilwk#A2IL1D7o-l9M%s^y-?uGVWwdJZ$=(C2#QiRzUTeQbAy5gCtDmXB;itu zyMaOkZn0xTUiDD&yf+u?vo8%PbgiT-YPT6g$}b!BT0WWyMU^tU+kcvu;B~u1)p>N^ zDZv*3wYuzZWE+x%>$&~9vR4Ji31u|wZu`zGCy~oPiRj~ld@lc{LWX}q*KPh6gK0*3 z4&h56v>Q}){Sy?=G+Q+iAD?z-wLM@wZC}a?RSAk$7O`sCors#Bil~|GUV#$du-kv~ ziQw5M6%QatW`l2!Gjej+KB!Us2+8O`Gm#q-Op{pi&1w!5F64JO2RQC7*6HMyj~gwm z$Gi5qwp3gMp%kj4{4L3>{wHslwcMPhAW zMZJtNVe4TFpcwRrjqR9Lp1v@=xcYgj(tp=^STlb-+(T>7!#o$#5ZoWK0< zNNuZrYeS?V!^Iu<42vhO_Hzf Date: Sun, 31 Jul 2016 14:16:51 +0200 Subject: [PATCH 
090/113] More fixes, removed `tdir` from dense output --- src/base.jl | 10 +++------- src/dense.jl | 29 +++++++++++++++++++++++++---- 2 files changed, 28 insertions(+), 11 deletions(-) diff --git a/src/base.jl b/src/base.jl index 99aa2293d..4f6ddc7b4 100644 --- a/src/base.jl +++ b/src/base.jl @@ -8,7 +8,7 @@ abstract AbstractIVP{T,Y} -Base.eltype{T,Y}(::Type{AbstractIVP{T,Y}}) = T,Y,Y +Base.eltype{T,Y}(::Type{AbstractIVP{T,Y}}) = Tuple{T,Y,Y} """ @@ -42,8 +42,6 @@ type IVP{T,Y,F,G,J} <: AbstractIVP{T,Y} G! ::G J! ::J end -@compat Base.eltype(t::Type{IVP}) = eltype(supertype(t)) -Base.eltype(t::IVP) = eltype(typeof(t)) """ @@ -176,8 +174,7 @@ end Base.length(prob::Problem) = length(prob.solver) -Base.eltype{O}(::Type{Problem{O}}) = eltype(O) -Base.eltype{O}(::Problem{O}) = eltype(O) +Base.eltype{O,S}(::Type{Problem{O,S}}) = eltype(O) """ solve(ivp::IVP, solver::Type{AbstractSolver}, opts...) @@ -227,8 +224,7 @@ end # In Julia 0.5 the collect needs length to be defined, we cannot do # that for a Problem but we can implement our own collect function collect(prob::Problem) - T,Y = eltype(prob) - pairs = Array(Tuple{T,Y,Y},0) + pairs = Array(eltype(prob),0) for (t,y,dy) in prob push!(pairs,(t,copy(y),copy(dy))) end diff --git a/src/dense.jl b/src/dense.jl index b1005f1ab..ad657065a 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -48,12 +48,28 @@ end Base.length(dense::DenseOutput) = length(dense.opts.tout) +tdir(ivp, solver::DenseOutput) = sign(solver.opts.tstop - ivp.t0) + @compat function (::Type{DenseOutput{I}}){T,I}(ivp::IVP{T}; + tstop = T(Inf), + tout::AbstractVector{T} = T[tstop], opts...) + if all(tout.>=ivp.t0) + tout = sort(tout) + elseif all(tout.<=ivp.t0) + tout = reverse(sort(tout)) + else + error("Elements of tout should all be either to the right or to the left of `t0`.") + end + + # normalize `tstop` to the last element of `tout` + tstop = tout[end] + # create integrator - integ = I(ivp; opts...) + integ = I(ivp; tstop=tstop, opts...) 
+ # create dense solver - dense_opts = DenseOptions{T}(; opts...) + dense_opts = DenseOptions{T}(; tout=tout, opts...) dense_solver = DenseOutput(integ, dense_opts) return dense_solver end @@ -133,13 +149,18 @@ step before `tout` and `t2` is `>=tout`. In other words `tout∈[t1,t2]`. """ function next_interval!(ivp, integ, istate, step_prev, tout) - td = tdir(ivp, integ) while true # get the current time t1 = step_prev.t t2,_ = output(istate) - if td*t1 <= td*tout <= td*t2 + # in case we are integrating backwards in time reverse the + # time interval + if t2 < t1 + t1, t2 = t2, t1 + end + + if t1 <= tout <= t2 # we found the enclosing times return cont end From d4ef5a72515b7bf410be9fd5c16f4544b5bb3a60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sun, 31 Jul 2016 14:17:58 +0200 Subject: [PATCH 091/113] Fixes for the custom integrator --- examples/custom_integrator.jl | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/examples/custom_integrator.jl b/examples/custom_integrator.jl index c4f087abb..96ea16dfa 100644 --- a/examples/custom_integrator.jl +++ b/examples/custom_integrator.jl @@ -52,8 +52,8 @@ end # it for `ExplicitODE` as the type of an ODE is already filtered by # the specialized the construtor, but we do it here for clarity. function init(ode::ExplicitODE, integ::EulerIntegrator) - t0, y0 = ode.t0, ode.y0 - dy0 = similar(ode.dy0) + t0, y0 = ode.t0, copy(ode.y0) + dy0 = similar(y0) ode.F!(t0,y0,dy0) # fill in the values of the derivative EulerState(ODE.Step(t0,y0,dy0)) end @@ -84,6 +84,15 @@ function onestep!(ode::ExplicitODE, integ::EulerIntegrator, state::EulerState) end end +# OPTIONAL: +# Define properties of this integrator: order, name and +# whether it is adaptive or not. At this point the information +# supplied here is not used. 
+order{T}(::Type{EulerIntegrator{T}}) = 1 +name{T}(::Type{EulerIntegrator{T}}) = "My own Euler integrator" +isadaptive{T}(::Type{EulerIntegrator{T}}) = false + +# OPTIONAL: # Another possiblity to implement state would be to declare type EulerState2{T,Y} <: ODE.AbstractState{T,Y} t::T From 3b6a70ea1dd88d8b0ac7ddfb036b5524b7f27fe2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Sun, 31 Jul 2016 14:18:16 +0200 Subject: [PATCH 092/113] Prototype of a test function for custom integrators --- examples/custom_integrator.jl | 3 ++ src/ODE.jl | 3 ++ src/test_solver.jl | 63 +++++++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+) create mode 100644 src/test_solver.jl diff --git a/examples/custom_integrator.jl b/examples/custom_integrator.jl index 96ea16dfa..6c1f70c13 100644 --- a/examples/custom_integrator.jl +++ b/examples/custom_integrator.jl @@ -116,3 +116,6 @@ ode =ODE.ExplicitODE(0.0,[1.0],(t,y,dy)->copy!(dy,y)) sol =ODE.solve(ode,EulerIntegrator;tstop=1.0,initstep=0.001) # print the last step of the solution collect(sol)[end] + +# test the integrator +ODE.test_integrator(EulerIntegrator) diff --git a/src/ODE.jl b/src/ODE.jl index 046940908..445a14d2a 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -49,4 +49,7 @@ include("integrators/rosenbrock.jl") # User interface to solvers include("top-interface.jl") +# Test function for the iterator interface +include("test_solver.jl") + end # module ODE diff --git a/src/test_solver.jl b/src/test_solver.jl new file mode 100644 index 000000000..fdc469069 --- /dev/null +++ b/src/test_solver.jl @@ -0,0 +1,63 @@ +import Base.Test: @test, @test_approx_eq_eps + +const testset = [ + Dict(:ivp => ExplicitODE(0.0, + [0.0], + (t,y,dy)->dy[1]=6.0, + J! 
=(t,y,dy)->dy[1]=0.0), + :sol => t->[6t], + :isscalar => true, + :name => "y'=6", + :options => Dict(:initstep => 0.1, + :tstop => 1.0) + ) + ] + + +function test_integrator{I<:AbstractIntegrator}(integrator::Type{I}; + params = nothing, + eqntypes = [ExplicitODE]) + + const tol = 0.02 + + # filter the tests + tests = filter(x->in(typeof(x[:ivp]),eqntypes),testset) + + for test in testset + println("Testing problem $(test[:name])") + + ivp, opts, sol = test[:ivp], test[:options], test[:sol] + + # 1) test the constructor + @test integrator <: AbstractIntegrator + integ=integrator(ivp;opts...) + @test typeof(integ)<:integrator + + # 2) test if the minimal backend is implemented + state=init(ivp,integ) + @test typeof(state)<:AbstractState + @test onestep!(ivp,integ,state) == cont + # TODO: we should implement outputing the initial data as the + # first step + # @test output(state) == (ode.t0,ode.y0,ode.dy0) + @test typeof(output(state)) == eltype(ivp) + + # 3) test the info methods + if params != nothing + order, name, isadaptive = params + @test ODE.order(integ) == order + @test ODE.name(integ) == name + @test ODE.isadaptive(integ) == isadaptive + end + + # 4) test the iterator interface + # pure integrator + for (t,y,dy) in ODE.solve(ivp,integrator;opts...) + @test_approx_eq_eps y sol(t) tol + end + # with dense output + for (t,y) in ODE.solve(ivp,ODE.DenseOutput{integrator}; opts...) 
+ @test_approx_eq_eps y sol(t) tol + end + end +end From 8c1aaf35ab3e011845d3c868fcceecb9633ed6fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Mon, 1 Aug 2016 15:56:55 +0200 Subject: [PATCH 093/113] Added an iterator trait to Problem --- src/base.jl | 15 +++++++++++++++ test/iterators.jl | 12 ++++++++---- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/src/base.jl b/src/base.jl index 4f6ddc7b4..66be90f22 100644 --- a/src/base.jl +++ b/src/base.jl @@ -176,6 +176,21 @@ Base.length(prob::Problem) = length(prob.solver) Base.eltype{O,S}(::Type{Problem{O,S}}) = eltype(O) +if VERSION >= v"0.5.0-rc0" + """ + Makes some generic operations on iterators work, like + generator comprehensions: + tgen=(t for (t,y) in sol) + tout=collect(tgen) + or + errgen=(y-[exp(t)] for (t,y) in sol) + errout=collect(errgen) + + TODO: doesn't work for 0.4 and might have show issues due to non-copying output + """ + Base.iteratorsize{O,S}(::Type{Problem{O,S}}) = Base.SizeUnknown() +end + """ solve(ivp::IVP, solver::Type{AbstractSolver}, opts...) solve(ivp::IVP; solver=RKIntegratorAdaptive{:rk45}, opts...) diff --git a/test/iterators.jl b/test/iterators.jl index 7ee8ee316..3d3ae373d 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -40,7 +40,7 @@ const testsets = [ :y0 => [1.0,2.0], :tout => [0:.1:1;], :jac => (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]]), - :sol => t->[cos(t)-2*sin(t) 2*cos(t)+sin(t)], + :sol => t->[cos(t)-2*sin(t), 2*cos(t)+sin(t)], :isscalar => false, :name => "pendulum", :initstep => 0.001) @@ -152,16 +152,20 @@ function test_ode() :initstep => h0, :points => points) - solver = ODE.solve(equation,stepper;opts...) - - for (t,y) in solver + iterator = ODE.solve(equation,stepper;opts...) + for (t,y) in iterator @test_approx_eq_eps y sol(t) tol end + dense = ODE.solve(equation,ODE.DenseOutput{stepper}; opts...) for (t,y) in ODE.solve(equation,ODE.DenseOutput{stepper}; opts...) 
@test_approx_eq_eps y sol(t) tol end + # generator comprehension + @test all(collect((norm(y-sol(t),Inf)<=tol for (t,y) in iterator))) + @test all(collect((norm(y-sol(t),Inf)<=tol for (t,y) in dense))) + @test collect((t for (t,y) in dense))==tout end end end From d0816591245087b8ae912e7acc9d9149631be556 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 18 Aug 2016 10:39:44 +0200 Subject: [PATCH 094/113] Minor doc update --- docs/src/man/base.md | 39 ++++++++++++++++++++++++++++++++++++--- src/base.jl | 24 +++++++++++++++++++++++- 2 files changed, 59 insertions(+), 4 deletions(-) diff --git a/docs/src/man/base.md b/docs/src/man/base.md index 9eddc3585..b6a0327ee 100644 --- a/docs/src/man/base.md +++ b/docs/src/man/base.md @@ -1,4 +1,37 @@ -```@autodocs -Modules = [ODE] -Order = [:function, :type] +```@meta +CurrentModule = ODE +``` + +# Base + +The file `base.jl` implements the most basic iterator infrastructure +for solvers and the definitions of the types representing general IVP +(initial value problem) and solvers. + +## Predefined types of initial value problems + +```@docs +AbstractIVP +IVP +ExplicitODE +ImplicitODE +``` +## Solver architecture + +```@docs +AbstractSolver +AbstractIntegrator +AbstractState +``` + +The fallback constructor for `AbstractSolver(ivp::IVP;opts...)` ensures +that an error is thrown if a solver is constructed for an unsupported +type of the given IVP. + +## Fallback functions for solvers + +```@docs +Base.length(::AbstractSolver) +output(::AbstractState) +Base.eltype{T,Y}(::Type{AbstractIVP{T,Y}}) ``` diff --git a/src/base.jl b/src/base.jl index 66be90f22..780ec4c68 100644 --- a/src/base.jl +++ b/src/base.jl @@ -7,11 +7,21 @@ # - +""" + AbstractIVP{T,Y} + +A progenitor of types representing an IVP (initial value +problem). The type parameters `T` and `Y` correspond to the types of +time and state variable respectively. 
+ +""" abstract AbstractIVP{T,Y} Base.eltype{T,Y}(::Type{AbstractIVP{T,Y}}) = Tuple{T,Y,Y} """ + IVP{T,Y,F,G,J} <: AbstractIVP{T,Y} + Defines the mathematical part of an IVP (initial value problem) specified in the general form: @@ -46,6 +56,10 @@ end """ + typealias ExplicitODE{T,Y} IVP{T,Y,Function,Void,Function} + +Can be constructed by calling + ODE.ExplicitODE(t0,y0,F!;J!=jacobian)) Explicit ODE representing the problem @@ -91,14 +105,22 @@ end """ + AbstractSolver + The supertype of anything which can get you to a solution of a IVP. Subtypes include: `AbstractIntegrator`s but also `DenseOutput` """ abstract AbstractSolver -legnth(s::AbstractSolver) = error("`length` is not defined for $(typeof(s)).") +Base.length(s::AbstractSolver) = error("`length` is not defined for $(typeof(s)).") + +""" + +The fallback generator of an abstract solver, throws an error if the +solver cannot be generated for the given initial value problem type. +""" @compat (::Type{S}){S<:AbstractSolver}(ivp;opts...) = error("The solver $S doesn't support IVP of form $(typeof(ivp))") From 6acd2836556cbdb7e49601e29419683991cf33e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 18 Aug 2016 11:08:27 +0200 Subject: [PATCH 095/113] More minor fixes --- src/base.jl | 9 +++++++-- src/dense.jl | 4 ++-- src/integrators/ode23s.jl | 2 +- src/integrators/runge-kutta.jl | 6 +++--- src/options.jl | 14 +++++++------- 5 files changed, 20 insertions(+), 15 deletions(-) diff --git a/src/base.jl b/src/base.jl index 780ec4c68..2013022c1 100644 --- a/src/base.jl +++ b/src/base.jl @@ -72,12 +72,17 @@ Explicit ODE representing the problem """ typealias ExplicitODE{T,Y} IVP{T,Y,Function,Void,Function} +# TODO: +# typealias ExplicitODE{T,Y,F,J} IVP{T,Y,F,Void,J} @compat function (::Type{ExplicitODE}){T,Y}(t0::T, y0::Y, F!::Function; - J!::Function = forward_jacobian!(F!,similar(y0)), + J!::Function = forward_jacobian!(F!,copy(y0)), kargs...) 
- ExplicitODE{T,Y}(t0,y0,similar(y0),F!,nothing,J!) + # precompute y' + dy0 = copy(y0) + F!(t0,y0,dy0) + ExplicitODE{T,Y}(t0,y0,dy0,F!,nothing,J!) end """ diff --git a/src/dense.jl b/src/dense.jl index ad657065a..1f988c6cd 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -93,10 +93,10 @@ output(ds::DenseState) = output(ds.step_out) function init(ivp::IVP, solver::DenseOutput) integrator_state = init(ivp, solver.integ) - dy0 = similar(ivp.y0) + dy0 = copy(ivp.y0) ivp.F!(ivp.t0,ivp.y0,dy0) step_prev = Step(ivp.t0,copy(ivp.y0),dy0) - step_out = Step(ivp.t0,similar(ivp.y0),similar(ivp.y0)) + step_out = Step(ivp.t0,copy(ivp.y0),copy(ivp.y0)) return DenseState(1,step_prev,step_out,integrator_state) end diff --git a/src/integrators/ode23s.jl b/src/integrators/ode23s.jl index ee2f9fe75..ae2b9de5a 100644 --- a/src/integrators/ode23s.jl +++ b/src/integrators/ode23s.jl @@ -9,7 +9,7 @@ immutable ModifiedRosenbrockIntegrator{T<:Number} <: AbstractIntegrator const_e::T end -function ModifiedRosenbrockIntegrator{T}(ode::ExplicitODE{T};opts...) +function ModifiedRosenbrockIntegrator{T,Y<:AbstractVector}(ode::ExplicitODE{T,Y};opts...) const_d = 1/(2+sqrt(T(2))) const_e = 6+sqrt(T(2)) diff --git a/src/integrators/runge-kutta.jl b/src/integrators/runge-kutta.jl index d6f5905aa..b1551f66e 100644 --- a/src/integrators/runge-kutta.jl +++ b/src/integrators/runge-kutta.jl @@ -20,7 +20,7 @@ immutable TableauRKExplicit{T} <: Tableau{T} @assert istril(a) @assert s==size(a,1)==size(a,2)==size(b,2) @assert size(b,1)==length(order) - @assert norm(sum(a,2)-c'',Inf)vecnorm(y,Inf), + norm::N = y->maxabs(y), maxiters = T(Inf), isoutofdomain::O = Base.isnan, kargs...) 
- @assert minstep>=0 && maxstep>=0 && initstep>=0 # TODO: move to inner constructor + @assert minstep>=T(0) && maxstep>=T(0) && initstep>=T(0) # TODO: move to inner constructor AdaptiveOptions{T,N,O}(tstop,reltol,abstol,minstep,maxstep,initstep,norm,maxiters,isoutofdomain) end @@ -65,7 +65,7 @@ end @compat function (::Type{FixedOptions{T}}){T}(; tout = T[Inf], tstop = tout[end], - initstep = 10*eps(T), + initstep = T(10)*eps(T), kargs...) @assert initstep>=0 FixedOptions{T}(tstop,initstep) From f65448aba6e3f30c79d91c592759c4719f5bcaf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 18 Aug 2016 11:09:31 +0200 Subject: [PATCH 096/113] New tests module --- examples/custom_integrator.jl | 29 +++++++------- src/ODE.jl | 25 +++++++++++- src/test_solver.jl | 63 ------------------------------ src/tests/integrators.jl | 58 ++++++++++++++++++++++++++++ src/tests/minimal_types.jl | 72 +++++++++++++++++++++++++++++++++++ 5 files changed, 169 insertions(+), 78 deletions(-) delete mode 100644 src/test_solver.jl create mode 100644 src/tests/integrators.jl create mode 100644 src/tests/minimal_types.jl diff --git a/examples/custom_integrator.jl b/examples/custom_integrator.jl index 6c1f70c13..093936045 100644 --- a/examples/custom_integrator.jl +++ b/examples/custom_integrator.jl @@ -1,3 +1,5 @@ +include("../src/ODE.jl") + """ Here we demonstrate how to implement an integrator so that it is @@ -34,7 +36,7 @@ function EulerIntegrator{T}(ode::ExplicitODE{T}; tstop = T(Inf), initstep = T(1//10), opts...) 
- EulerIntegrator(tstop,initstep) + EulerIntegrator{T}(tstop,initstep) end # The state of the integrator, it stores the current values for time @@ -46,16 +48,14 @@ end type EulerState{T,Y} <: AbstractState{T,Y} step::ODE.Step{T,Y} end +output(state::EulerState) = output(state.step) # Generates the state given an ODE and an integrator, this method has # to be specialized for `EulerIntegrator`, we don't have to specialize # it for `ExplicitODE` as the type of an ODE is already filtered by # the specialized the construtor, but we do it here for clarity. function init(ode::ExplicitODE, integ::EulerIntegrator) - t0, y0 = ode.t0, copy(ode.y0) - dy0 = similar(y0) - ode.F!(t0,y0,dy0) # fill in the values of the derivative - EulerState(ODE.Step(t0,y0,dy0)) + EulerState(ODE.Step(ode.t0,copy(ode.y0),copy(ode.dy0))) end function onestep!(ode::ExplicitODE, integ::EulerIntegrator, state::EulerState) @@ -74,9 +74,9 @@ function onestep!(ode::ExplicitODE, integ::EulerIntegrator, state::EulerState) dt = min(integ.initstep,integ.tstop-t) # update the time, - state.step.t += dt + state.step.t += dt # the function (this is the `dy` from the previous step) - state.step.y .+= dt*dy + state.step.y += dt*dy # and its derivative ode.F!(t,y,dy) # return a flag to continue the integration @@ -101,21 +101,24 @@ type EulerState2{T,Y} <: ODE.AbstractState{T,Y} end # but then we have to define the method `output` output(state::EulerState2) = (state.t, state.y, state.dy) -# normally `output` falls back to `output(state)=output(state.step)` -# with `output(step)=step.t,step.y,step.dy`. 
end # Usage example using ODE -import MyIntegrator: EulerIntegrator +# import ODETests: test_integrator +using ODETests +using MyIntegrator + +integ = MyIntegrator.EulerIntegrator # declare the ODE as usual ode =ODE.ExplicitODE(0.0,[1.0],(t,y,dy)->copy!(dy,y)) # solve the `ode` with our integrator, note that we can pass options to `solve` -sol =ODE.solve(ode,EulerIntegrator;tstop=1.0,initstep=0.001) -# print the last step of the solution +sol =ODE.solve(ode,integ;tstop=1.0,initstep=0.001) +# # print the last step of the solution collect(sol)[end] # test the integrator -ODE.test_integrator(EulerIntegrator) +ODETests.test_integrator(integ,ODETests.case_vector) +ODETests.test_integrator(integ,ODETests.case_minimal_type) diff --git a/src/ODE.jl b/src/ODE.jl index 445a14d2a..91499de76 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -49,7 +49,28 @@ include("integrators/rosenbrock.jl") # User interface to solvers include("top-interface.jl") +end # module ODE + + +""" + +This module contains simple test functions for solvers/integrators +compatible with ODE.jl. You can use it to test your custom solvers, +for examples of how to use these functions see our tests in `test/` +directory. + +""" +module ODETests + +import Base.Test: @test, @test_approx_eq_eps + +using ODE + +import ODE: AbstractIntegrator, AbstractState, ExplicitODE + +include("tests/minimal_types.jl") + # Test function for the iterator interface -include("test_solver.jl") +include("tests/integrators.jl") -end # module ODE +end diff --git a/src/test_solver.jl b/src/test_solver.jl deleted file mode 100644 index fdc469069..000000000 --- a/src/test_solver.jl +++ /dev/null @@ -1,63 +0,0 @@ -import Base.Test: @test, @test_approx_eq_eps - -const testset = [ - Dict(:ivp => ExplicitODE(0.0, - [0.0], - (t,y,dy)->dy[1]=6.0, - J! 
=(t,y,dy)->dy[1]=0.0), - :sol => t->[6t], - :isscalar => true, - :name => "y'=6", - :options => Dict(:initstep => 0.1, - :tstop => 1.0) - ) - ] - - -function test_integrator{I<:AbstractIntegrator}(integrator::Type{I}; - params = nothing, - eqntypes = [ExplicitODE]) - - const tol = 0.02 - - # filter the tests - tests = filter(x->in(typeof(x[:ivp]),eqntypes),testset) - - for test in testset - println("Testing problem $(test[:name])") - - ivp, opts, sol = test[:ivp], test[:options], test[:sol] - - # 1) test the constructor - @test integrator <: AbstractIntegrator - integ=integrator(ivp;opts...) - @test typeof(integ)<:integrator - - # 2) test if the minimal backend is implemented - state=init(ivp,integ) - @test typeof(state)<:AbstractState - @test onestep!(ivp,integ,state) == cont - # TODO: we should implement outputing the initial data as the - # first step - # @test output(state) == (ode.t0,ode.y0,ode.dy0) - @test typeof(output(state)) == eltype(ivp) - - # 3) test the info methods - if params != nothing - order, name, isadaptive = params - @test ODE.order(integ) == order - @test ODE.name(integ) == name - @test ODE.isadaptive(integ) == isadaptive - end - - # 4) test the iterator interface - # pure integrator - for (t,y,dy) in ODE.solve(ivp,integrator;opts...) - @test_approx_eq_eps y sol(t) tol - end - # with dense output - for (t,y) in ODE.solve(ivp,ODE.DenseOutput{integrator}; opts...) - @test_approx_eq_eps y sol(t) tol - end - end -end diff --git a/src/tests/integrators.jl b/src/tests/integrators.jl new file mode 100644 index 000000000..acf1fdb1d --- /dev/null +++ b/src/tests/integrators.jl @@ -0,0 +1,58 @@ +using Base.Test + +const case_vector = Dict(:ivp => ExplicitODE(0.0, + [0.0], + (t,y,dy)->dy[:]=6.0, + J! 
=(t,y,dy)->dy[1]=0.0), + :sol => t->[6t], + :name => "y'=6 (vector)", + :options => Dict(:initstep => 0.1, + :tstop => 1.0), + ) + +function test_integrator(integrator,test) + + ivp, sol, name, opts = test[:ivp], test[:sol], test[:name], test[:options] + + T,Y = eltype(ivp).parameters + + tol = 2//10 + + # 1) test the constructor + @test integrator <: AbstractIntegrator + integ=integrator(ivp;opts...) + @test typeof(integ)<:integrator + + # 2) test if the minimal backend is implemented + state=ODE.init(ivp,integ) + @test typeof(state)<:AbstractState + # output after initialization should give the initial data + @test ODE.output(state) == (ivp.t0,ivp.y0,ivp.dy0) + + # we should be able to perform the first step + @test ODE.onestep!(ivp,integ,state) == ODE.cont + # after one step the output should be updated + @test ODE.output(state) != (ivp.t0,ivp.y0,ivp.dy0) + + # 3) test the iterator interface + # pure integrator + for (t,y,dy) in ODE.solve(ivp,integrator;opts...) + @test maxabs(y-sol(t)) < tol + end + # with dense output + for (t,y) in ODE.solve(ivp,ODE.DenseOutput{integrator}; opts...) 
+ @test maxabs(y-sol(t)) < tol + # TODO: make this work + # @test_approx_eq_eps y sol(t) tol + end +end + +function aaaa() +# 3) test the backend API + if properties != nothing + order, name, isadaptive = properties + @test ODE.order(integ) == order + @test ODE.name(integ) == name + @test ODE.isadaptive(integ) == isadaptive + end +end diff --git a/src/tests/minimal_types.jl b/src/tests/minimal_types.jl new file mode 100644 index 000000000..996a9c3ae --- /dev/null +++ b/src/tests/minimal_types.jl @@ -0,0 +1,72 @@ +import Base: <, >, >=, <= +import Base: +, -, *, /, ^ +import Base: .+, .- +import Base: copy, zero, getindex, setindex!, similar +import Base: eps, convert, promote_rule + + +# position variable +type Position{T} <: AbstractVector{T} + x::T + y::T +end + +copy(p::Position) = Position(p.x,p.y) +similar{T}(p::Position{T}) = copy(p) + +for op in (:+, :-, :.+) + @eval ($op)(p1::Position,p2::Position) = Position(($op)(p1.x,p2.x), + ($op)(p1.y,p2.y)) +end + +Base.size(::Position)=(2,) +getindex{T}(p::Position{T},i::Int) = i==1 ? p.x : p.y +setindex!{T}(p::Position{T},val::T,i::Int) = i==1 ? 
p.x=val : p.y=val + +# MyFloat variable +immutable MyFloat{T} <: Real + t::T +end + +# we need these to construct MyFloat from constants, constants are +# predefined in terms of numbers of inifinite precision such as Int or +# Rational +convert{T}(::Type{MyFloat{T}},s::Rational) = MyFloat{T}(convert(T,s)) +convert{T}(::Type{MyFloat{T}},s::Integer) = MyFloat{T}(convert(T,s)) +promote_rule{T<:MyFloat,R<:Rational}(::Type{T},::Type{R}) = T +promote_rule{T<:MyFloat,R<:Integer}(::Type{T},::Type{R}) = T + +eps{T}(::Type{MyFloat{T}}) = MyFloat{T}(eps(T)) + +# binary operators +for op in (:+, :-, :*, :/, :^) + @eval ($op)(t1::MyFloat,t2::MyFloat) = MyFloat(($op)(t1.t,t2.t)) +end + +# unary operators +for op in (:-,) + @eval ($op)(t::MyFloat) = MyFloat(($op)(t.t)) +end + +# comparisons +for op in (:<, :>, :>=, :<=) + @eval ($op)(t1::MyFloat,t2::MyFloat) = ($op)(t1.t,t2.t) +end + +# vector times scalar multiplication +*(t::MyFloat,p::Position)=Position(t*p.x,t*p.y) +*(p::Position,t::MyFloat)= *(t,p) + + +const case_minimal_type = + Dict(:ivp => ExplicitODE(MyFloat(0.0), + Position(MyFloat(0.0),MyFloat(1.0)), + (t,y,dy)->(dy.x=y.y;dy.y=-y.x)), + :sol => t->Position(sin(t),cos(t)), + :name => "y'=6 (minimal types)", + :options => Dict(:initstep => MyFloat(0.1), + :tstop => MyFloat(1.0)) + ) + +Base.sin{T}(t::MyFloat{T})=MyFloat{T}(sin(t.t)) +Base.cos{T}(t::MyFloat{T})=MyFloat{T}(cos(t.t)) From 79fc44d359cda9ba276328760eb25e7589095ef7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 18 Aug 2016 19:19:38 +0200 Subject: [PATCH 097/113] New test suite, including custom types --- src/ODE.jl | 1 + src/integrators/ode23s.jl | 4 +- src/options.jl | 2 +- src/tests/integrators.jl | 39 +++--- src/tests/minimal_types.jl | 45 +++--- src/tests/test_cases.jl | 88 ++++++++++++ test/iterators.jl | 271 ++++++++++++++++--------------------- test/runtests.jl | 113 +--------------- test/top-interface.jl | 108 +++++++++++++++ 9 files changed, 363 insertions(+), 308 
deletions(-) create mode 100644 src/tests/test_cases.jl create mode 100644 test/top-interface.jl diff --git a/src/ODE.jl b/src/ODE.jl index 91499de76..8191a3254 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -69,6 +69,7 @@ using ODE import ODE: AbstractIntegrator, AbstractState, ExplicitODE include("tests/minimal_types.jl") +include("tests/test_cases.jl") # Test function for the iterator interface include("tests/integrators.jl") diff --git a/src/integrators/ode23s.jl b/src/integrators/ode23s.jl index ae2b9de5a..35414e276 100644 --- a/src/integrators/ode23s.jl +++ b/src/integrators/ode23s.jl @@ -73,7 +73,7 @@ function init{T}(ode::ExplicitODE{T}, zero(y), # k2 zero(y), # k3 zero(y), # ynew - dt*0, # dtnew + zero(dt), # dtnew jac, # jac 0) # iters @@ -152,7 +152,7 @@ function errorcontrol!(ode::ExplicitODE, err = (abs(dt)/6)*(opts.norm(k1 - 2*k2 + k3))/delta # new step-size - dtnew = td*min(opts.maxstep, abs(dt)*0.8*err^(-1/3) ) + dtnew = td*min(opts.maxstep, abs(dt)*(8//10)*err^(-1//3) ) # trim in case newdt > dt dtnew = td*min(abs(dtnew), abs(opts.tstop-(t+dt))) diff --git a/src/options.jl b/src/options.jl index d75806978..bab07f0eb 100644 --- a/src/options.jl +++ b/src/options.jl @@ -63,7 +63,7 @@ immutable FixedOptions{T} <: Options{T} end @compat function (::Type{FixedOptions{T}}){T}(; - tout = T[Inf], + tout = [T(Inf)], tstop = tout[end], initstep = T(10)*eps(T), kargs...) diff --git a/src/tests/integrators.jl b/src/tests/integrators.jl index acf1fdb1d..be20b7d31 100644 --- a/src/tests/integrators.jl +++ b/src/tests/integrators.jl @@ -1,22 +1,15 @@ using Base.Test -const case_vector = Dict(:ivp => ExplicitODE(0.0, - [0.0], - (t,y,dy)->dy[:]=6.0, - J! 
=(t,y,dy)->dy[1]=0.0), - :sol => t->[6t], - :name => "y'=6 (vector)", - :options => Dict(:initstep => 0.1, - :tstop => 1.0), - ) - function test_integrator(integrator,test) ivp, sol, name, opts = test[:ivp], test[:sol], test[:name], test[:options] + println("Integrator $integrator)") + print(" Test case $name ") + T,Y = eltype(ivp).parameters - tol = 2//10 + tol = 1//500 # 1) test the constructor @test integrator <: AbstractIntegrator @@ -36,15 +29,29 @@ function test_integrator(integrator,test) # 3) test the iterator interface # pure integrator - for (t,y,dy) in ODE.solve(ivp,integrator;opts...) - @test maxabs(y-sol(t)) < tol + iterator = ODE.solve(ivp,integrator; opts...) + dense = ODE.solve(ivp,ODE.DenseOutput{integrator}; opts...) + niters=0 + for (t,y,dy) in iterator + niters+=1 + @test maxabs(y-sol(t)) < niters*tol + # TODO: replace with + # @test_approx_eq_eps y sol(t) tol end + # with dense output - for (t,y) in ODE.solve(ivp,ODE.DenseOutput{integrator}; opts...) + for (t,y,dy) in dense @test maxabs(y-sol(t)) < tol - # TODO: make this work - # @test_approx_eq_eps y sol(t) tol end + + # generator comprehension + @test all(collect((maxabs(y-sol(t))<=tol for (t,y) in iterator))) + @test all(collect((maxabs(y-sol(t))<=tol for (t,y) in dense))) + + tout = opts[:tout] + @test collect((t for (t,y) in dense))==tout + + println("OK!") end function aaaa() diff --git a/src/tests/minimal_types.jl b/src/tests/minimal_types.jl index 996a9c3ae..e57d50a8c 100644 --- a/src/tests/minimal_types.jl +++ b/src/tests/minimal_types.jl @@ -1,8 +1,9 @@ import Base: <, >, >=, <= import Base: +, -, *, /, ^ -import Base: .+, .- -import Base: copy, zero, getindex, setindex!, similar -import Base: eps, convert, promote_rule +# for the vector type +import Base: getindex, setindex!, similar +# for the scalar type +import Base: eps, convert, promote_rule, sqrt # position variable @@ -11,10 +12,9 @@ type Position{T} <: AbstractVector{T} y::T end -copy(p::Position) = Position(p.x,p.y) 
-similar{T}(p::Position{T}) = copy(p) +similar{T}(p::Position{T},::Type{T}) = Position(p.x,p.y) -for op in (:+, :-, :.+) +for op in (:+, :-) @eval ($op)(p1::Position,p2::Position) = Position(($op)(p1.x,p2.x), ($op)(p1.y,p2.y)) end @@ -23,7 +23,12 @@ Base.size(::Position)=(2,) getindex{T}(p::Position{T},i::Int) = i==1 ? p.x : p.y setindex!{T}(p::Position{T},val::T,i::Int) = i==1 ? p.x=val : p.y=val -# MyFloat variable +# MyFloat variable. It can be constructed from any type but cannot be +# converted, so operations between MyFloat{Float64} and Float64 should +# throw a conversion error. This is to detect operations mixing high +# precision floats (like BigFloat) with lower precision constants, +# which could result in decreasing the overall precision of the +# algorithm. immutable MyFloat{T} <: Real t::T end @@ -37,12 +42,17 @@ promote_rule{T<:MyFloat,R<:Rational}(::Type{T},::Type{R}) = T promote_rule{T<:MyFloat,R<:Integer}(::Type{T},::Type{R}) = T eps{T}(::Type{MyFloat{T}}) = MyFloat{T}(eps(T)) +# necessary for the modified Rosenbrock integrator +sqrt{T}(t::MyFloat{T})=MyFloat{T}(sqrt(t.t)) # binary operators for op in (:+, :-, :*, :/, :^) - @eval ($op)(t1::MyFloat,t2::MyFloat) = MyFloat(($op)(t1.t,t2.t)) + @eval ($op){T}(t1::MyFloat{T},t2::MyFloat{T}) = MyFloat{T}(($op)(t1.t,t2.t)) end +# See #18114 +^{T<:MyFloat}(x::T, y::Rational) = x^(convert(T,y.num)/convert(T,y.den)) + # unary operators for op in (:-,) @eval ($op)(t::MyFloat) = MyFloat(($op)(t.t)) @@ -50,23 +60,10 @@ end # comparisons for op in (:<, :>, :>=, :<=) - @eval ($op)(t1::MyFloat,t2::MyFloat) = ($op)(t1.t,t2.t) + @eval ($op){T}(t1::MyFloat{T},t2::MyFloat{T}) = ($op)(t1.t,t2.t) end -# vector times scalar multiplication -*(t::MyFloat,p::Position)=Position(t*p.x,t*p.y) -*(p::Position,t::MyFloat)= *(t,p) - - -const case_minimal_type = - Dict(:ivp => ExplicitODE(MyFloat(0.0), - Position(MyFloat(0.0),MyFloat(1.0)), - (t,y,dy)->(dy.x=y.y;dy.y=-y.x)), - :sol => t->Position(sin(t),cos(t)), - :name => "y'=6 
(minimal types)", - :options => Dict(:initstep => MyFloat(0.1), - :tstop => MyFloat(1.0)) - ) - +# these are only necessary because they are used in the definition of +# the ODE test case (see test_cases.jl, :harmonic_minimal_types) Base.sin{T}(t::MyFloat{T})=MyFloat{T}(sin(t.t)) Base.cos{T}(t::MyFloat{T})=MyFloat{T}(cos(t.t)) diff --git a/src/tests/test_cases.jl b/src/tests/test_cases.jl new file mode 100644 index 000000000..600c2bd0c --- /dev/null +++ b/src/tests/test_cases.jl @@ -0,0 +1,88 @@ +# some standard test cases +const test_cases = + Dict(:constant_in_time=> + Dict(:ivp => ExplicitODE(0.0,[0.0], + (t,y,dy)->dy[:]=6.0, + J! = (t,y,dy)->dy[1]=0.0), + :sol => t->[6t], + :name => "y'=6 (vector)", + :options => Dict(:initstep => 0.1, + :tstop => 1.0, + :tout => [0.0,0.1,1.0]) + ), + + :variable_in_time=> + Dict(:ivp => ExplicitODE(0.0,[0.0], + (t,y,dy)->dy[1]=2t, + J! = (t,y,dy)->dy[1]=0.0), + :sol => t->[t^2], + :name => "y'=2t", + :options=> Dict(:tout => [0:0.001:1;], + :initstep => 0.001) + ), + + :linear=> + Dict(:ivp => ExplicitODE(0.0,[1.0], + (t,y,dy)->dy[1]=y[1], + J! = (t,y,dy)->dy[1]=1.0), + :sol => t->[exp(t)], + :name => "y'=y", + :options=> Dict(:tout => [0:0.001:1;], + :initstep => 0.001) + ), + + :backward_in_time=> + Dict(:ivp => ExplicitODE(1.0,[1.0], + (t,y,dy)->dy[1]=y[1], + J! = (t,y,dy)->dy[1]=1.0), + :sol => t->[exp(t-1)], + :name => "y'=y backwards", + :options=> Dict(:tout => [1:-0.001:0;], + :initstep => 0.001) + ), + + :harmonic=> + Dict(:ivp => ExplicitODE(0.0,[1.0,2.0], + (t,y,dy)->(dy[1]=-y[2];dy[2]=y[1]), + J! 
= (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]])), + :sol => t->[cos(t)-2*sin(t), 2*cos(t)+sin(t)], + :name => "harmonic (with Jacobian)", + :options=> Dict(:tout => [0:.1:1;], + :tstop => 1.0, + :initstep => 0.001) + ), + + :harmonic_no_jac=> + Dict(:ivp => ExplicitODE(0.0,[1.0,2.0], + (t,y,dy)->(dy[1]=-y[2];dy[2]=y[1])), + :sol => t->[cos(t)-2*sin(t), 2*cos(t)+sin(t)], + :name => "harmonic (no Jacobian)", + :options=> Dict(:tout => [0:.1:1;], + :tstop => 1.0, + :initstep => 0.001) + ), + + :harmonic_minimal_types=> + Dict(:ivp => ExplicitODE(MyFloat(0.0), + Position(MyFloat(0.0),MyFloat(1.0)), + (t,y,dy)->(dy.x=y.y;dy.y=-y.x), + J! = (t,y,J)->(J[:]=0;J[2,1]=1;J[1,2]=-1)), + :sol => t->Position(sin(t),cos(t)), + :name => "harmonic (minimal types)", + :options => Dict(:initstep => MyFloat(0.001), + :tstop => MyFloat(1.0), + :tout => [MyFloat(0.0),MyFloat(0.1),MyFloat(1.0)]) + ), + + # :harmonic_minimal_types_no_jac=> + # Dict(:ivp => ExplicitODE(MyFloat(0.0), + # Position(MyFloat(0.0),MyFloat(1.0)), + # (t,y,dy)->(dy.x=y.y;dy.y=-y.x)), + # :sol => t->Position(sin(t),cos(t)), + # :name => "harmonic (minimal types, no Jacobian)", + # :options => Dict(:initstep => MyFloat(0.1), + # :tstop => MyFloat(1.0), + # :tout => [MyFloat(0.0),MyFloat(0.1),MyFloat(1.0)]) + # ) + + ) diff --git a/test/iterators.jl b/test/iterators.jl index 3d3ae373d..810cedcb5 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -1,52 +1,3 @@ -const testsets = [ - Dict( - :F! => (t,y,dy)->dy[1]=6.0, - :y0 => [0.], - :tout => [0:0.1:1;], - :jac => (t,y,dy)->dy[1]=0.0, - :sol => t->[6t], - :isscalar => true, - :name => "y'=6", - :initstep => 0.1), - Dict( - :F! => (t,y,dy)->dy[1]=2t, - :y0 => [0.], - :tout => [0:0.001:1;], - :jac => (t,y,dy)->dy[1]=0.0, - :sol => t->[t^2], - :isscalar => true, - :name => "y'=2t", - :initstep => 0.001), - Dict( - :F! 
=> (t,y,dy)->dy[1]=y[1], - :y0 => [1.0], - :tout => [0:0.001:1;], - :jac => (t,y,dy)->dy[1]=1.0, - :sol => t->[exp(t)], - :isscalar => true, - :name => "y'=y", - :initstep => 0.001), - Dict( - :F! => (t,y,dy)->dy[1]=y[1], - :y0 => [1.0], - :tout => [1:-0.001:0;], - :jac => (t,y,dy)->dy[1]=1.0, - :sol => t->[exp(t-1)], - :isscalar => true, - :name => "y'=y backwards", - :initstep => 0.001), - Dict( - :F! => (t,y,dy)->(dy[1]=-y[2];dy[2]=y[1]), - :y0 => [1.0,2.0], - :tout => [0:.1:1;], - :jac => (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]]), - :sol => t->[cos(t)-2*sin(t), 2*cos(t)+sin(t)], - :isscalar => false, - :name => "pendulum", - :initstep => 0.001) - ] - - # Testing function ode const integrators = [ODE.RKIntegratorFixed{:feuler}, ODE.RKIntegratorFixed{:midpoint}, @@ -60,115 +11,125 @@ const integrators = [ODE.RKIntegratorFixed{:feuler}, ODE.ModifiedRosenbrockIntegrator ] -function test_ode() - tol = 0.002 +using ODETests +function test_integrators() for integ in integrators - println("Testing $integ") - for ts in testsets - println("Testing problem $(ts[:name])") - - tout, h0, stepper = ts[:tout], ts[:initstep], integ - - y0, F!, jac!, sol = ts[:y0], ts[:F!], ts[:jac], ts[:sol] - - F(t,y) = (dy = similar(y); F!(t,y,dy); return dy) - - for points = [:specified, :all] - if ts[:isscalar] - # test the ODE.odeXX scalar interface (if the equation is scalar) - Fscal = (t,y)->F(t,[y])[1] - y0scal = y0[1] - # with jacobian - tj,yj = ODE.ode(Fscal,y0scal,tout, - solver = stepper, - points = points, - initstep = h0, - J! = jac!) 
- @test_approx_eq_eps yj map(x->sol(x)[1],tj) tol - # without jacobian - t,y = ODE.ode(Fscal,y0scal,tout, - solver = stepper, - points = points, - initstep = h0) - @test_approx_eq_eps y map(x->sol(x)[1],tj) tol - - # results with and without jacobian should be exactly the same - @test_approx_eq yj y - - if points == :specified - # test if we covered the whole timespan - @test length(tout) == length(t) == length(tj) - @test_approx_eq tout t - @test_approx_eq tout tj - end - end - - # ODE.odeXX vector interface - # with jacobian - tj,yj = ODE.ode(F,y0,tout, - solver = stepper, - points = points, - initstep = h0, - J! = jac!) - @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol - # without jacobian - t,y = ODE.ode(F,y0,tout, - solver = stepper, - points = points, - initstep = h0) - @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol - - @test_approx_eq hcat(yj...) hcat(y...) - - # TODO: tests for `y::AbstractArray` - # # ODE.odeXX array interface for arrays - # # with jacobian - # tj,yj = ODE.ode(F,reshape(y0,length(y0),1,1),tout, - # solver = stepper, - # points = points, - # initstep = h0, - # J! = jac!) - # @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol - # # without jacobian - # t,y = ODE.ode(F,reshape(y0,length(y0),1,1),tout, - # solver = stepper, - # points = points, - # initstep = h0) - # @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol - - # @test_approx_eq hcat(yj...) hcat(y...) - - if points == :specified - # test if we covered the whole timespan - @test length(tout) == length(t) == length(tj) - @test_approx_eq tout t - @test_approx_eq tout tj - end - - # test the iterator interface - equation = ODE.ExplicitODE(tout[1],y0,F!) - opts = Dict(:tout => tout, - :initstep => h0, - :points => points) - - iterator = ODE.solve(equation,stepper;opts...) - for (t,y) in iterator - @test_approx_eq_eps y sol(t) tol - end - - dense = ODE.solve(equation,ODE.DenseOutput{stepper}; opts...) 
- for (t,y) in ODE.solve(equation,ODE.DenseOutput{stepper}; opts...) - @test_approx_eq_eps y sol(t) tol - end - - # generator comprehension - @test all(collect((norm(y-sol(t),Inf)<=tol for (t,y) in iterator))) - @test all(collect((norm(y-sol(t),Inf)<=tol for (t,y) in dense))) - @test collect((t for (t,y) in dense))==tout - end + for case in values(ODETests.test_cases) + ODETests.test_integrator(integ,case) end end end -test_ode() +test_integrators() + +# function test_ode() +# tol = 0.002 + +# for integ in integrators +# println("Testing $integ") +# for ts in testsets +# println("Testing problem $(ts[:name])") + +# tout, h0, stepper = ts[:tout], ts[:initstep], integ + +# y0, F!, jac!, sol = ts[:y0], ts[:F!], ts[:jac], ts[:sol] + +# F(t,y) = (dy = similar(y); F!(t,y,dy); return dy) + +# for points = [:specified, :all] +# if ts[:isscalar] +# # test the ODE.odeXX scalar interface (if the equation is scalar) +# Fscal = (t,y)->F(t,[y])[1] +# y0scal = y0[1] +# # with jacobian +# tj,yj = ODE.ode(Fscal,y0scal,tout, +# solver = stepper, +# points = points, +# initstep = h0, +# J! = jac!) +# @test_approx_eq_eps yj map(x->sol(x)[1],tj) tol +# # without jacobian +# t,y = ODE.ode(Fscal,y0scal,tout, +# solver = stepper, +# points = points, +# initstep = h0) +# @test_approx_eq_eps y map(x->sol(x)[1],tj) tol + +# # results with and without jacobian should be exactly the same +# @test_approx_eq yj y + +# if points == :specified +# # test if we covered the whole timespan +# @test length(tout) == length(t) == length(tj) +# @test_approx_eq tout t +# @test_approx_eq tout tj +# end +# end + +# # ODE.odeXX vector interface +# # with jacobian +# tj,yj = ODE.ode(F,y0,tout, +# solver = stepper, +# points = points, +# initstep = h0, +# J! = jac!) +# @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol +# # without jacobian +# t,y = ODE.ode(F,y0,tout, +# solver = stepper, +# points = points, +# initstep = h0) +# @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) 
tol + +# @test_approx_eq hcat(yj...) hcat(y...) + +# # TODO: tests for `y::AbstractArray` +# # # ODE.odeXX array interface for arrays +# # # with jacobian +# # tj,yj = ODE.ode(F,reshape(y0,length(y0),1,1),tout, +# # solver = stepper, +# # points = points, +# # initstep = h0, +# # J! = jac!) +# # @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol +# # # without jacobian +# # t,y = ODE.ode(F,reshape(y0,length(y0),1,1),tout, +# # solver = stepper, +# # points = points, +# # initstep = h0) +# # @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol + +# # @test_approx_eq hcat(yj...) hcat(y...) + +# if points == :specified +# # test if we covered the whole timespan +# @test length(tout) == length(t) == length(tj) +# @test_approx_eq tout t +# @test_approx_eq tout tj +# end + +# # test the iterator interface +# equation = ODE.ExplicitODE(tout[1],y0,F!) +# opts = Dict(:tout => tout, +# :initstep => h0, +# :points => points) + +# iterator = ODE.solve(equation,stepper;opts...) +# for (t,y) in iterator +# @test_approx_eq_eps y sol(t) tol +# end + +# dense = ODE.solve(equation,ODE.DenseOutput{stepper}; opts...) +# for (t,y) in ODE.solve(equation,ODE.DenseOutput{stepper}; opts...) 
+# @test_approx_eq_eps y sol(t) tol +# end + +# # generator comprehension +# @test all(collect((norm(y-sol(t),Inf)<=tol for (t,y) in iterator))) +# @test all(collect((norm(y-sol(t),Inf)<=tol for (t,y) in dense))) +# @test collect((t for (t,y) in dense))==tout +# end +# end +# end +# end diff --git a/test/runtests.jl b/test/runtests.jl index a890f442c..4e0af726b 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,116 +1,9 @@ using ODE using Base.Test -const tol = 1e-2 - -solvers = [ - ## Non-stiff - # fixed step - ODE.ode1, - ODE.ode2_midpoint, - ODE.ode2_heun, - ODE.ode4, - ODE.ode4ms, - ODE.ode5ms, - # adaptive - ODE.ode21, - ODE.ode23, - ODE.ode45_dp, - ODE.ode45_fe, - ODE.ode78, - - ## Stiff - # fixed-step - ODE.ode4s_s, - ODE.ode4s_kr, - # adaptive - ODE.ode23s] - -for solver in solvers - println("using $solver") - - # dy - # -- = 6 ==> y = 6t - # dt - # we need to fix initstep for the fixed-step methods - t,y=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1) - @test maximum(abs(y-6t)) < tol - tj,yj=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1, J! = (t,y,dy)->dy[1]=0.0) - @test maximum(abs(yj-6tj)) < tol - @test norm(yj-y,Inf) y = t.^2 - # dt - t,y =solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001) - @test maximum(abs(y-t.^2)) < tol - tj,yj=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001, J! = (t,y,dy)->dy[1]=0.0) - @test maximum(abs(yj-tj.^2)) < tol - @test norm(yj-y,Inf) y = y0*e.^t - # dt - t,y=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001) - @test maximum(abs(y-e.^t)) < tol - tj,yj=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) - @test maximum(abs(yj-e.^tj)) < tol - @test norm(yj-y,Inf)y, 1., [1:-.001:0;], initstep=0.001) - @test maximum(abs(y-e.^(t-1))) < tol - tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, J! 
= (t,y,dy)->dy[1]=1.0) - @test maximum(abs(yj-e.^(tj-1))) < tol - @test norm(yj-y,Inf) v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) - # dt dt - # - # y = [v, w] - t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001) - ys = hcat(y...).' # convert Vector{Vector{Float}} to Matrix{Float} - @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol - tj,yj=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001, J! = (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]])) - ysj = hcat(yj...).' # convert Vector{Vector{Float}} to Matrix{Float} - @test maximum(abs(ysj-[cos(tj)-2*sin(tj) 2*cos(tj)+sin(tj)])) < tol - @test norm(map(norm,yj-y),Inf)2y, 0., [0,1]) - # test typeof(y0)==Vector{Int} does not throw - @test_throws ErrorException t,y=solver((t,y)->[2y], [0], [0,1]) - # test typeof(y0)==Int does not throw - @test_throws ErrorException t,y=solver((t,y)->2y, 0, [0,1]) - # test if we can deal with a mixed case - @test_throws ErrorException t,y=solver((t,y)->2y, Number[1,1.1,BigInt(1)], Rational[0,1]) -end - -# Test negative starting times ODE.ode23s -@test length(ODE.ode23s((t,y)->[-y[2]; y[1]], [1., 2.], [-5., 0])[1]) > 1 - -# rober testcase from http://www.unige.ch/~hairer/testset/testset.html -let - println("ROBER test case") - function f(t, y) - ydot = similar(y) - ydot[1] = -0.04*y[1] + 1.0e4*y[2]*y[3] - ydot[3] = 3.0e7*y[2]*y[2] - ydot[2] = -ydot[1] - ydot[3] - ydot - end - t = [0., 1e11] - t,y = ODE.ode23s(f, [1.0, 0.0, 0.0], t; abstol=1e-8, reltol=1e-8, - maxstep=1e11/10, minstep=1e11/1e18) - - refsol = [0.2083340149701255e-07, - 0.8333360770334713e-13, - 0.9999999791665050] # reference solution at tspan[2] - @test norm(refsol-y[end], Inf) < 2.1e-10 -end - -include("interface-tests.jl") include("iterators.jl") +include("top-interface.jl") +# TODO: do we still need this? 
+# include("interface-tests.jl") println("All looks OK") diff --git a/test/top-interface.jl b/test/top-interface.jl new file mode 100644 index 000000000..0df4c0b13 --- /dev/null +++ b/test/top-interface.jl @@ -0,0 +1,108 @@ +solvers = [ + ## Non-stiff + # fixed step + ODE.ode1, + ODE.ode2_midpoint, + ODE.ode2_heun, + ODE.ode4, + ODE.ode4ms, + ODE.ode5ms, + # adaptive + ODE.ode21, + ODE.ode23, + ODE.ode45_dp, + ODE.ode45_fe, + ODE.ode78, + + ## Stiff + # fixed-step + ODE.ode4s_s, + ODE.ode4s_kr, + # adaptive + ODE.ode23s] + +for solver in solvers + tol = 1e-2 + + println("using $solver") + + # dy + # -- = 6 ==> y = 6t + # dt + # we need to fix initstep for the fixed-step methods + t,y=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1) + @test maximum(abs(y-6t)) < tol + tj,yj=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1, J! = (t,y,dy)->dy[1]=0.0) + @test maximum(abs(yj-6tj)) < tol + @test norm(yj-y,Inf) y = t.^2 + # dt + t,y =solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001) + @test maximum(abs(y-t.^2)) < tol + tj,yj=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001, J! = (t,y,dy)->dy[1]=0.0) + @test maximum(abs(yj-tj.^2)) < tol + @test norm(yj-y,Inf) y = y0*e.^t + # dt + t,y=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001) + @test maximum(abs(y-e.^t)) < tol + tj,yj=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) + @test maximum(abs(yj-e.^tj)) < tol + @test norm(yj-y,Inf)y, 1., [1:-.001:0;], initstep=0.001) + @test maximum(abs(y-e.^(t-1))) < tol + tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) + @test maximum(abs(yj-e.^(tj-1))) < tol + @test norm(yj-y,Inf) v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) + # dt dt + # + # y = [v, w] + t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001) + ys = hcat(y...).' 
# convert Vector{Vector{Float}} to Matrix{Float} + @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol + tj,yj=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001, J! = (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]])) + ysj = hcat(yj...).' # convert Vector{Vector{Float}} to Matrix{Float} + @test maximum(abs(ysj-[cos(tj)-2*sin(tj) 2*cos(tj)+sin(tj)])) < tol + @test norm(map(norm,yj-y),Inf)2y, 0., [0,1]) + # test typeof(y0)==Vector{Int} does not throw + @test_throws ErrorException t,y=solver((t,y)->[2y], [0], [0,1]) + # test typeof(y0)==Int does not throw + @test_throws ErrorException t,y=solver((t,y)->2y, 0, [0,1]) + # test if we can deal with a mixed case + @test_throws ErrorException t,y=solver((t,y)->2y, Number[1,1.1,BigInt(1)], Rational[0,1]) +end + +# Test negative starting times ODE.ode23s +@test length(ODE.ode23s((t,y)->[-y[2]; y[1]], [1., 2.], [-5., 0])[1]) > 1 + +# rober testcase from http://www.unige.ch/~hairer/testset/testset.html +let + println("ROBER test case") + function f(t, y) + ydot = similar(y) + ydot[1] = -0.04*y[1] + 1.0e4*y[2]*y[3] + ydot[3] = 3.0e7*y[2]*y[2] + ydot[2] = -ydot[1] - ydot[3] + ydot + end + t = [0., 1e11] + t,y = ODE.ode23s(f, [1.0, 0.0, 0.0], t; abstol=1e-8, reltol=1e-8, + maxstep=1e11/10, minstep=1e11/1e18) + + refsol = [0.2083340149701255e-07, + 0.8333360770334713e-13, + 0.9999999791665050] # reference solution at tspan[2] + @test norm(refsol-y[end], Inf) < 2.1e-10 +end From ad0f43b2436829422582d0f4d40dc1bdb1ef4aed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 19 Aug 2016 11:03:35 +0200 Subject: [PATCH 098/113] Even better tests (via @testset) --- examples/custom_integrator.jl | 19 ++++--- src/tests/integrators.jl | 104 ++++++++++++++++++++-------------- src/tests/test_cases.jl | 5 ++ test/iterators.jl | 6 +- 4 files changed, 82 insertions(+), 52 deletions(-) diff --git a/examples/custom_integrator.jl b/examples/custom_integrator.jl index 093936045..af2f136e9 100644 --- 
a/examples/custom_integrator.jl +++ b/examples/custom_integrator.jl @@ -64,14 +64,17 @@ function onestep!(ode::ExplicitODE, integ::EulerIntegrator, state::EulerState) # access the fields directly but we use it here for convenience. t, y, dy = ODE.output(state) - # the only stop condition our solver has - if t >= integ.tstop + tdir = sign(integ.tstop-ode.t0) + + # the only stop condition our solver has. Note the use of `abs`, + # which enables integration backward in time. + if tdir*t >= tdir*integ.tstop # this flag finalizes the iterator return ODE.finish else # trim the stepsize to match the `tstop`, prevents # overshooting - dt = min(integ.initstep,integ.tstop-t) + dt = tdir*min(integ.initstep,abs(integ.tstop-t)) # update the time, state.step.t += dt @@ -84,10 +87,10 @@ function onestep!(ode::ExplicitODE, integ::EulerIntegrator, state::EulerState) end end -# OPTIONAL: -# Define properties of this integrator: order, name and +# OPTIONAL: Define properties of this integrator: order, name and # whether it is adaptive or not. At this point the information -# supplied here is not used. +# supplied here is not used but it might be a good idea to implement +# these methods for future use. order{T}(::Type{EulerIntegrator{T}}) = 1 name{T}(::Type{EulerIntegrator{T}}) = "My own Euler integrator" isadaptive{T}(::Type{EulerIntegrator{T}}) = false @@ -120,5 +123,5 @@ sol =ODE.solve(ode,integ;tstop=1.0,initstep=0.001) collect(sol)[end] # test the integrator -ODETests.test_integrator(integ,ODETests.case_vector) -ODETests.test_integrator(integ,ODETests.case_minimal_type) +# TODO: for now I can't figure out why it fails. +ODETests.test_integrator(integ) diff --git a/src/tests/integrators.jl b/src/tests/integrators.jl index be20b7d31..6038b6316 100644 --- a/src/tests/integrators.jl +++ b/src/tests/integrators.jl @@ -1,60 +1,82 @@ using Base.Test +""" + test_integrator(integrator, [test_case]) + +Test a single `integrator` with either a single `test_case` or all +test cases. 
Test cases are defined in `ODETests.test_cases` in +`src/tests/test_cases.jl`. + +""" + function test_integrator(integrator,test) ivp, sol, name, opts = test[:ivp], test[:sol], test[:name], test[:options] - println("Integrator $integrator)") - print(" Test case $name ") - T,Y = eltype(ivp).parameters tol = 1//500 + @testset "$name" begin - # 1) test the constructor - @test integrator <: AbstractIntegrator - integ=integrator(ivp;opts...) - @test typeof(integ)<:integrator - - # 2) test if the minimal backend is implemented - state=ODE.init(ivp,integ) - @test typeof(state)<:AbstractState - # output after initialization should give the initial data - @test ODE.output(state) == (ivp.t0,ivp.y0,ivp.dy0) - - # we should be able to perform the first step - @test ODE.onestep!(ivp,integ,state) == ODE.cont - # after one step the output should be updated - @test ODE.output(state) != (ivp.t0,ivp.y0,ivp.dy0) - - # 3) test the iterator interface - # pure integrator - iterator = ODE.solve(ivp,integrator; opts...) - dense = ODE.solve(ivp,ODE.DenseOutput{integrator}; opts...) - niters=0 - for (t,y,dy) in iterator - niters+=1 - @test maxabs(y-sol(t)) < niters*tol - # TODO: replace with - # @test_approx_eq_eps y sol(t) tol - end + # 1) test the constructor + @test integrator <: AbstractIntegrator + integ=integrator(ivp;opts...) 
+ @test typeof(integ)<:integrator - # with dense output - for (t,y,dy) in dense - @test maxabs(y-sol(t)) < tol - end + # 2) test if the minimal backend is implemented + state=ODE.init(ivp,integ) + @test typeof(state)<:AbstractState + # output after initialization should give the initial data + @test ODE.output(state) == (ivp.t0,ivp.y0,ivp.dy0) + + # we should be able to perform the first step + @test ODE.onestep!(ivp,integ,state) == ODE.cont + # after one step the output should be updated + @test ODE.output(state) != (ivp.t0,ivp.y0,ivp.dy0) - # generator comprehension - @test all(collect((maxabs(y-sol(t))<=tol for (t,y) in iterator))) - @test all(collect((maxabs(y-sol(t))<=tol for (t,y) in dense))) + # 3) test the iterator interface + # pure integrator + iterator = ODE.solve(ivp,integrator; opts...) + dense = ODE.solve(ivp,ODE.DenseOutput{integrator}; opts...) - tout = opts[:tout] - @test collect((t for (t,y) in dense))==tout + tdir = sign(opts[:tstop]-ivp.t0) - println("OK!") + for iter in (iterator,dense) + for (t,y,dy) in dense + # TODO: is there a better way of doing this? We need + # a single @test statement in case of a failure for + # @testset to work properly. 
+ if maxabs(y-sol(t)) > tol + @test maxabs(y-sol(t)) <= tol + break + end + if tdir*t > tdir*opts[:tstop] + @test tdir*t <= tdir*opts[:tstop] + break + end + end + end + + # generator comprehension + @test all(collect((maxabs(y-sol(t))<=tol for (t,y) in iterator))) + @test all(collect((maxabs(y-sol(t))<=tol for (t,y) in dense))) + + tout = opts[:tout] + @test collect((t for (t,y) in dense))==tout + + end +end + +function test_integrator(integrator) + @testset "$integrator" begin + for case in values(test_cases) + ODETests.test_integrator(integrator,case) + end + end end -function aaaa() +# this is the functionality not yet included in test_integrator +function unused() # 3) test the backend API if properties != nothing order, name, isadaptive = properties diff --git a/src/tests/test_cases.jl b/src/tests/test_cases.jl index 600c2bd0c..3b0fa20d4 100644 --- a/src/tests/test_cases.jl +++ b/src/tests/test_cases.jl @@ -18,6 +18,7 @@ const test_cases = :sol => t->[t^2], :name => "y'=2t", :options=> Dict(:tout => [0:0.001:1;], + :tstop => 1.0, :initstep => 0.001) ), @@ -28,6 +29,7 @@ const test_cases = :sol => t->[exp(t)], :name => "y'=y", :options=> Dict(:tout => [0:0.001:1;], + :tstop => 1.0, :initstep => 0.001) ), @@ -38,6 +40,7 @@ const test_cases = :sol => t->[exp(t-1)], :name => "y'=y backwards", :options=> Dict(:tout => [1:-0.001:0;], + :tstop => 0.0, :initstep => 0.001) ), @@ -74,6 +77,8 @@ const test_cases = :tout => [MyFloat(0.0),MyFloat(0.1),MyFloat(1.0)]) ), + # TODO: ForwardDiff Jacobian doesn't seem to work with custom AbstractVector type + # :harmonic_minimal_types_no_jac=> # Dict(:ivp => ExplicitODE(MyFloat(0.0), # Position(MyFloat(0.0),MyFloat(1.0)), diff --git a/test/iterators.jl b/test/iterators.jl index 810cedcb5..3c96f9dc1 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -14,9 +14,9 @@ const integrators = [ODE.RKIntegratorFixed{:feuler}, using ODETests function test_integrators() - for integ in integrators - for case in 
values(ODETests.test_cases) - ODETests.test_integrator(integ,case) + @testset "Iterator interfaces" begin + for integ in integrators + ODETests.test_integrator(integ) end end end From 414862b338b482e323d249fb69647711f7e18bf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 19 Aug 2016 11:45:39 +0200 Subject: [PATCH 099/113] New naming convention and Solution type --- src/base.jl | 87 ++++++++++++++++++++-------------------- src/tests/integrators.jl | 24 ++++++----- src/top-interface.jl | 15 +++---- 3 files changed, 66 insertions(+), 60 deletions(-) diff --git a/src/base.jl b/src/base.jl index 2013022c1..e9eb94f85 100644 --- a/src/base.jl +++ b/src/base.jl @@ -219,27 +219,26 @@ if VERSION >= v"0.5.0-rc0" end """ - solve(ivp::IVP, solver::Type{AbstractSolver}, opts...) - solve(ivp::IVP; solver=RKIntegratorAdaptive{:rk45}, opts...) + iterate(ivp::IVP; solver=RKIntegratorAdaptive{:rk45}, opts...) -Solve creates an iterable `Problem` instance from an `IVP` instance +Iterate creates an iterable `Problem` instance from an `IVP` instance (specifying the math) and from a `Type{AbstractSolver}` (the numerical integrator). The simplest use case is - for (t,y,dy) in solver(...) + for (t,y,dy) in iterate(...) # do something with t, y an dy end If the integration interval, defined by the keyword argument `tstop`, is finite you can request all the results at once by calling - collect(solver(...)) # => Vector{Tuple{T,Y,Y}} + collect(iterate(...)) # => Vector{Tuple{T,Y,Y}} Notes: -- usually a solvers requires the ivp to be in a certain form, say an +- usually solvers require the ivp to be in a certain form, say an `ExplicitODE`. -- the second argument it the *Type* of the solver and not an instance. +- the second argument is the *Type* of the solver and not an instance. The instance of the solve can only be created together with the `ivp` as their type parameters need to match. 
@@ -253,60 +252,60 @@ Output: - `::Problem` """ -function solve(ivp::IVP, solver; opts...) - Problem(ivp,solver(ivp;opts...)) +function iterate{S<:AbstractSolver}(ivp::IVP; + solver::Type{S} = RKIntegratorAdaptive{:rk45}, + opts...) + Problem(ivp, solver(ivp; opts...)) end -function solve{S<:AbstractSolver}(ivp::IVP; - solver::Type{S} = RKIntegratorAdaptive{:rk45}, - opts...) - solve(ivp, solver; opts...) -end +""" + solve(ivp::IVP; solver=RKIntegratorAdaptive{:rk45}, opts...) -# In Julia 0.5 the collect needs length to be defined, we cannot do -# that for a Problem but we can implement our own collect -function collect(prob::Problem) - pairs = Array(eltype(prob),0) +Solve the initial value problem `ivp` using an algorithm `solver` +(defaults to Runge-Kutta (4,5) integrator). One can pass additional +options to the `solver` via keyword arguments to `solve` (here denoted +as `options`). The output is a `Solution` type (currently simply a +tuple of vectors `(Vector{T},Vector{Y})`, where `T,Y=eltype(ivp)`). + +""" + +function solve(ivp::IVP; opts...) + prob = iterate(ivp; opts...) + T,Y=eltype(prob).parameters + tout = Array(T,0) + yout = Array(Y,0) + dyout = Array(Y,0) for (t,y,dy) in prob - push!(pairs,(t,copy(y),copy(dy))) + push!(tout,t) + push!(yout,copy(y)) + push!(dyout,copy(dy)) end - return pairs + return Solution{T,Y}(tout,yout,dyout) end - """ - collect_vectors(prob::Problem) +Stores a solution to the `ivp` -Input: - -- iterator constructed by `solve` - -Output: +""" +immutable Solution{T,Y} + t::Vector{T} + y::Vector{Y} + dy::Vector{Y} +end -- `(tout,yout,dyout)` with `tout::Array{T}` containing subsequent - times, `yout::Vector{Y}` and `dyout::Vector{Y}` containig the vector - of solution and derivative respectively at corresponding `tout` - times. In other words `yout[i]` approximates `y(tout[i])` where `y` - is the true solution to an ODE. It could be interpreted as a - transpose of "`collect(prob)`". 
-""" -function collect_vectors(prob::Problem) - T,Y = eltype(prob) - tout = Array(T,0) - yout = Array(Y,0) - dyout = Array(Y,0) +# In Julia 0.5 the collect needs length to be defined, we cannot do +# that for a Problem but we can implement our own collect +function collect(prob::Problem) + pairs = Array(eltype(prob),0) for (t,y,dy) in prob - push!(tout,t) - push!(yout,copy(y)) - push!(dyout,copy(dy)) + push!(pairs,(t,copy(y),copy(dy))) end - return (tout,yout,dyout) + return pairs end - # Iteration: take one step on a IVP `Problem` # # Defines: diff --git a/src/tests/integrators.jl b/src/tests/integrators.jl index 6038b6316..7a823a0b6 100644 --- a/src/tests/integrators.jl +++ b/src/tests/integrators.jl @@ -36,13 +36,13 @@ function test_integrator(integrator,test) # 3) test the iterator interface # pure integrator - iterator = ODE.solve(ivp,integrator; opts...) - dense = ODE.solve(ivp,ODE.DenseOutput{integrator}; opts...) + iterator = ODE.iterate(ivp; solver=integrator, opts...) + iterator_dense = ODE.iterate(ivp; solver=ODE.DenseOutput{integrator}, opts...) tdir = sign(opts[:tstop]-ivp.t0) - for iter in (iterator,dense) - for (t,y,dy) in dense + for iter in (iterator,iterator_dense) + for (t,y,dy) in iter # TODO: is there a better way of doing this? We need # a single @test statement in case of a failure for # @testset to work properly. @@ -55,15 +55,21 @@ function test_integrator(integrator,test) break end end - end - # generator comprehension - @test all(collect((maxabs(y-sol(t))<=tol for (t,y) in iterator))) - @test all(collect((maxabs(y-sol(t))<=tol for (t,y) in dense))) + # generator comprehension + @test all(collect((maxabs(y-sol(t))<=tol for (t,y) in iter))) + end tout = opts[:tout] - @test collect((t for (t,y) in dense))==tout + @test collect((t for (t,y) in iterator_dense))==tout + # Solution type + solution = ODE.solve(ivp; solver=integrator, opts...) + solution_dense = ODE.solve(ivp; solver=ODE.DenseOutput{integrator}, opts...) 
+ + for s in (solution,solution_dense) + @test all(map((t,y)->maxabs(y-sol(t))<=tol,solution.t,solution.y)) + end end end diff --git a/src/top-interface.jl b/src/top-interface.jl index 3e9e85ef4..b72446a8d 100644 --- a/src/top-interface.jl +++ b/src/top-interface.jl @@ -15,14 +15,15 @@ function ode{T,Y,M<:AbstractSolver}(F, y0::Y, # construct a Problem equation = explicit_ineff(t0,y0,F;kargs...) if points == :all - prob = solve(equation, solver; - tout = tout, - kargs...) + prob = iterate(equation; + solver = solver, + tout = tout, + kargs...) elseif points == :specified - prob = solve(equation, - DenseOutput{solver}; - tout = tout, - kargs...) + prob = iterate(equation; + solver = DenseOutput{solver}, + tout = tout, + kargs...) else error("Unsupported points value (should be :all or :specified)") end From e3bff55ad5d9634431c59b7008211d00a616481e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 19 Aug 2016 12:36:13 +0200 Subject: [PATCH 100/113] Moved the ODETest to a submodule --- src/ODE.jl | 7 +-- test/iterators.jl | 125 ++-------------------------------------------- test/runtests.jl | 3 +- 3 files changed, 9 insertions(+), 126 deletions(-) diff --git a/src/ODE.jl b/src/ODE.jl index 8191a3254..e5b4fe671 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -49,12 +49,10 @@ include("integrators/rosenbrock.jl") # User interface to solvers include("top-interface.jl") -end # module ODE - """ -This module contains simple test functions for solvers/integrators +This submodule contains simple test functions for solvers/integrators compatible with ODE.jl. You can use it to test your custom solvers, for examples of how to use these functions see our tests in `test/` directory. 
@@ -75,3 +73,6 @@ include("tests/test_cases.jl") include("tests/integrators.jl") end + + +end # module ODE diff --git a/test/iterators.jl b/test/iterators.jl index 3c96f9dc1..4dd93b837 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -1,4 +1,4 @@ -# Testing function ode + const integrators = [ODE.RKIntegratorFixed{:feuler}, ODE.RKIntegratorFixed{:midpoint}, ODE.RKIntegratorFixed{:heun}, @@ -11,125 +11,8 @@ const integrators = [ODE.RKIntegratorFixed{:feuler}, ODE.ModifiedRosenbrockIntegrator ] -using ODETests - -function test_integrators() - @testset "Iterator interfaces" begin - for integ in integrators - ODETests.test_integrator(integ) - end +@testset "Iterator interfaces" begin + for integ in integrators + ODETests.test_integrator(integ) end end - -test_integrators() - -# function test_ode() -# tol = 0.002 - -# for integ in integrators -# println("Testing $integ") -# for ts in testsets -# println("Testing problem $(ts[:name])") - -# tout, h0, stepper = ts[:tout], ts[:initstep], integ - -# y0, F!, jac!, sol = ts[:y0], ts[:F!], ts[:jac], ts[:sol] - -# F(t,y) = (dy = similar(y); F!(t,y,dy); return dy) - -# for points = [:specified, :all] -# if ts[:isscalar] -# # test the ODE.odeXX scalar interface (if the equation is scalar) -# Fscal = (t,y)->F(t,[y])[1] -# y0scal = y0[1] -# # with jacobian -# tj,yj = ODE.ode(Fscal,y0scal,tout, -# solver = stepper, -# points = points, -# initstep = h0, -# J! = jac!) 
-# @test_approx_eq_eps yj map(x->sol(x)[1],tj) tol -# # without jacobian -# t,y = ODE.ode(Fscal,y0scal,tout, -# solver = stepper, -# points = points, -# initstep = h0) -# @test_approx_eq_eps y map(x->sol(x)[1],tj) tol - -# # results with and without jacobian should be exactly the same -# @test_approx_eq yj y - -# if points == :specified -# # test if we covered the whole timespan -# @test length(tout) == length(t) == length(tj) -# @test_approx_eq tout t -# @test_approx_eq tout tj -# end -# end - -# # ODE.odeXX vector interface -# # with jacobian -# tj,yj = ODE.ode(F,y0,tout, -# solver = stepper, -# points = points, -# initstep = h0, -# J! = jac!) -# @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol -# # without jacobian -# t,y = ODE.ode(F,y0,tout, -# solver = stepper, -# points = points, -# initstep = h0) -# @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol - -# @test_approx_eq hcat(yj...) hcat(y...) - -# # TODO: tests for `y::AbstractArray` -# # # ODE.odeXX array interface for arrays -# # # with jacobian -# # tj,yj = ODE.ode(F,reshape(y0,length(y0),1,1),tout, -# # solver = stepper, -# # points = points, -# # initstep = h0, -# # J! = jac!) -# # @test_approx_eq_eps hcat(yj...) hcat(map(sol,tj)...) tol -# # # without jacobian -# # t,y = ODE.ode(F,reshape(y0,length(y0),1,1),tout, -# # solver = stepper, -# # points = points, -# # initstep = h0) -# # @test_approx_eq_eps hcat(y...) hcat(map(sol,t)...) tol - -# # @test_approx_eq hcat(yj...) hcat(y...) - -# if points == :specified -# # test if we covered the whole timespan -# @test length(tout) == length(t) == length(tj) -# @test_approx_eq tout t -# @test_approx_eq tout tj -# end - -# # test the iterator interface -# equation = ODE.ExplicitODE(tout[1],y0,F!) -# opts = Dict(:tout => tout, -# :initstep => h0, -# :points => points) - -# iterator = ODE.solve(equation,stepper;opts...) -# for (t,y) in iterator -# @test_approx_eq_eps y sol(t) tol -# end - -# dense = ODE.solve(equation,ODE.DenseOutput{stepper}; opts...) 
-# for (t,y) in ODE.solve(equation,ODE.DenseOutput{stepper}; opts...) -# @test_approx_eq_eps y sol(t) tol -# end - -# # generator comprehension -# @test all(collect((norm(y-sol(t),Inf)<=tol for (t,y) in iterator))) -# @test all(collect((norm(y-sol(t),Inf)<=tol for (t,y) in dense))) -# @test collect((t for (t,y) in dense))==tout -# end -# end -# end -# end diff --git a/test/runtests.jl b/test/runtests.jl index 4e0af726b..812f1ea9a 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,9 +1,8 @@ using ODE +using ODE.ODETests using Base.Test include("iterators.jl") include("top-interface.jl") # TODO: do we still need this? # include("interface-tests.jl") - -println("All looks OK") From 003b3e1d92c18cdb5a1330adf7dc5387a03b52e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 19 Aug 2016 12:44:11 +0200 Subject: [PATCH 101/113] Removed an old `tdir` from dense output --- src/dense.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/dense.jl b/src/dense.jl index 1f988c6cd..cec08364d 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -48,8 +48,6 @@ end Base.length(dense::DenseOutput) = length(dense.opts.tout) -tdir(ivp, solver::DenseOutput) = sign(solver.opts.tstop - ivp.t0) - @compat function (::Type{DenseOutput{I}}){T,I}(ivp::IVP{T}; tstop = T(Inf), tout::AbstractVector{T} = T[tstop], From a173cef98d1d2914ecc1ccdf2ef897bbfaa442e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 19 Aug 2016 14:06:56 +0200 Subject: [PATCH 102/113] Updated docs --- docs/src/index.md | 124 ++++++++++++++++++++++++++++++++----------- docs/src/man/base.md | 9 ++++ src/base.jl | 19 ++++++- 3 files changed, 120 insertions(+), 32 deletions(-) diff --git a/docs/src/index.md b/docs/src/index.md index 4eaa20a7a..726159c7d 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -29,7 +29,52 @@ F(t,y) = y The vectors `t` and `y` store the time and solution values at the corresponding times. -You might find the basic interface limiting. 
First of all, it stores +## Solve interface + +`ODE.ode` only supports explicit differential equations defined as +`y'=F(t,y)`, for more advenced uses consider using `ODE.solve`, which +was designed to work with a variety of other types of initial value +problems and is optimized for better performance. First we have to +define an initial value problem, in our case this is an explicit +differential equation `y'=y` with inital data `y0=[1.0]` given at the +time `t0=0.0`. + +```@example solve +using ODE +t0 = 0.0 +y0 = [1.0] +F!(t,y,dy) = dy[1]=y[1] +ode = ODE.ExplicitODE(t0,y0,F!) +``` + +Note that unlike in `ODE.ode` we now have to supply an in place +function `F!` instead of an explicit function `F`. We can solve the +ODE problem `ode` by simply calling + +```@example solve +sol = ODE.solve(ode, tstop = 1) +``` + +This returns a `Solution` type, which stores the solution. You +probably noticed that we passed a keyword argument `tstop`, this is +the final time of integration which we have to specify because `tstop` +defaults to `Inf` and the integration would carry on forever. You can +access the solution with + +```@example solve +(t,y) = sol.t, sol.y +``` + +You can change the default algorithm (Runge-Kutta (4,5)) by passing an +optional argument `solver` + +```@example solve +sol = ODE.solve(ode, tstop = 1, solver = ODE.RKIntegratorAdaptive{:dopri5}) +``` + +For other options accepted by `solve` see [Options](/Options/) below. + +You might still find this interface limiting. First of all, it stores all the results, so if you are only interested in the final value of `y` it still stores all the intermediate steps. Secondly, you cannot process the results on the fly (e.g. plot the current state of a @@ -39,11 +84,9 @@ iterator interface. ## Iterator interface To offeset the limitations of the `ODE.ode` interface we implemented a -general. 
First we define an initial value problem, in our case this is -an explicit differential equation `y'=y` with inital data `y0=[1.0]` -given at the time `t0=0.0`. +general. We use the same problem as before as an example -```@example iterator +```@example iterate using ODE t0 = 0.0 y0 = [1.0] @@ -51,13 +94,12 @@ F!(t,y,dy) = dy[1]=y[1] ode = ODE.ExplicitODE(t0,y0,F!) ``` -Note that unlike in `ODE.ode` we now have to supply an in place -function `F!` instead of an explicit function `F`. Now we are ready -to produce the iterator that solvese to our problem. +Now we have full flow control over the solver, we can analyze the +intermediate results or interrupt the integration at any point. -```@example iterator -sol = ODE.solve(ode) -for (t,y) in sol +```@example iterate +iter = ODE.iterate(ode) +for (t,y) in iter @show (t,y) if t > 1 break @@ -65,42 +107,62 @@ for (t,y) in sol end ``` -Note that we had to interrupt the loop because `sol` would be -producing solutions ad infinitum (in theory, in practice we will get -to the point where the solver won't be able to produce reasonable -solution anymore). To set the final integration time and other -parameters of the integrator `integ` we can pass optional arguments to +Note that we had to break the loop because `sol` would keep producing +the results. To set the final integration time and other parameters +of the integrator `integ` we can pass optional arguments to `ODE.solver`. -```@example iterator -sol = ODE.solve(ode; tstop = 1) -for (t,y) in sol +```@example iterate +iter = ODE.iterate(ode; tstop = 1) +for (t,y) in iter @show (t,y) end ``` This approach has the added benefit of the solution never exceeding -the final time. Apart from the time and value `(t,y)` the `ODE.solve` -returns also the derivative, you can retrive it as the third argument -in the returned tuple. In the following example we use it compute the -absolute error. 
- -```@example iterator -sol = ODE.solve(ode; tstop = 1) -for (t,y,dy) in sol +the final time. Both `ODE.iterate` and `ODE.solve` support the same +options, so you can easily change the method of integration with the +keyword `solver`. + +Apart from the time and value `(t,y)` the `ODE.solve` also returns the +derivative, you can retrive it as the third argument in the returned +tuple. In the following example we use it compute the absolute +residual error (zero in this case). + +```@example iterate +iter = ODE.iterate(ode; tstop = 1) +for (t,y,dy) in iter err = norm(y-dy) @show err end ``` With `tstop` specified we can also get all results at once using -`collect`. +`collect` and other constructs working on iterators +(e.g. generators). For example + +```@example iterate +solution = collect(iter) +``` -```@example iterator -res = collect(sol) +returns a vector of triples `(t,y,dy)`. Or if you only wan the first +component of a solution you could simply use + +```@example iterate +y1 = collect(y[1] for (t,y) in iter) ``` -Note that `collect` returns a vector of triples `(t,y,dy)`. +There are, however, several caveats that you should take into account: + +1. Each time the iterator is collected the differential equation is + actually solved, which has potentially high computational cost and + might be inefficient. + +2. The `length` is undefined for the result of `ODE.iterate`, because + we don't know a priori how many steps the integration will require + (especially in the case of adaptive solvers). This means that the + functions requireing `length` might not work. For the same reason + there are no `getindex` methods. ## Options diff --git a/docs/src/man/base.md b/docs/src/man/base.md index b6a0327ee..535f2443e 100644 --- a/docs/src/man/base.md +++ b/docs/src/man/base.md @@ -8,6 +8,13 @@ The file `base.jl` implements the most basic iterator infrastructure for solvers and the definitions of the types representing general IVP (initial value problem) and solvers. 
+## General functions for solving initial value problems + +```@docs +solve +iterate +``` + ## Predefined types of initial value problems ```@docs @@ -16,12 +23,14 @@ IVP ExplicitODE ImplicitODE ``` + ## Solver architecture ```@docs AbstractSolver AbstractIntegrator AbstractState +Solution ``` The fallback constructor for `AbstractSolver(ivp::IVP;opts...)` ensures diff --git a/src/base.jl b/src/base.jl index e9eb94f85..a5b8aa73f 100644 --- a/src/base.jl +++ b/src/base.jl @@ -16,6 +16,10 @@ time and state variable respectively. """ abstract AbstractIVP{T,Y} + +""" +The "elements" of AbstractIVP are `t,y,dy` so the `eltype` returns `Tuple{T,Y,Y}` +""" Base.eltype{T,Y}(::Type{AbstractIVP{T,Y}}) = Tuple{T,Y,Y} """ @@ -285,7 +289,7 @@ end """ -Stores a solution to the `ivp` +Stores a solution to the `ivp`. """ immutable Solution{T,Y} @@ -294,6 +298,19 @@ immutable Solution{T,Y} dy::Vector{Y} end +""" +Experimental support for the interpolation +""" +function interpolate(sol::Solution, t) + if (t>sol.t[end]) | (t t>ti, sol.t) + theta = (t-sol.t[i])/(sol.t[i+1]-sol.t[i]) + return sol.y[i]+theta*(sol.y[i+1]-sol.y[i]) + end +end + # In Julia 0.5 the collect needs length to be defined, we cannot do # that for a Problem but we can implement our own collect From 6e48fe07788ed64003fe2b9d0f95f9075b09c55b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Tue, 23 Aug 2016 11:38:15 +0200 Subject: [PATCH 103/113] Fixed infinite tstop bug custom type test --- REQUIRE | 2 +- docs/src/index.md | 10 +++--- src/base.jl | 72 +++++++++++++++++++++----------------- src/dense.jl | 4 +-- src/options.jl | 8 ++--- src/tests/integrators.jl | 10 ++++++ src/tests/minimal_types.jl | 33 +++++++++-------- src/tests/test_cases.jl | 10 +++--- 8 files changed, 84 insertions(+), 65 deletions(-) diff --git a/REQUIRE b/REQUIRE index 62155a5ae..e53925cbf 100644 --- a/REQUIRE +++ b/REQUIRE @@ -1,4 +1,4 @@ -julia 0.4 +julia 0.5 Polynomials ForwardDiff Compat 0.4.1 diff --git 
a/docs/src/index.md b/docs/src/index.md index 726159c7d..ea3c6fdeb 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -98,8 +98,7 @@ Now we have full flow control over the solver, we can analyze the intermediate results or interrupt the integration at any point. ```@example iterate -iter = ODE.iterate(ode) -for (t,y) in iter +for (t,y) in ODE.iterate(ode) @show (t,y) if t > 1 break @@ -113,8 +112,7 @@ of the integrator `integ` we can pass optional arguments to `ODE.solver`. ```@example iterate -iter = ODE.iterate(ode; tstop = 1) -for (t,y) in iter +for (t,y) in ODE.iterate(ode; tstop = 1) @show (t,y) end ``` @@ -130,8 +128,7 @@ tuple. In the following example we use it compute the absolute residual error (zero in this case). ```@example iterate -iter = ODE.iterate(ode; tstop = 1) -for (t,y,dy) in iter +for (t,y,dy) in ODE.iterate(ode; tstop = 1) err = norm(y-dy) @show err end @@ -142,6 +139,7 @@ With `tstop` specified we can also get all results at once using (e.g. generators). For example ```@example iterate +iter = ODE.iterate(ode; tstop = 1) solution = collect(iter) ``` diff --git a/src/base.jl b/src/base.jl index a5b8aa73f..7b675b99b 100644 --- a/src/base.jl +++ b/src/base.jl @@ -10,9 +10,9 @@ """ AbstractIVP{T,Y} -A progenitor of types representing an IVP (initial value -problem). The type parameters `T` and `Y` correspond to the types of -time and state variable respectively. +The abstract supertype of all IVPs (initial value problems). The type +parameters `T` and `Y` correspond to the types of time and state +variable respectively. 
""" abstract AbstractIVP{T,Y} @@ -207,20 +207,20 @@ Base.length(prob::Problem) = length(prob.solver) Base.eltype{O,S}(::Type{Problem{O,S}}) = eltype(O) -if VERSION >= v"0.5.0-rc0" - """ - Makes some generic operations on iterators work, like - generator comprehensions: - tgen=(t for (t,y) in sol) - tout=collect(tgen) - or - errgen=(y-[exp(t)] for (t,y) in sol) - errout=collect(errgen) - - TODO: doesn't work for 0.4 and might have show issues due to non-copying output - """ - Base.iteratorsize{O,S}(::Type{Problem{O,S}}) = Base.SizeUnknown() -end +""" +Makes some generic operations on iterators work, like +generator comprehensions: + + tgen=(t for (t,y) in sol) + tout=collect(tgen) + +or + errgen=(y-[exp(t)] for (t,y) in sol) + errout=collect(errgen) + +TODO: doesn't work for 0.4 and might have issues with `show` due to non-copying output +""" +Base.iteratorsize{O,S}(::Type{Problem{O,S}}) = Base.SizeUnknown() """ iterate(ivp::IVP; solver=RKIntegratorAdaptive{:rk45}, opts...) @@ -274,8 +274,29 @@ tuple of vectors `(Vector{T},Vector{Y})`, where `T,Y=eltype(ivp)`). """ function solve(ivp::IVP; opts...) + + # TODO: perhaps there is a more + # graceful way to treat these + # cases. We only care about + # infinite `tstop` but if + # `tstop` is unspecified it + # defaults to `tout[end]`, with + # `tout` defaulting to + # `[Inf]`. Maybe we should add + # `tout(::Problem)` to fix this? + # Or maybe we could store + # `tstop` in `Problem`. + + dopts = Dict(opts) + if !in(:tstop,keys(dopts)) & !in(:tout,keys(dopts)) + error("Neither `tstop` nor `tout` was specified.") + end + if in(:tstop,keys(dopts)) & !isfinite(dopts[:tstop]) + error("Trying to integrate over an infinite time span, try specifying `|tstop|false, @@ -49,7 +49,7 @@ end Base.length(dense::DenseOutput) = length(dense.opts.tout) @compat function (::Type{DenseOutput{I}}){T,I}(ivp::IVP{T}; - tstop = T(Inf), + tstop = T(1//0), tout::AbstractVector{T} = T[tstop], opts...) 
if all(tout.>=ivp.t0) diff --git a/src/options.jl b/src/options.jl index bab07f0eb..6194c103e 100644 --- a/src/options.jl +++ b/src/options.jl @@ -31,7 +31,7 @@ immutable AdaptiveOptions{T,N<:Function,O<:Function} <: Options{T} end @compat function (::Type{AdaptiveOptions{T}}){T,N,O}(; - tout = [T(Inf)], + tout = [T(1//0)], tstop = tout[end], reltol = eps(T)^T(1//3)/T(10), abstol = eps(T)^T(1//2)/T(10), @@ -39,7 +39,7 @@ end maxstep = 1/minstep, initstep = eps(T)^T(1//3), norm::N = y->maxabs(y), - maxiters = T(Inf), + maxiters = T(1//0), isoutofdomain::O = Base.isnan, kargs...) @assert minstep>=T(0) && maxstep>=T(0) && initstep>=T(0) # TODO: move to inner constructor @@ -63,9 +63,9 @@ immutable FixedOptions{T} <: Options{T} end @compat function (::Type{FixedOptions{T}}){T}(; - tout = [T(Inf)], + tout = [T(1//0)], tstop = tout[end], - initstep = T(10)*eps(T), + initstep = T(1//100), kargs...) @assert initstep>=0 FixedOptions{T}(tstop,initstep) diff --git a/src/tests/integrators.jl b/src/tests/integrators.jl index 7a823a0b6..ab0e7df2f 100644 --- a/src/tests/integrators.jl +++ b/src/tests/integrators.jl @@ -70,6 +70,16 @@ function test_integrator(integrator,test) for s in (solution,solution_dense) @test all(map((t,y)->maxabs(y-sol(t))<=tol,solution.t,solution.y)) end + + # TODO: The following is not a test for a particular + # integrator but rather for the implementation of + # `ODE.solve`. It should be moved out to another test + # function. + + # If neither `tstop` nor `tout` was specified throw an error + @test_throws ErrorException ODE.solve(ivp; solver=integrator, delete!(delete!(Dict(opts),:tstop),:tout)...) + # If `tstop` was specified but is infinite + @test_throws ErrorException ODE.solve(ivp; solver=integrator, merge(Dict(opts), Dict(:tstop=>T(1//0)))...) 
end end diff --git a/src/tests/minimal_types.jl b/src/tests/minimal_types.jl index e57d50a8c..7f91faad4 100644 --- a/src/tests/minimal_types.jl +++ b/src/tests/minimal_types.jl @@ -3,7 +3,7 @@ import Base: +, -, *, /, ^ # for the vector type import Base: getindex, setindex!, similar # for the scalar type -import Base: eps, convert, promote_rule, sqrt +import Base: eps, convert, promote_rule, sqrt, isfinite # position variable @@ -29,41 +29,44 @@ setindex!{T}(p::Position{T},val::T,i::Int) = i==1 ? p.x=val : p.y=val # precision floats (like BigFloat) with lower precision constants, # which could result in decreasing the overall precision of the # algorithm. -immutable MyFloat{T} <: Real - t::T +immutable MyFloat <: Real + t::Float64 + # the dummy is here to prevent the use of the default constructor + dummy::Int end # we need these to construct MyFloat from constants, constants are # predefined in terms of numbers of inifinite precision such as Int or # Rational -convert{T}(::Type{MyFloat{T}},s::Rational) = MyFloat{T}(convert(T,s)) -convert{T}(::Type{MyFloat{T}},s::Integer) = MyFloat{T}(convert(T,s)) -promote_rule{T<:MyFloat,R<:Rational}(::Type{T},::Type{R}) = T -promote_rule{T<:MyFloat,R<:Integer}(::Type{T},::Type{R}) = T +convert(::Type{MyFloat},s::Rational) = MyFloat(convert(Float64,s),0) +convert(::Type{MyFloat},s::Integer) = MyFloat(convert(Float64,s),0) +promote_rule{R<:Rational}(::Type{MyFloat},::Type{R}) = MyFloat +promote_rule{R<:Integer}(::Type{MyFloat},::Type{R}) = MyFloat -eps{T}(::Type{MyFloat{T}}) = MyFloat{T}(eps(T)) +eps(::Type{MyFloat}) = MyFloat(eps(Float64),0) +isfinite(x::MyFloat) = isfinite(x.t) # necessary for the modified Rosenbrock integrator -sqrt{T}(t::MyFloat{T})=MyFloat{T}(sqrt(t.t)) +sqrt(t::MyFloat)=MyFloat(sqrt(t.t),0) # binary operators for op in (:+, :-, :*, :/, :^) - @eval ($op){T}(t1::MyFloat{T},t2::MyFloat{T}) = MyFloat{T}(($op)(t1.t,t2.t)) + @eval ($op)(t1::MyFloat,t2::MyFloat) = MyFloat(($op)(t1.t,t2.t),0) end # See #18114 
-^{T<:MyFloat}(x::T, y::Rational) = x^(convert(T,y.num)/convert(T,y.den)) +^(x::MyFloat, y::Rational) = x^(convert(MyFloat,y.num)/convert(MyFloat,y.den)) # unary operators for op in (:-,) - @eval ($op)(t::MyFloat) = MyFloat(($op)(t.t)) + @eval ($op)(t::MyFloat) = MyFloat(($op)(t.t),0) end # comparisons for op in (:<, :>, :>=, :<=) - @eval ($op){T}(t1::MyFloat{T},t2::MyFloat{T}) = ($op)(t1.t,t2.t) + @eval ($op)(t1::MyFloat,t2::MyFloat) = ($op)(t1.t,t2.t) end # these are only necessary because they are used in the definition of # the ODE test case (see test_cases.jl, :harmonic_minimal_types) -Base.sin{T}(t::MyFloat{T})=MyFloat{T}(sin(t.t)) -Base.cos{T}(t::MyFloat{T})=MyFloat{T}(cos(t.t)) +Base.sin(t::MyFloat)=MyFloat(sin(t.t),0) +Base.cos(t::MyFloat)=MyFloat(cos(t.t),0) diff --git a/src/tests/test_cases.jl b/src/tests/test_cases.jl index 3b0fa20d4..5e27114a7 100644 --- a/src/tests/test_cases.jl +++ b/src/tests/test_cases.jl @@ -66,15 +66,15 @@ const test_cases = ), :harmonic_minimal_types=> - Dict(:ivp => ExplicitODE(MyFloat(0.0), - Position(MyFloat(0.0),MyFloat(1.0)), + Dict(:ivp => ExplicitODE(MyFloat(0), + Position(MyFloat(0),MyFloat(1)), (t,y,dy)->(dy.x=y.y;dy.y=-y.x), J! 
= (t,y,J)->(J[:]=0;J[2,1]=1;J[1,2]=-1)), :sol => t->Position(sin(t),cos(t)), :name => "harmonic (minimal types)", - :options => Dict(:initstep => MyFloat(0.001), - :tstop => MyFloat(1.0), - :tout => [MyFloat(0.0),MyFloat(0.1),MyFloat(1.0)]) + :options => Dict(:initstep => MyFloat(1//1000), + :tstop => MyFloat(1), + :tout => [MyFloat(0),MyFloat(1//10),MyFloat(1)]) ), # TODO: ForwardDiff Jacobian doesn't seem to work with custom AbstractVector type From ed09054a30e98528d0d6aba8fbbaf1f21a86c953 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Tue, 23 Aug 2016 12:03:32 +0200 Subject: [PATCH 104/113] Better handling of function types in IVP --- src/base.jl | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/base.jl b/src/base.jl index 7b675b99b..fcae1aaba 100644 --- a/src/base.jl +++ b/src/base.jl @@ -60,7 +60,7 @@ end """ - typealias ExplicitODE{T,Y} IVP{T,Y,Function,Void,Function} + typealias ExplicitODE{T,Y,F,J} IVP{T,Y,F,Void,J} Can be constructed by calling @@ -75,9 +75,7 @@ Explicit ODE representing the problem - J!: (optional) computes `J=dF/dy` in place, called with `J!(t,y,J)` """ -typealias ExplicitODE{T,Y} IVP{T,Y,Function,Void,Function} -# TODO: -# typealias ExplicitODE{T,Y,F,J} IVP{T,Y,F,Void,J} +typealias ExplicitODE{T,Y,F,J} IVP{T,Y,F,Void,J} @compat function (::Type{ExplicitODE}){T,Y}(t0::T, y0::Y, F!::Function; @@ -86,7 +84,7 @@ typealias ExplicitODE{T,Y} IVP{T,Y,Function,Void,Function} # precompute y' dy0 = copy(y0) F!(t0,y0,dy0) - ExplicitODE{T,Y}(t0,y0,dy0,F!,nothing,J!) + IVP(t0,y0,dy0,F!,nothing,J!) end """ @@ -102,14 +100,14 @@ Implicit ODE representing the problem Returns Jacobian in-place in `out`. """ -typealias ImplicitODE{T,Y} IVP{T,Y,Void,Function,Function} +typealias ImplicitODE{T,Y,G,J} IVP{T,Y,Void,G,J} @compat function (::Type{ImplicitODE}){T,Y}(t0::T, y0::Y, G!::Function; J!::Function = forward_jacobian_implicit!(G!,similar(y0)), dy0::Y = zero(y0), kargs...) 
- ImplicitODE{T,Y}(t0,y0,dy0,nothing,G!,J!) + IVP(t0,y0,dy0,nothing,G!,J!) end """ From 91a1bfe32f5802ab82a1d2edc4d4d23e83ec98f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Tue, 23 Aug 2016 12:28:31 +0200 Subject: [PATCH 105/113] Comment cleanup --- src/ODE.jl | 2 +- src/base.jl | 14 +------------- src/dense.jl | 16 ++++++---------- src/integrators/runge-kutta.jl | 5 +---- 4 files changed, 9 insertions(+), 28 deletions(-) diff --git a/src/ODE.jl b/src/ODE.jl index e5b4fe671..73de9a648 100644 --- a/src/ODE.jl +++ b/src/ODE.jl @@ -12,7 +12,7 @@ Coding conventions: Variables and Type variables: - T -> t::T -- Y -> y::Y TODO: or Vector{Y}? +- Y -> y::Y """ diff --git a/src/base.jl b/src/base.jl index fcae1aaba..0a154d51c 100644 --- a/src/base.jl +++ b/src/base.jl @@ -156,16 +156,6 @@ output(st::AbstractState) = t,y,dy output(st::AbstractState) = st.step.t, st.step.y, st.step.dy - -# m3: -# - docs -# - maybe use the typevars as defined in make_consistent_types for t, -# y, dy? T->Et, S->Ty -# (or something else consistent throughout, maybe nicer would be all -# uppercase: ET, EFY, TT, TY). -# - if find `Step` a bit confusing name, in particular combined with -# AbstractIntegrator, but not sure what's better. - """ Holds a value of a function and its derivative at time t. This is @@ -215,8 +205,6 @@ generator comprehensions: or errgen=(y-[exp(t)] for (t,y) in sol) errout=collect(errgen) - -TODO: doesn't work for 0.4 and might have issues with `show` due to non-copying output """ Base.iteratorsize{O,S}(::Type{Problem{O,S}}) = Base.SizeUnknown() @@ -432,7 +420,7 @@ Input: Output: -- Bool: `false`: continue iteration, `true`: terminate iteration. +- Status substeps. 
diff --git a/src/dense.jl b/src/dense.jl index 6d41dab1a..dcb3df18b 100644 --- a/src/dense.jl +++ b/src/dense.jl @@ -6,20 +6,16 @@ Dense output options: - tout ::Vector{T} output times -TODO options: - -- points ::Symbol which points are returned: `:specified` only the - ones in tspan or `:all` which includes also the step-points of the solver. -- stopevent Stop integration at a zero of this function -- roottol - """ immutable DenseOptions{T<:Number,TO<:AbstractVector} <: Options{T} tout::TO - # points ::Symbol - # stopevent::S - # roottol ::T + + # Planned options: + # - points ::Symbol which points are returned: `:specified` only the + # ones in tspan or `:all` which includes also the step-points of the solver. + # - stopevent Stop integration at a zero of this function + # - roottol end @compat function (::Type{DenseOptions{T}}){T}(; diff --git a/src/integrators/runge-kutta.jl b/src/integrators/runge-kutta.jl index b1551f66e..6b1a11053 100644 --- a/src/integrators/runge-kutta.jl +++ b/src/integrators/runge-kutta.jl @@ -233,7 +233,6 @@ function trialstep!(ode::ExplicitODE, integ::RKIntegratorAdaptive, state::RKStat end if abs(dt) < opts.minstep - # TODO: use some sort of logging system warn("Minimum step size reached") return abort end @@ -354,7 +353,6 @@ function stepsize_hw92!{T}(work, # # TODO: # - allow component-wise reltol and abstol? - # - allow other norms ord = T(minimum(order(tableau))) timout_after_nan = 5 @@ -375,13 +373,12 @@ function stepsize_hw92!{T}(work, return T(10), dt*facmin, timout_after_nan end - y0 = last_step.y[d] # TODO: is this supposed to be the last successful step? + y0 = last_step.y[d] y1 = work.ynew[d] # the approximation to the next step sci = (opts.abstol + opts.reltol*max(norm(y0),norm(y1))) work.yerr[d] ./= sci # Eq 4.10 end - # TOOD: should we use opts.norm here as well? err = opts.norm(work.yerr) # Eq. 
4.11 newdt = sign(dt)*min(opts.maxstep, abs(dt)*clamp(fac*(1/err)^(1/(ord+1)),facmin,facmax)) # Eq 4.13 modified From c373406d4a2bd4a0c28e9db1d0fe2a5c7947da8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 25 Aug 2016 11:55:40 +0200 Subject: [PATCH 106/113] Switched to the Documenter HTML generation --- docs/make.jl | 14 ++++++++++++-- docs/mkdocs.yml | 32 -------------------------------- 2 files changed, 12 insertions(+), 34 deletions(-) delete mode 100644 docs/mkdocs.yml diff --git a/docs/make.jl b/docs/make.jl index 4b09c9b45..5ca0abe6d 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,8 +1,18 @@ using Documenter, ODE -makedocs() +makedocs( + format = Documenter.Formats.HTML, + sitename = "ODE", + pages = [ + "Home" => "index.md", + + "Manual" => [ "Basics" => "man/basics.md", + "Base" => "man/base.md" ] + ] + + ) deploydocs( repo = "github.com/JuliaODE/ODE.jl.git", - deps = Deps.pip("pygments", "mkdocs", "mkdocs-material", "python-markdown-math") + deps = Deps.pip("pygments", "python-markdown-math") ) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml deleted file mode 100644 index 81dcbad65..000000000 --- a/docs/mkdocs.yml +++ /dev/null @@ -1,32 +0,0 @@ -site_name: ODE.jl -repo_url: https://github.com/JuliaODE/ODE.jl -site_description: Julia package for solving differential equations - -theme: readthedocs - -extra_css: - - assets/Documenter.css - -markdown_extensions: - - extra - - tables - - fenced_code - - mdx_math - -extra_javascript: - - https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML - - assets/mathjaxhelper.js - -docs_dir: 'build' - -pages: -- Home: index.md -- Manual: - - Basics: man/basics.md - - Base: man/base.md - # - Integrators: - # - Solvers: - # - Options: -- Tutorials: - - Fixed step integrator: tutorials/euler_integrator.md - # - Adaptive step integrator: From d902e563a2deca54028250b9ef703423a8673bd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 7 Sep 2016 
09:06:10 +0200 Subject: [PATCH 107/113] Wrapped all the tests into testsets --- test/runtests.jl | 6 ++- test/top-interface.jl | 112 +++++++++++++++++++++--------------------- 2 files changed, 61 insertions(+), 57 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index 812f1ea9a..29a9a4035 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -2,7 +2,9 @@ using ODE using ODE.ODETests using Base.Test -include("iterators.jl") -include("top-interface.jl") +@testset "ODE tests" begin + include("iterators.jl") + include("top-interface.jl") +end # TODO: do we still need this? # include("interface-tests.jl") diff --git a/test/top-interface.jl b/test/top-interface.jl index 0df4c0b13..17f90f0d1 100644 --- a/test/top-interface.jl +++ b/test/top-interface.jl @@ -21,67 +21,69 @@ solvers = [ # adaptive ODE.ode23s] -for solver in solvers - tol = 1e-2 +@testset "Legacy interfaces" begin + for solver in solvers + @testset "$solver" begin + tol = 1e-2 - println("using $solver") + # dy + # -- = 6 ==> y = 6t + # dt + # we need to fix initstep for the fixed-step methods + t,y=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1) + @test maximum(abs(y-6t)) < tol + tj,yj=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1, J! = (t,y,dy)->dy[1]=0.0) + @test maximum(abs(yj-6tj)) < tol + @test norm(yj-y,Inf) y = 6t - # dt - # we need to fix initstep for the fixed-step methods - t,y=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1) - @test maximum(abs(y-6t)) < tol - tj,yj=solver((t,y)->6.0, 0., [0:.1:1;], initstep=.1, J! = (t,y,dy)->dy[1]=0.0) - @test maximum(abs(yj-6tj)) < tol - @test norm(yj-y,Inf) y = t.^2 + # dt + t,y =solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001) + @test maximum(abs(y-t.^2)) < tol + tj,yj=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001, J! 
= (t,y,dy)->dy[1]=0.0) + @test maximum(abs(yj-tj.^2)) < tol + @test norm(yj-y,Inf) y = t.^2 - # dt - t,y =solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001) - @test maximum(abs(y-t.^2)) < tol - tj,yj=solver((t,y)->2t, 0., [0:.001:1;], initstep=0.001, J! = (t,y,dy)->dy[1]=0.0) - @test maximum(abs(yj-tj.^2)) < tol - @test norm(yj-y,Inf) y = y0*e.^t + # dt + t,y=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001) + @test maximum(abs(y-e.^t)) < tol + tj,yj=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) + @test maximum(abs(yj-e.^tj)) < tol + @test norm(yj-y,Inf) y = y0*e.^t - # dt - t,y=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001) - @test maximum(abs(y-e.^t)) < tol - tj,yj=solver((t,y)->y, 1., [0:.001:1;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) - @test maximum(abs(yj-e.^tj)) < tol - @test norm(yj-y,Inf)y, 1., [1:-.001:0;], initstep=0.001) + @test maximum(abs(y-e.^(t-1))) < tol + tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) + @test maximum(abs(yj-e.^(tj-1))) < tol + @test norm(yj-y,Inf)y, 1., [1:-.001:0;], initstep=0.001) - @test maximum(abs(y-e.^(t-1))) < tol - tj,yj=solver((t,y)->y, 1., [1:-.001:0;], initstep=0.001, J! = (t,y,dy)->dy[1]=1.0) - @test maximum(abs(yj-e.^(tj-1))) < tol - @test norm(yj-y,Inf) v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) + # dt dt + # + # y = [v, w] + t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001) + ys = hcat(y...).' # convert Vector{Vector{Float}} to Matrix{Float} + @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol + tj,yj=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001, J! = (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]])) + ysj = hcat(yj...).' 
# convert Vector{Vector{Float}} to Matrix{Float} + @test maximum(abs(ysj-[cos(tj)-2*sin(tj) 2*cos(tj)+sin(tj)])) < tol + @test norm(map(norm,yj-y),Inf) v = v0*cos(t) - w0*sin(t), w = w0*cos(t) + v0*sin(t) - # dt dt - # - # y = [v, w] - t,y=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001) - ys = hcat(y...).' # convert Vector{Vector{Float}} to Matrix{Float} - @test maximum(abs(ys-[cos(t)-2*sin(t) 2*cos(t)+sin(t)])) < tol - tj,yj=solver((t,y)->[-y[2]; y[1]], [1., 2.], [0:.001:2*pi;], initstep=0.001, J! = (t,y,dy)->copy!(dy,Float64[[0,1] [-1,0]])) - ysj = hcat(yj...).' # convert Vector{Vector{Float}} to Matrix{Float} - @test maximum(abs(ysj-[cos(tj)-2*sin(tj) 2*cos(tj)+sin(tj)])) < tol - @test norm(map(norm,yj-y),Inf)2y, 0., [0,1]) - # test typeof(y0)==Vector{Int} does not throw - @test_throws ErrorException t,y=solver((t,y)->[2y], [0], [0,1]) - # test typeof(y0)==Int does not throw - @test_throws ErrorException t,y=solver((t,y)->2y, 0, [0,1]) - # test if we can deal with a mixed case - @test_throws ErrorException t,y=solver((t,y)->2y, Number[1,1.1,BigInt(1)], Rational[0,1]) + # test typeof(tspan)==Vector{Int} does not throw + @test_throws ErrorException t,y=solver((t,y)->2y, 0., [0,1]) + # test typeof(y0)==Vector{Int} does not throw + @test_throws ErrorException t,y=solver((t,y)->[2y], [0], [0,1]) + # test typeof(y0)==Int does not throw + @test_throws ErrorException t,y=solver((t,y)->2y, 0, [0,1]) + # test if we can deal with a mixed case + @test_throws ErrorException t,y=solver((t,y)->2y, Number[1,1.1,BigInt(1)], Rational[0,1]) + end + end end # Test negative starting times ODE.ode23s From fec75b92c9ffee319976720c096796496bf64cb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Wed, 7 Sep 2016 09:07:54 +0200 Subject: [PATCH 108/113] Removed interface-tests.jl --- test/interface-tests.jl | 105 ---------------------------------------- test/runtests.jl | 2 - 2 files changed, 107 deletions(-) delete mode 100644 
test/interface-tests.jl diff --git a/test/interface-tests.jl b/test/interface-tests.jl deleted file mode 100644 index 75d174cf6..000000000 --- a/test/interface-tests.jl +++ /dev/null @@ -1,105 +0,0 @@ -# Here are tests which test what interface the solvers require. - -################################################################################ -# This is to test a scalar-like state variable -# (due to @acroy: https://gist.github.com/acroy/28be4f2384d01f38e577) - -import Base: +, -, *, /, .+, .-, .*, ./ - -const delta0 = 0. -const V0 = 1. -const g0 = 0. - -# define custom type ... -immutable CompSol <: Number - rho::Matrix{Complex128} - x::Float64 - p::Float64 - - CompSol(r,x,p) = new(copy(r),x,p) -end - -# ... which has to support the following operations -# to work with odeX -Base.norm(y::CompSol, p::Float64) = maximum([Base.norm(y.rho, p) abs(y.x) abs(y.p)]) -Base.norm(y::CompSol) = norm(y::CompSol, 2.0) - -+(y1::CompSol, y2::CompSol) = CompSol(y1.rho+y2.rho, y1.x+y2.x, y1.p+y2.p) --(y1::CompSol, y2::CompSol) = CompSol(y1.rho-y2.rho, y1.x-y2.x, y1.p-y2.p) -*(y1::CompSol, s::Real) = CompSol(y1.rho*s, y1.x*s, y1.p*s) -*(s::Bool, y1::CompSol) = false -*(s::Real, y1::CompSol) = y1*s -/(y1::CompSol, s::Real) = CompSol(y1.rho/s, y1.x/s, y1.p/s) - -### new for PR #68 -Base.abs(y::CompSol) = norm(y, 2.) # TODO not needed anymore once https://github.com/JuliaLang/julia/pull/11043 is in current stable julia -Base.abs2(y::CompSol) = norm(y, 2.) - -Base.zero(::Type{CompSol}) = CompSol(complex(zeros(2,2)), 0., 0.) -Base.zero(::CompSol) = zero(CompSol) -# TODO: This is now an option and has to be passed to the -# solvers. Looks ugly and a kind of a pain to handle. -isoutofdomain(y::CompSol) = any(isnan, vcat(y.rho[:], y.x, y.p)) -# TODO: We should decide on which version do we pick. The isnan -# variant seems to be causing less trouble (see the allocation comment -# in runge_kutta.jl). 
-Base.isnan(y::CompSol) = any(isnan, vcat(y.rho[:], y.x, y.p)) - -# Because the new RK solvers wrap scalars in an array and because of -# https://github.com/JuliaLang/julia/issues/11053 these are also needed: -.+(y1::CompSol, y2::CompSol) = CompSol(y1.rho+y2.rho, y1.x+y2.x, y1.p+y2.p) -.-(y1::CompSol, y2::CompSol) = CompSol(y1.rho-y2.rho, y1.x-y2.x, y1.p-y2.p) -.*(y1::CompSol, s::Real) = CompSol(y1.rho*s, y1.x*s, y1.p*s) -.*(s::Real, y1::CompSol) = y1*s -./(y1::CompSol, s::Real) = CompSol(y1.rho/s, y1.x/s, y1.p/s) - -################################################################################ - -# define RHSs of differential equations -# delta, V and g are parameters -function rhs(t, y, delta, V, g) - H = [[-delta/2 V]; - [V delta/2]] - - rho_dot = -im*H*y.rho + im*y.rho*H - x_dot = y.p - p_dot = -y.x - - return CompSol( rho_dot, x_dot, p_dot) -end - -# inital conditons -rho0 = zeros(2,2); -rho0[1,1]=1.; -y0 = CompSol(complex(rho0), 2., 1.) - -# solve ODEs -endt = 2*pi; - -F(t,y)=rhs(t, y, delta0, V0, g0) -t,y1 = ODE.ode45(F, y0, [0., endt], - reltol=1e-8,abstol=1e-5, - isoutofdomain = isoutofdomain) # used as reference - -println("Testing interface for scalar-like state... ") -for solver in solvers - println("Testing $solver") - # these only work with some Array-like interface defined: - if solver in solvers # [ODE.ode23s] # , ODE.ode4s_s, ODE.ode4s_kr - continue - end - tout = collect(linspace(0., endt, 5)) - t,y2 = solver(F, y0, tout, - abstol=1e-8,reltol=1e-5, - initstep=1e-4, - isoutofdomain = isoutofdomain) - break - @test norm(y1[end]-y2[end])<0.1 - - # test that typeof(tspan)==Vector{Int} does not throw: - t,y2 = solver((t,y)->rhs(t, y, delta0, V0, g0), y0, [0,1]) -end -println("ok.") - -################################################################################ -# TODO: test a vector-like state variable, i.e. one which can be indexed. 
diff --git a/test/runtests.jl b/test/runtests.jl index 29a9a4035..27be75b6f 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -6,5 +6,3 @@ using Base.Test include("iterators.jl") include("top-interface.jl") end -# TODO: do we still need this? -# include("interface-tests.jl") From ce9864b8e8cbadb065e41d7e5da72c3626d96dc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Thu, 8 Sep 2016 11:43:25 +0200 Subject: [PATCH 109/113] Update REQUIRE --- REQUIRE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/REQUIRE b/REQUIRE index e53925cbf..ffc6c5b9f 100644 --- a/REQUIRE +++ b/REQUIRE @@ -1,4 +1,4 @@ -julia 0.5 +julia 0.5- Polynomials ForwardDiff Compat 0.4.1 From 9f02b2013e2c7b999002c2fb3faf3e3cd0ef5c6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 9 Sep 2016 11:23:45 +0200 Subject: [PATCH 110/113] Temporary fix: disabled building on release Because of backward compatibility issues ODE.jl no longer passes tests on release branch (v0.4). Once 0.5 is out we should re-enable it. 
--- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 94720fd6a..18fe21d2c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ os: - osx - linux julia: - - release +# - release - nightly git: depth: 999999 From 9e7ccfdf667812293426e135c2fb1c5caede98a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 16 Sep 2016 10:48:28 +0200 Subject: [PATCH 111/113] Fixing the docs --- .travis.yml | 2 +- docs/make.jl | 15 +++++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 18fe21d2c..a28bc851d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ os: - osx - linux julia: -# - release + - 0.5 - nightly git: depth: 999999 diff --git a/docs/make.jl b/docs/make.jl index 5ca0abe6d..32fdf7932 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -2,17 +2,24 @@ using Documenter, ODE makedocs( format = Documenter.Formats.HTML, - sitename = "ODE", + modules = [ODE], + clean = false, + sitename = "ODE.jl", pages = [ "Home" => "index.md", - "Manual" => [ "Basics" => "man/basics.md", - "Base" => "man/base.md" ] + "Manual" => [ + "Basics" => "man/basics.md", + "Base" => "man/base.md" + ] ] ) deploydocs( repo = "github.com/JuliaODE/ODE.jl.git", - deps = Deps.pip("pygments", "python-markdown-math") + target = "build", + julia = "0.5", + deps = nothing, + make = nothing ) From e5c4b26eef1c9ffbde6a709fe400c7be40d67ec4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 16 Sep 2016 11:14:28 +0200 Subject: [PATCH 112/113] Removed obsolate badges --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 3af94c65f..39f99e199 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,8 @@ Various basic Ordinary Differential Equation solvers implemented in Julia. 
[![Join the chat at https://gitter.im/pwl/ODE.jl](https://badges.gitter.im/pwl/ODE.jl.svg)](https://gitter.im/pwl/ODE.jl?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Build Status](https://travis-ci.org/JuliaODE/ODE.jl.svg?branch=master)](https://travis-ci.org/JuliaODE/ODE.jl) [![Coverage Status](https://img.shields.io/coveralls/JuliaODE/ODE.jl.svg)](https://coveralls.io/r/JuliaODE/ODE.jl) -[![ODE](http://pkg.julialang.org/badges/ODE_0.4.svg)](http://pkg.julialang.org/?pkg=ODE&ver=0.4) [![ODE](http://pkg.julialang.org/badges/ODE_0.5.svg)](http://pkg.julialang.org/?pkg=ODE&ver=0.5) -[![](https://img.shields.io/badge/docs-stable-blue.svg)](https://JuliaODE.github.io/ODE.jl/stable) + [![](https://img.shields.io/badge/docs-latest-blue.svg)](https://JuliaODE.github.io/ODE.jl/latest) Pull requests are always highly welcome to fix bugs, add solvers, or anything else! From e47748ba852ebf4f9b1acf4ce0771e577bbe92e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Biernat?= Date: Fri, 16 Sep 2016 11:22:40 +0200 Subject: [PATCH 113/113] Added back pygments dependency --- docs/make.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/make.jl b/docs/make.jl index 32fdf7932..3df633a3c 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -20,6 +20,6 @@ deploydocs( repo = "github.com/JuliaODE/ODE.jl.git", target = "build", julia = "0.5", - deps = nothing, + deps = Deps.pip("pygments", "python-markdown-math"), make = nothing )