Commit

Merge pull request #2542 from mcarmesin/master
Add tests for LinearExponential() on GPU
ChrisRackauckas authored Nov 30, 2024
2 parents b37a5cc + ee8ea06 commit c7b8a80
Showing 3 changed files with 38 additions and 3 deletions.
6 changes: 3 additions & 3 deletions lib/OrdinaryDiffEqLinear/src/linear_caches.jl
@@ -571,9 +571,9 @@ function _phiv_timestep_caches(u_prototype, maxiter::Int, p::Int)
n = length(u_prototype)
T = eltype(u_prototype)
u = zero(u_prototype) # stores the current state
-W = Matrix{T}(undef, n, p + 1) # stores the w vectors
-P = Matrix{T}(undef, n, p + 2) # stores output from phiv!
-Ks = KrylovSubspace{T}(n, maxiter) # stores output from arnoldi!
+W = similar(u_prototype, n, p+1) # stores the w vectors
+P = similar(u_prototype, n, p+2) # stores output from phiv!
+Ks = KrylovSubspace{T,T,typeof(similar(u_prototype,size(u_prototype,1),2))}(n, maxiter) # stores output from arnoldi!
phiv_cache = PhivCache(u_prototype, maxiter, p + 1) # cache used by phiv! (need +1 for error estimation)
return u, W, P, Ks, phiv_cache
end
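The diff above replaces the CPU-only allocations Matrix{T}(undef, ...) with similar(u_prototype, ...) and parameterizes KrylovSubspace on the prototype's array type, so the phiv! caches are allocated on the same device as the state. A minimal sketch of the idea, assuming CUDA is available (illustrative only, not part of the commit):

using CUDA

u_cpu = ones(2)          # host state
u_gpu = cu(ones(2))      # device state (CuVector{Float32})

# similar() follows the prototype's array type, so cache matrices land on the
# same device as the state; Matrix{T}(undef, ...) would always be host memory.
W_cpu = similar(u_cpu, 2, 3)   # Matrix{Float64}
W_gpu = similar(u_gpu, 2, 3)   # CuMatrix{Float32}

@assert W_cpu isa Matrix{Float64}
@assert W_gpu isa CuArray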
34 changes: 34 additions & 0 deletions test/gpu/linear_exp.jl
@@ -0,0 +1,34 @@
using LinearAlgebra
using SparseArrays
using CUDA
using CUDA.CUSPARSE
using OrdinaryDiffEq

# Linear exponential solvers
A = MatrixOperator([2.0 -1.0; -1.0 2.0])
u0 = ones(2)

A_gpu = MatrixOperator(cu([2.0 -1.0; -1.0 2.0]))
u0_gpu = cu(ones(2))
prob_gpu = ODEProblem(A_gpu, u0_gpu, (0.0, 1.0))

sol_analytic = exp(1.0 * Matrix(A)) * u0

@test_broken sol1_gpu = solve(prob_gpu, LinearExponential(krylov = :off))(1.0) |> Vector
sol2_gpu = solve(prob_gpu, LinearExponential(krylov = :simple))(1.0) |> Vector
sol3_gpu = solve(prob_gpu, LinearExponential(krylov = :adaptive))(1.0) |> Vector

@test_broken isapprox(sol1_gpu, sol_analytic, rtol = 1e-6)
@test isapprox(sol2_gpu, sol_analytic, rtol = 1e-6)
@test isapprox(sol3_gpu, sol_analytic, rtol = 1e-6)

A2_gpu = MatrixOperator(cu(sparse([2.0 -1.0; -1.0 2.0])))
prob2_gpu = ODEProblem(A2_gpu, u0_gpu, (0.0, 1.0))

@test_broken sol2_1_gpu = solve(prob2_gpu, LinearExponential(krylov = :off))(1.0) |> Vector
sol2_2_gpu = solve(prob2_gpu, LinearExponential(krylov = :simple))(1.0) |> Vector
sol2_3_gpu = solve(prob2_gpu, LinearExponential(krylov = :adaptive))(1.0) |> Vector

@test_broken isapprox(sol2_1_gpu, sol_analytic, rtol = 1e-6)
@test isapprox(sol2_2_gpu, sol_analytic, rtol = 1e-6)
@test isapprox(sol2_3_gpu, sol_analytic, rtol = 1e-6)
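For reference, a CPU analogue of the checks above would look like the following sketch (not part of this commit; it mirrors the GPU tests with the same operator, initial condition, and tolerance, and assumes all three krylov modes converge on the CPU):

using Test, OrdinaryDiffEq, LinearAlgebra

A = MatrixOperator([2.0 -1.0; -1.0 2.0])
u0 = ones(2)
prob = ODEProblem(A, u0, (0.0, 1.0))
sol_analytic = exp(1.0 * Matrix(A)) * u0

for krylov in (:off, :simple, :adaptive)
    sol = solve(prob, LinearExponential(krylov = krylov))(1.0)
    @test isapprox(sol, sol_analytic, rtol = 1e-6)
end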
1 change: 1 addition & 0 deletions test/runtests.jl
@@ -172,6 +172,7 @@ end
end
@time @safetestset "Autoswitch GPU" include("gpu/autoswitch.jl")
@time @safetestset "Linear LSRK GPU" include("gpu/linear_lsrk.jl")
@time @safetestset "Linear Exponential GPU" include("gpu/linear_exp.jl")
@time @safetestset "Reaction-Diffusion Stiff Solver GPU" include("gpu/reaction_diffusion_stiff.jl")
@time @safetestset "Scalar indexing bug bypass" include("gpu/hermite_test.jl")
end
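To run the new test locally, one would presumably select the GPU group the same way the other GPU tests are selected; a hypothetical invocation, assuming the usual GROUP environment-variable switch in test/runtests.jl and a CUDA-capable device:

# Hypothetical: assumes test/runtests.jl gates the GPU testset on ENV["GROUP"] == "GPU".
ENV["GROUP"] = "GPU"
using Pkg
Pkg.test("OrdinaryDiffEq")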
