From b38166f3e6731c6dc23b4573f593110ed6e44116 Mon Sep 17 00:00:00 2001 From: Yueh-Hua Tu Date: Wed, 13 Sep 2023 17:08:25 +0800 Subject: [PATCH 1/2] split cuda tests and fix GraphSignals.adjacency_matrix --- ext/GraphSignalsCUDAExt/linalg.jl | 7 +- test/cuda.jl | 350 ------------------------------ test/cuda/featuredgraph.jl | 45 ++++ test/cuda/graphdomain.jl | 29 +++ test/cuda/linalg.jl | 191 ++++++++++++++++ test/cuda/sparsegraph.jl | 76 +++++++ test/cuda/sparsematrix.jl | 11 + test/runtests.jl | 10 +- 8 files changed, 366 insertions(+), 353 deletions(-) delete mode 100644 test/cuda.jl create mode 100644 test/cuda/featuredgraph.jl create mode 100644 test/cuda/graphdomain.jl create mode 100644 test/cuda/linalg.jl create mode 100644 test/cuda/sparsegraph.jl create mode 100644 test/cuda/sparsematrix.jl diff --git a/ext/GraphSignalsCUDAExt/linalg.jl b/ext/GraphSignalsCUDAExt/linalg.jl index d695eed..b370f55 100644 --- a/ext/GraphSignalsCUDAExt/linalg.jl +++ b/ext/GraphSignalsCUDAExt/linalg.jl @@ -8,9 +8,12 @@ function GraphSignals.adjacency_matrix(adj::CuSparseMatrixCSC) return CuMatrix(adj) end -adjacency_matrix(adj::CuMatrix{T}, ::Type{T}) where {T} = adjacency_matrix(adj) +function GraphSignals.adjacency_matrix(adj::CuMatrix{T}, ::Type{S}) where {T,S} + GraphSignals._dim_check(adj) + return CuMatrix{S}(adj) +end -function adjacency_matrix(adj::CuMatrix) +function GraphSignals.adjacency_matrix(adj::CuMatrix) GraphSignals._dim_check(adj) return adj end diff --git a/test/cuda.jl b/test/cuda.jl deleted file mode 100644 index 3787530..0000000 --- a/test/cuda.jl +++ /dev/null @@ -1,350 +0,0 @@ -@testset "cuda" begin - T = Float32 - - @testset "CuSparseMatrixCSC" begin - adjm = cu(sparse( - T[0 1 0 1; - 1 0 1 0; - 0 1 0 1; - 1 0 1 0])) - @test collect(rowvals(adjm, 2)) == [1, 3] - @test collect(nonzeros(adjm, 2)) == [1, 1] - - @test GraphSignals.adjacency_matrix(adjm, Int32) isa CuMatrix{Int32} - @test GraphSignals.adjacency_matrix(adjm) isa CuMatrix{T} - end - - @testset "SparseGraph" begin - @testset "undirected graph" begin - # undirected graph with self loop - V = 5 - E = 5 - ef = cu(rand(10, E)) - - adjm = T[0 1 0 1 1; - 1 0 0 0 0; - 0 0 1 0 0; - 1 0 0 0 1; - 1 0 0 1 0] - - adjl = Vector{T}[ - [2, 4, 5], - [1], - [3], - [1, 5], - [1, 4] - ] - - sg = SparseGraph(adjm, false) |> gpu - @test (collect(sg.S) .!= 0) == adjm - @test sg.S isa CUSPARSE.CuSparseMatrixCSC{T} - @test collect(sg.edges) == [1, 3, 4, 1, 2, 3, 5, 4, 5] - @test sg.edges isa CuVector - @test sg.E == E - @test nv(sg) == V - @test ne(sg) == E - @test collect(neighbors(sg, 1)) == adjl[1] - @test collect(neighbors(sg, 2)) == adjl[2] - @test collect(GraphSignals.dsts(sg)) == [1, 3, 1, 1, 4] - @test collect(GraphSignals.srcs(sg)) == [2, 3, 4, 5, 5] - @test_throws ArgumentError GraphSignals.aggregate_index(sg, :edge, :in) - @test random_walk(sg, 1) ⊆ [2, 4, 5] - @test neighbor_sample(sg, 1) ⊆ [2, 4, 5] - end - - @testset "directed graph" begin - # directed graph with self loop - V = 5 - E = 7 - ef = cu(rand(10, E)) - - adjm = T[0 0 1 0 1; - 1 0 0 0 0; - 0 0 0 0 0; - 0 0 1 1 1; - 1 0 0 0 0] - - adjl = Vector{T}[ - [2, 5], - [], - [1, 4], - [4], - [1, 4], - ] - - sg = SparseGraph(adjm, true) |> gpu - @test (collect(sg.S) .!= 0) == adjm - @test sg.S isa CUSPARSE.CuSparseMatrixCSC{T} - @test collect(sg.edges) == collect(1:7) - @test sg.edges isa CuVector - @test sg.E == E - @test nv(sg) == V - @test ne(sg) == E - @test collect(neighbors(sg, 1)) == adjl[1] - @test collect(neighbors(sg, 3)) == adjl[3] - @test Array(GraphSignals.dsts(sg)) 
== [2, 5, 1, 4, 4, 1, 4] - @test Array(GraphSignals.srcs(sg)) == [1, 1, 3, 3, 4, 5, 5] - @test random_walk(sg, 1) ⊆ [2, 5] - @test neighbor_sample(sg, 1) ⊆ [2, 5] - end - end - - @testset "graphdomains" begin - d = GraphSignals.NullDomain() |> gpu - @test isnothing(GraphSignals.domain(d)) - @test isnothing(positional_feature(d)) - @test !has_positional_feature(d) - - pf = rand(T, 2, 3, 4) - d = GraphSignals.NodeDomain(pf) |> gpu - @test collect(GraphSignals.domain(d)) == pf - @test collect(positional_feature(d)) == pf - @test has_positional_feature(d) - - V = 5 - nf = rand(10, V) - pf = rand(10, V) - - adjm = T[0 1 0 1 1; - 1 0 0 0 0; - 0 0 1 0 0; - 1 0 0 0 1; - 1 0 0 1 0] - - fg = FeaturedGraph(adjm; nf=nf, pf=pf) |> gpu - gs = gradient(x -> sum(positional_feature(FeaturedGraph(x))), fg)[1] - @test :domain in keys(gs.pf) - @test gs.pf.domain isa CuArray - end - - @testset "featuredgraph" begin - @testset "undirected graph" begin - # undirected graph with self loop - V = 5 - E = 5 - nf = rand(10, V) - - adjm = T[0 1 0 1 1; - 1 0 0 0 0; - 0 0 1 0 0; - 1 0 0 0 1; - 1 0 0 1 0] - - fg = FeaturedGraph(adjm; directed=:undirected, nf=nf) |> gpu - @test has_graph(fg) - @test has_node_feature(fg) - @test !has_edge_feature(fg) - @test !has_global_feature(fg) - @test graph(fg) isa SparseGraph - @test node_feature(fg) isa CuMatrix{T} - end - - @testset "directed graph" begin - # directed graph with self loop - V = 5 - E = 7 - nf = rand(10, V) - - adjm = T[0 0 1 0 1; - 1 0 0 0 0; - 0 0 0 0 0; - 0 0 1 1 1; - 1 0 0 0 0] - - fg = FeaturedGraph(adjm; directed=:directed, nf=nf) |> gpu - @test has_graph(fg) - @test has_node_feature(fg) - @test !has_edge_feature(fg) - @test !has_global_feature(fg) - @test graph(fg) isa SparseGraph - @test node_feature(fg) isa CuMatrix{T} - end - end - - @testset "linalg" begin - in_channel = 3 - out_channel = 5 - N = 6 - - adjs = Dict( - :simple => [0. 1. 1. 0. 0. 0.; - 1. 0. 1. 0. 1. 0.; - 1. 1. 0. 1. 0. 1.; - 0. 0. 1. 0. 0. 0.; - 0. 1. 0. 0. 0. 0.; - 0. 0. 1. 0. 0. 0.], - :weight => [0. 2. 2. 0. 0. 0.; - 2. 0. 1. 0. 2. 0.; - 2. 1. 0. 5. 0. 2.; - 0. 0. 5. 0. 0. 0.; - 0. 2. 0. 0. 0. 0.; - 0. 0. 2. 0. 0. 0.], - ) - - degs = Dict( - :simple => [2. 0. 0. 0. 0. 0.; - 0. 3. 0. 0. 0. 0.; - 0. 0. 4. 0. 0. 0.; - 0. 0. 0. 1. 0. 0.; - 0. 0. 0. 0. 1. 0.; - 0. 0. 0. 0. 0. 1.], - :weight => [4. 0. 0. 0. 0. 0.; - 0. 5. 0. 0. 0. 0.; - 0. 0. 10. 0. 0. 0.; - 0. 0. 0. 5. 0. 0.; - 0. 0. 0. 0. 2. 0.; - 0. 0. 0. 0. 0. 2.] - ) - - laps = Dict( - :simple => [2. -1. -1. 0. 0. 0.; - -1. 3. -1. 0. -1. 0.; - -1. -1. 4. -1. 0. -1.; - 0. 0. -1. 1. 0. 0.; - 0. -1. 0. 0. 1. 0.; - 0. 0. -1. 0. 0. 1.], - :weight => [4. -2. -2. 0. 0. 0.; - -2. 5. -1. 0. -2. 0.; - -2. -1. 10. -5. 0. -2.; - 0. 0. -5. 5. 0. 0.; - 0. -2. 0. 0. 2. 0.; - 0. 0. -2. 0. 0. 2.], - ) - - norm_laps = Dict( - :simple => [1. -1/sqrt(2*3) -1/sqrt(2*4) 0. 0. 0.; - -1/sqrt(2*3) 1. -1/sqrt(3*4) 0. -1/sqrt(3) 0.; - -1/sqrt(2*4) -1/sqrt(3*4) 1. -1/2 0. -1/2; - 0. 0. -1/2 1. 0. 0.; - 0. -1/sqrt(3) 0. 0. 1. 0.; - 0. 0. -1/2 0. 0. 1.], - :weight => [1. -2/sqrt(4*5) -2/sqrt(4*10) 0. 0. 0.; - -2/sqrt(4*5) 1. -1/sqrt(5*10) 0. -2/sqrt(2*5) 0.; - -2/sqrt(4*10) -1/sqrt(5*10) 1. -5/sqrt(5*10) 0. -2/sqrt(2*10); - 0. 0. -5/sqrt(5*10) 1. 0. 0.; - 0. -2/sqrt(2*5) 0. 0. 1. 0.; - 0. 0. -2/sqrt(2*10) 0. 0. 1.] - ) - - @testset "undirected graph" begin - adjm = [0 1 0 1; - 1 0 1 0; - 0 1 0 1; - 1 0 1 0] - deg = [2 0 0 0; - 0 2 0 0; - 0 0 2 0; - 0 0 0 2] - isd = [√2, √2, √2, √2] - lap = [2 -1 0 -1; - -1 2 -1 0; - 0 -1 2 -1; - -1 0 -1 2] - norm_lap = [1. -.5 0. 
-.5; - -.5 1. -.5 0.; - 0. -.5 1. -.5; - -.5 0. -.5 1.] - scaled_lap = [0 -0.5 0 -0.5; - -0.5 0 -0.5 -0; - 0 -0.5 0 -0.5; - -0.5 0 -0.5 0] - rw_lap = [1 -.5 0 -.5; - -.5 1 -.5 0; - 0 -.5 1 -.5; - -.5 0 -.5 1] - - fg = FeaturedGraph(T.(adjm)) |> gpu - @test collect(GraphSignals.adjacency_matrix(fg)) == adjm - @test collect(GraphSignals.degrees(fg; dir=:both)) == [2, 2, 2, 2] - D = GraphSignals.degree_matrix(fg, T, dir=:out) - @test collect(D) == T.(deg) - @test GraphSignals.degree_matrix(fg, T; dir=:in) == D - @test GraphSignals.degree_matrix(fg, T; dir=:both) == D - @test eltype(D) == T - L = Graphs.laplacian_matrix(fg, T) - @test collect(L) == T.(lap) - @test eltype(L) == T - - NA = GraphSignals.normalized_adjacency_matrix(fg, T) - @test collect(NA) ≈ T.(I - norm_lap) - @test eltype(NA) == T - - NA = GraphSignals.normalized_adjacency_matrix(fg, T, selfloop=true) - @test eltype(NA) == T - - NL = GraphSignals.normalized_laplacian(fg, T) - @test collect(NL) ≈ T.(norm_lap) - @test eltype(NL) == T - - SL = GraphSignals.scaled_laplacian(fg, T) - @test collect(SL) ≈ T.(scaled_lap) - @test eltype(SL) == T - - # RW = GraphSignals.random_walk_laplacian(fg, T) - # @test RW == T.(rw_lap) - # @test eltype(RW) == T - end - - # @testset "directed" begin - # adjm = [0 2 0 3; - # 0 0 4 0; - # 2 0 0 1; - # 0 0 0 0] - # degs = Dict( - # :out => diagm(0=>[2, 2, 4, 4]), - # :in => diagm(0=>[5, 4, 3, 0]), - # :both => diagm(0=>[7, 6, 7, 4]), - # ) - # laps = Dict( - # :out => degs[:out] - adjm, - # :in => degs[:in] - adjm, - # :both => degs[:both] - adjm, - # ) - # norm_laps = Dict( - # :out => I - diagm(0=>[1/2, 1/2, 1/4, 1/4])*adjm, - # :in => I - diagm(0=>[1/5, 1/4, 1/3, 0])*adjm, - # ) - # sig_laps = Dict( - # :out => degs[:out] + adjm, - # :in => degs[:in] + adjm, - # :both => degs[:both] + adjm, - # ) - # rw_laps = Dict( - # :out => I - diagm(0=>[1/2, 1/2, 1/4, 1/4]) * adjm, - # :in => I - diagm(0=>[1/5, 1/4, 1/3, 0]) * adjm, - # :both => I - diagm(0=>[1/7, 1/6, 1/7, 1/4]) * adjm, - # ) - - # for g in [adjm, sparse(adjm)] - # for dir in [:out, :in, :both] - # D = GraphSignals.degree_matrix(g, T, dir=dir) - # @test D == T.(degs[dir]) - # @test eltype(D) == T - - # L = Graphs.laplacian_matrix(g, T, dir=dir) - # @test L == T.(laps[dir]) - # @test eltype(L) == T - - # SL = GraphSignals.signless_laplacian(g, T, dir=dir) - # @test SL == T.(sig_laps[dir]) - # @test eltype(SL) == T - # end - # @test_throws DomainError GraphSignals.degree_matrix(g, dir=:other) - # end - - # for g in [adjm, sparse(adjm)] - # for dir in [:out, :in] - # L = normalized_laplacian(g, T, dir=dir) - # @test L == T.(norm_laps[dir]) - # @test eltype(L) == T - # end - - # for dir in [:out, :in, :both] - # RW = GraphSignals.random_walk_laplacian(g, T, dir=dir) - # @test RW == T.(rw_laps[dir]) - # @test eltype(RW) == T - # end - # end - # end - end -end diff --git a/test/cuda/featuredgraph.jl b/test/cuda/featuredgraph.jl new file mode 100644 index 0000000..da61ed4 --- /dev/null +++ b/test/cuda/featuredgraph.jl @@ -0,0 +1,45 @@ +T = Float32 + +@testset "cuda/featuredgraph" begin + @testset "undirected graph" begin + # undirected graph with self loop + V = 5 + E = 5 + nf = rand(10, V) + + adjm = T[0 1 0 1 1; + 1 0 0 0 0; + 0 0 1 0 0; + 1 0 0 0 1; + 1 0 0 1 0] + + fg = FeaturedGraph(adjm; directed=:undirected, nf=nf) |> gpu + @test has_graph(fg) + @test has_node_feature(fg) + @test !has_edge_feature(fg) + @test !has_global_feature(fg) + @test graph(fg) isa SparseGraph + @test node_feature(fg) isa CuMatrix{T} + end + + @testset "directed graph" 
begin + # directed graph with self loop + V = 5 + E = 7 + nf = rand(10, V) + + adjm = T[0 0 1 0 1; + 1 0 0 0 0; + 0 0 0 0 0; + 0 0 1 1 1; + 1 0 0 0 0] + + fg = FeaturedGraph(adjm; directed=:directed, nf=nf) |> gpu + @test has_graph(fg) + @test has_node_feature(fg) + @test !has_edge_feature(fg) + @test !has_global_feature(fg) + @test graph(fg) isa SparseGraph + @test node_feature(fg) isa CuMatrix{T} + end +end diff --git a/test/cuda/graphdomain.jl b/test/cuda/graphdomain.jl new file mode 100644 index 0000000..d565d08 --- /dev/null +++ b/test/cuda/graphdomain.jl @@ -0,0 +1,29 @@ +T = Float32 + +@testset "cuda/graphdomains" begin + d = GraphSignals.NullDomain() |> gpu + @test isnothing(GraphSignals.domain(d)) + @test isnothing(positional_feature(d)) + @test !has_positional_feature(d) + + pf = rand(T, 2, 3, 4) + d = GraphSignals.NodeDomain(pf) |> gpu + @test collect(GraphSignals.domain(d)) == pf + @test collect(positional_feature(d)) == pf + @test has_positional_feature(d) + + V = 5 + nf = rand(10, V) + pf = rand(10, V) + + adjm = T[0 1 0 1 1; + 1 0 0 0 0; + 0 0 1 0 0; + 1 0 0 0 1; + 1 0 0 1 0] + + fg = FeaturedGraph(adjm; nf=nf, pf=pf) |> gpu + gs = gradient(x -> sum(positional_feature(FeaturedGraph(x))), fg)[1] + @test :domain in keys(gs.pf) + @test gs.pf.domain isa CuArray +end diff --git a/test/cuda/linalg.jl b/test/cuda/linalg.jl new file mode 100644 index 0000000..753f3b0 --- /dev/null +++ b/test/cuda/linalg.jl @@ -0,0 +1,191 @@ +T = Float32 + +@testset "cuda/linalg" begin + in_channel = 3 + out_channel = 5 + N = 6 + + adjs = Dict( + :simple => [0. 1. 1. 0. 0. 0.; + 1. 0. 1. 0. 1. 0.; + 1. 1. 0. 1. 0. 1.; + 0. 0. 1. 0. 0. 0.; + 0. 1. 0. 0. 0. 0.; + 0. 0. 1. 0. 0. 0.], + :weight => [0. 2. 2. 0. 0. 0.; + 2. 0. 1. 0. 2. 0.; + 2. 1. 0. 5. 0. 2.; + 0. 0. 5. 0. 0. 0.; + 0. 2. 0. 0. 0. 0.; + 0. 0. 2. 0. 0. 0.], + ) + + degs = Dict( + :simple => [2. 0. 0. 0. 0. 0.; + 0. 3. 0. 0. 0. 0.; + 0. 0. 4. 0. 0. 0.; + 0. 0. 0. 1. 0. 0.; + 0. 0. 0. 0. 1. 0.; + 0. 0. 0. 0. 0. 1.], + :weight => [4. 0. 0. 0. 0. 0.; + 0. 5. 0. 0. 0. 0.; + 0. 0. 10. 0. 0. 0.; + 0. 0. 0. 5. 0. 0.; + 0. 0. 0. 0. 2. 0.; + 0. 0. 0. 0. 0. 2.] + ) + + laps = Dict( + :simple => [2. -1. -1. 0. 0. 0.; + -1. 3. -1. 0. -1. 0.; + -1. -1. 4. -1. 0. -1.; + 0. 0. -1. 1. 0. 0.; + 0. -1. 0. 0. 1. 0.; + 0. 0. -1. 0. 0. 1.], + :weight => [4. -2. -2. 0. 0. 0.; + -2. 5. -1. 0. -2. 0.; + -2. -1. 10. -5. 0. -2.; + 0. 0. -5. 5. 0. 0.; + 0. -2. 0. 0. 2. 0.; + 0. 0. -2. 0. 0. 2.], + ) + + norm_laps = Dict( + :simple => [1. -1/sqrt(2*3) -1/sqrt(2*4) 0. 0. 0.; + -1/sqrt(2*3) 1. -1/sqrt(3*4) 0. -1/sqrt(3) 0.; + -1/sqrt(2*4) -1/sqrt(3*4) 1. -1/2 0. -1/2; + 0. 0. -1/2 1. 0. 0.; + 0. -1/sqrt(3) 0. 0. 1. 0.; + 0. 0. -1/2 0. 0. 1.], + :weight => [1. -2/sqrt(4*5) -2/sqrt(4*10) 0. 0. 0.; + -2/sqrt(4*5) 1. -1/sqrt(5*10) 0. -2/sqrt(2*5) 0.; + -2/sqrt(4*10) -1/sqrt(5*10) 1. -5/sqrt(5*10) 0. -2/sqrt(2*10); + 0. 0. -5/sqrt(5*10) 1. 0. 0.; + 0. -2/sqrt(2*5) 0. 0. 1. 0.; + 0. 0. -2/sqrt(2*10) 0. 0. 1.] + ) + + @testset "undirected graph" begin + adjm = [0 1 0 1; + 1 0 1 0; + 0 1 0 1; + 1 0 1 0] + deg = [2 0 0 0; + 0 2 0 0; + 0 0 2 0; + 0 0 0 2] + isd = [√2, √2, √2, √2] + lap = [2 -1 0 -1; + -1 2 -1 0; + 0 -1 2 -1; + -1 0 -1 2] + norm_lap = [1. -.5 0. -.5; + -.5 1. -.5 0.; + 0. -.5 1. -.5; + -.5 0. -.5 1.] 
+ scaled_lap = [0 -0.5 0 -0.5; + -0.5 0 -0.5 -0; + 0 -0.5 0 -0.5; + -0.5 0 -0.5 0] + rw_lap = [1 -.5 0 -.5; + -.5 1 -.5 0; + 0 -.5 1 -.5; + -.5 0 -.5 1] + + @test GraphSignals.adjacency_matrix(adjm |> gpu, Int32) isa CuMatrix{Int32} + @test GraphSignals.adjacency_matrix(adjm |> gpu) isa CuMatrix{Int64} + + fg = FeaturedGraph(T.(adjm)) |> gpu + @test collect(GraphSignals.adjacency_matrix(fg)) == adjm + @test collect(GraphSignals.degrees(fg; dir=:both)) == [2, 2, 2, 2] + D = GraphSignals.degree_matrix(fg, T, dir=:out) + @test collect(D) == T.(deg) + @test GraphSignals.degree_matrix(fg, T; dir=:in) == D + @test GraphSignals.degree_matrix(fg, T; dir=:both) == D + @test eltype(D) == T + L = Graphs.laplacian_matrix(fg, T) + @test collect(L) == T.(lap) + @test eltype(L) == T + + NA = GraphSignals.normalized_adjacency_matrix(fg, T) + @test collect(NA) ≈ T.(I - norm_lap) + @test eltype(NA) == T + + NA = GraphSignals.normalized_adjacency_matrix(fg, T, selfloop=true) + @test eltype(NA) == T + + NL = GraphSignals.normalized_laplacian(fg, T) + @test collect(NL) ≈ T.(norm_lap) + @test eltype(NL) == T + + SL = GraphSignals.scaled_laplacian(fg, T) + @test collect(SL) ≈ T.(scaled_lap) + @test eltype(SL) == T + + # RW = GraphSignals.random_walk_laplacian(fg, T) + # @test RW == T.(rw_lap) + # @test eltype(RW) == T + end + + # @testset "directed" begin + # adjm = [0 2 0 3; + # 0 0 4 0; + # 2 0 0 1; + # 0 0 0 0] + # degs = Dict( + # :out => diagm(0=>[2, 2, 4, 4]), + # :in => diagm(0=>[5, 4, 3, 0]), + # :both => diagm(0=>[7, 6, 7, 4]), + # ) + # laps = Dict( + # :out => degs[:out] - adjm, + # :in => degs[:in] - adjm, + # :both => degs[:both] - adjm, + # ) + # norm_laps = Dict( + # :out => I - diagm(0=>[1/2, 1/2, 1/4, 1/4])*adjm, + # :in => I - diagm(0=>[1/5, 1/4, 1/3, 0])*adjm, + # ) + # sig_laps = Dict( + # :out => degs[:out] + adjm, + # :in => degs[:in] + adjm, + # :both => degs[:both] + adjm, + # ) + # rw_laps = Dict( + # :out => I - diagm(0=>[1/2, 1/2, 1/4, 1/4]) * adjm, + # :in => I - diagm(0=>[1/5, 1/4, 1/3, 0]) * adjm, + # :both => I - diagm(0=>[1/7, 1/6, 1/7, 1/4]) * adjm, + # ) + + # for g in [adjm, sparse(adjm)] + # for dir in [:out, :in, :both] + # D = GraphSignals.degree_matrix(g, T, dir=dir) + # @test D == T.(degs[dir]) + # @test eltype(D) == T + + # L = Graphs.laplacian_matrix(g, T, dir=dir) + # @test L == T.(laps[dir]) + # @test eltype(L) == T + + # SL = GraphSignals.signless_laplacian(g, T, dir=dir) + # @test SL == T.(sig_laps[dir]) + # @test eltype(SL) == T + # end + # @test_throws DomainError GraphSignals.degree_matrix(g, dir=:other) + # end + + # for g in [adjm, sparse(adjm)] + # for dir in [:out, :in] + # L = normalized_laplacian(g, T, dir=dir) + # @test L == T.(norm_laps[dir]) + # @test eltype(L) == T + # end + + # for dir in [:out, :in, :both] + # RW = GraphSignals.random_walk_laplacian(g, T, dir=dir) + # @test RW == T.(rw_laps[dir]) + # @test eltype(RW) == T + # end + # end + # end +end diff --git a/test/cuda/sparsegraph.jl b/test/cuda/sparsegraph.jl new file mode 100644 index 0000000..c4ae503 --- /dev/null +++ b/test/cuda/sparsegraph.jl @@ -0,0 +1,76 @@ +T = Float32 + +@testset "cuda/sparsegraph" begin + @testset "undirected graph" begin + # undirected graph with self loop + V = 5 + E = 5 + ef = cu(rand(10, E)) + + adjm = T[0 1 0 1 1; + 1 0 0 0 0; + 0 0 1 0 0; + 1 0 0 0 1; + 1 0 0 1 0] + + adjl = Vector{T}[ + [2, 4, 5], + [1], + [3], + [1, 5], + [1, 4] + ] + + sg = SparseGraph(adjm, false) |> gpu + @test (collect(sg.S) .!= 0) == adjm + @test sg.S isa CUSPARSE.CuSparseMatrixCSC{T} + @test 
collect(sg.edges) == [1, 3, 4, 1, 2, 3, 5, 4, 5] + @test sg.edges isa CuVector + @test sg.E == E + @test nv(sg) == V + @test ne(sg) == E + @test collect(neighbors(sg, 1)) == adjl[1] + @test collect(neighbors(sg, 2)) == adjl[2] + @test collect(GraphSignals.dsts(sg)) == [1, 3, 1, 1, 4] + @test collect(GraphSignals.srcs(sg)) == [2, 3, 4, 5, 5] + @test_throws ArgumentError GraphSignals.aggregate_index(sg, :edge, :in) + @test random_walk(sg, 1) ⊆ [2, 4, 5] + @test neighbor_sample(sg, 1) ⊆ [2, 4, 5] + end + + @testset "directed graph" begin + # directed graph with self loop + V = 5 + E = 7 + ef = cu(rand(10, E)) + + adjm = T[0 0 1 0 1; + 1 0 0 0 0; + 0 0 0 0 0; + 0 0 1 1 1; + 1 0 0 0 0] + + adjl = Vector{T}[ + [2, 5], + [], + [1, 4], + [4], + [1, 4], + ] + + sg = SparseGraph(adjm, true) |> gpu + @test (collect(sg.S) .!= 0) == adjm + @test sg.S isa CUSPARSE.CuSparseMatrixCSC{T} + @test collect(sg.edges) == collect(1:7) + @test sg.edges isa CuVector + @test sg.E == E + @test nv(sg) == V + @test ne(sg) == E + @test collect(neighbors(sg, 1)) == adjl[1] + @test collect(neighbors(sg, 3)) == adjl[3] + @test Array(GraphSignals.dsts(sg)) == [2, 5, 1, 4, 4, 1, 4] + @test Array(GraphSignals.srcs(sg)) == [1, 1, 3, 3, 4, 5, 5] + @test random_walk(sg, 1) ⊆ [2, 5] + @test neighbor_sample(sg, 1) ⊆ [2, 5] + end +end diff --git a/test/cuda/sparsematrix.jl b/test/cuda/sparsematrix.jl new file mode 100644 index 0000000..3dfcf98 --- /dev/null +++ b/test/cuda/sparsematrix.jl @@ -0,0 +1,11 @@ +T = Float32 + +@testset "cuda/sparsematrix" begin + adjm = cu(sparse( + T[0 1 0 1; + 1 0 1 0; + 0 1 0 1; + 1 0 1 0])) + @test collect(rowvals(adjm, 2)) == [1, 3] + @test collect(nonzeros(adjm, 2)) == [1, 1] +end diff --git a/test/runtests.jl b/test/runtests.jl index 438703d..b04aa99 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -15,6 +15,14 @@ CUDA.allowscalar(false) include("test_utils.jl") +cuda_tests = [ + "cuda/linalg", + "cuda/featuredgraph", + "cuda/sparsematrix", + "cuda/sparsegraph", + "cuda/graphdomain", +] + tests = [ "positional", "graph", @@ -31,7 +39,7 @@ tests = [ ] if CUDA.functional() - push!(tests, "cuda") + append!(tests, cuda_tests) end @testset "GraphSignals.jl" begin From e332b4d7ad10575e5f54d15ac1dec37a8c1cf50e Mon Sep 17 00:00:00 2001 From: Yueh-Hua Tu Date: Sat, 16 Sep 2023 00:17:16 +0800 Subject: [PATCH 2/2] add test --- test/cuda/linalg.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/cuda/linalg.jl b/test/cuda/linalg.jl index 753f3b0..79eb621 100644 --- a/test/cuda/linalg.jl +++ b/test/cuda/linalg.jl @@ -119,6 +119,7 @@ T = Float32 @test eltype(NL) == T SL = GraphSignals.scaled_laplacian(fg, T) + @test SL isa CuMatrix{T} @test collect(SL) ≈ T.(scaled_lap) @test eltype(SL) == T
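A note on the GraphSignalsCUDAExt change above: the removed method `adjacency_matrix(adj::CuMatrix{T}, ::Type{T}) where {T}` could only match when the requested eltype already equaled the matrix eltype, and since it was not qualified as `GraphSignals.adjacency_matrix`, it appears to have defined a new function local to the extension rather than adding a method to GraphSignals. The replacement dispatches on any target eltype `S` and converts on the device. A minimal sketch of the intended behavior, assuming a CUDA-capable session (the 4-node matrix mirrors the one used in test/cuda/linalg.jl):

    using CUDA, GraphSignals

    adjm = [0 1 0 1;
            1 0 1 0;
            0 1 0 1;
            1 0 1 0]

    A = CuArray(adjm)                            # CuMatrix{Int64}
    B = GraphSignals.adjacency_matrix(A, Int32)  # eltype conversion stays on the GPU
    @assert B isa CuMatrix{Int32}
    C = GraphSignals.adjacency_matrix(A)         # no target eltype: input returned as-is
    @assert C isa CuMatrix{Int64}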
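The expected `sg.edges` vector in test/cuda/sparsegraph.jl is easier to read with the storage order spelled out. Judging from these expectations, an undirected SparseGraph assigns one edge id per undirected edge and stores that id on both symmetric nonzeros of the CSC matrix, which is why `ne(sg) == 5` while the matrix holds 9 stored entries. A worked trace over the test's undirected adjacency matrix (the id-sharing rule is inferred from the test data, not from documentation):

    # column-major walk over the stored entries of adjm:
    # col 1: rows 2, 4, 5 → new edge ids 1, 3, 4
    # col 2: row 1        → id 1 (shared with entry (2, 1))
    # col 3: row 3        → id 2 (self-loop)
    # col 4: rows 1, 5    → id 3 (shared), then new id 5
    # col 5: rows 1, 4    → ids 4, 5 (both shared)
    # hence collect(sg.edges) == [1, 3, 4, 1, 2, 3, 5, 4, 5]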
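On the runtests.jl hunk: the single "cuda" entry is replaced by the five split files, appended only when `CUDA.functional()` is true, so CPU-only CI skips the GPU suites cleanly. The loop that consumes `tests` lies outside the hunk's context lines; a minimal sketch of the usual pattern (the loop body is an assumption, it is not shown in this diff):

    @testset "GraphSignals.jl" begin
        for t in tests
            include("$(t).jl")  # picks up "cuda/linalg.jl" etc. when CUDA is functional
        end
    end

Patch 2/2 then adds `@test SL isa CuMatrix{T}` to the split linalg suite, so a `scaled_laplacian` that silently falls back to a CPU array now fails fast; the existing `collect`-based value comparison alone would not catch that.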