
Can't get connect four example to work #213

Closed
@ghost

Description

I cloned and initialized the AlphaZero project exactly as the README describes, then tried to run the connect-four training line, and I hit the error below every time. I have tried different versions of CUDA and different versions of Julia; nothing changes it.
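For reference, these are the steps I ran (paraphrased from the README, which uses `Scripts.train` as the entry point):

```julia
# From a fresh clone of AlphaZero.jl, in the project directory:
using Pkg
Pkg.activate(".")      # use the project's own environment
Pkg.instantiate()      # install the pinned dependencies
using AlphaZero
Scripts.train("connect-four")   # fails with the MethodError below
```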

MethodError: no method matching length(::Nothing)

Closest candidates are:
  length(::Base.AsyncGenerator)
   @ Base asyncmap.jl:390
  length(::RegexMatch)
   @ Base regex.jl:285
  length(::Distributions.VonMisesFisherSampler)
   @ Distributions C:\Users\KOOLD\.julia\packages\Distributions\UaWBm\src\samplers\vonmisesfisher.jl:20
  ...

Stacktrace:
  [1] #s597#122
    @ C:\Users\KOOLD\.julia\packages\GPUCompiler\S3TWf\src\cache.jl:18 [inlined]
  [2] var"#s597#122"(f::Any, tt::Any, ::Any, job::Any)
    @ GPUCompiler .\none:0
  [3] (::Core.GeneratedFunctionStub)(::UInt64, ::LineNumberNode, ::Any, ::Vararg{Any})
    @ Core .\boot.jl:602
  [4] cached_compilation(cache::Dict{UInt64, Any}, job::GPUCompiler.CompilerJob, compiler::typeof(CUDA.cufunction_compile), linker::typeof(CUDA.cufunction_link))
    @ GPUCompiler C:\Users\KOOLD\.julia\packages\GPUCompiler\S3TWf\src\cache.jl:71
  [5] cufunction(f::GPUArrays.var"#broadcast_kernel#26", tt::Type{Tuple{CUDA.CuKernelContext, CUDA.CuDeviceArray{Float32, 4, 1}, Base.Broadcast.Broadcasted{CUDA.CuArrayStyle{4}, NTuple{4, Base.OneTo{Int64}}, typeof(identity), Tuple{Base.Broadcast.Broadcasted{CUDA.CuArrayStyle{4}, Nothing, typeof(+), Tuple{Base.Broadcast.Extruded{CUDA.CuDeviceArray{Float32, 4, 1}, NTuple{4, Bool}, NTuple{4, Int64}}, Base.Broadcast.Extruded{CUDA.CuDeviceArray{Float32, 4, 1}, NTuple{4, Bool}, NTuple{4, Int64}}}}}}, Int64}}; name::Nothing, always_inline::Bool, kwargs::@Kwargs{})
    @ CUDA C:\Users\KOOLD\.julia\packages\CUDA\BbliS\src\compiler\execution.jl:300
  [6] cufunction
    @ C:\Users\KOOLD\.julia\packages\CUDA\BbliS\src\compiler\execution.jl:293 [inlined]
  [7] macro expansion
    @ C:\Users\KOOLD\.julia\packages\CUDA\BbliS\src\compiler\execution.jl:102 [inlined]
  [8] #launch_heuristic#252
    @ C:\Users\KOOLD\.julia\packages\CUDA\BbliS\src\gpuarrays.jl:17 [inlined]
  [9] launch_heuristic
    @ C:\Users\KOOLD\.julia\packages\CUDA\BbliS\src\gpuarrays.jl:15 [inlined]
 [10] _copyto!
    @ C:\Users\KOOLD\.julia\packages\GPUArrays\5XhED\src\host\broadcast.jl:65 [inlined]
 [11] copyto!
    @ C:\Users\KOOLD\.julia\packages\GPUArrays\5XhED\src\host\broadcast.jl:46 [inlined]
 [12] copy
    @ C:\Users\KOOLD\.julia\packages\GPUArrays\5XhED\src\host\broadcast.jl:37 [inlined]
 [13] materialize
    @ .\broadcast.jl:903 [inlined]
 [14] (::Flux.Conv{2, 2, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}})(x::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer})
    @ Flux C:\Users\KOOLD\.julia\packages\Flux\uCLgc\src\layers\conv.jl:202
 [15] macro expansion
    @ C:\Users\KOOLD\.julia\packages\Flux\uCLgc\src\layers\basic.jl:53 [inlined]
 [16] _applychain(layers::Tuple{Flux.Conv{2, 2, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Flux.BatchNorm{typeof(NNlib.relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Vararg{Flux.Chain{Tuple{Flux.SkipConnection{Flux.Chain{Tuple{Flux.Conv{2, 2, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Flux.BatchNorm{typeof(NNlib.relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Flux.Conv{2, 2, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Flux.BatchNorm{typeof(identity), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}}}, typeof(+)}, AlphaZero.FluxLib.var"#15#16"}}, 5}}, x::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer})
    @ Flux C:\Users\KOOLD\.julia\packages\Flux\uCLgc\src\layers\basic.jl:53
 [17] (::Flux.Chain{Tuple{Flux.Conv{2, 2, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Flux.BatchNorm{typeof(NNlib.relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Vararg{Flux.Chain{Tuple{Flux.SkipConnection{Flux.Chain{Tuple{Flux.Conv{2, 2, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Flux.BatchNorm{typeof(NNlib.relu), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Flux.Conv{2, 2, typeof(identity), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Flux.BatchNorm{typeof(identity), CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}}}, typeof(+)}, AlphaZero.FluxLib.var"#15#16"}}, 5}}})(x::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer})
    @ Flux C:\Users\KOOLD\.julia\packages\Flux\uCLgc\src\layers\basic.jl:51
 [18] forward(nn::ResNet, state::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer})
    @ AlphaZero.FluxLib C:\Users\KOOLD\AlphaZero.jl\src\networks\flux.jl:142
 [19] forward_normalized(nn::ResNet, state::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, actions_mask::CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer})
    @ AlphaZero.Network C:\Users\KOOLD\AlphaZero.jl\src\networks\network.jl:264
 [20] evaluate_batch(nn::ResNet, batch::Vector{@NamedTuple{board::StaticArraysCore.SMatrix{7, 6, UInt8, 42}, curplayer::UInt8}})
    @ AlphaZero.Network C:\Users\KOOLD\AlphaZero.jl\src\networks\network.jl:312
 [21] fill_and_evaluate(net::ResNet, batch::Vector{@NamedTuple{board::StaticArraysCore.SMatrix{7, 6, UInt8, 42}, curplayer::UInt8}}; batch_size::Int64, fill_batches::Bool)
    @ AlphaZero C:\Users\KOOLD\AlphaZero.jl\src\simulations.jl:32
 [22] fill_and_evaluate
    @ C:\Users\KOOLD\AlphaZero.jl\src\simulations.jl:23 [inlined]
 [23] #36
    @ C:\Users\KOOLD\AlphaZero.jl\src\simulations.jl:54 [inlined]
 [24] #4
    @ C:\Users\KOOLD\AlphaZero.jl\src\batchifier.jl:71 [inlined]
 [25] log_event(f::AlphaZero.Batchifier.var"#4#7"{Vector{@NamedTuple{board::StaticArraysCore.SMatrix{7, 6, UInt8, 42}, curplayer::UInt8}}, AlphaZero.var"#36#37"{Int64, Bool, ResNet}}; name::String, cat::String, pid::Int64, tid::Int64)
    @ AlphaZero.ProfUtils C:\Users\KOOLD\AlphaZero.jl\src\prof_utils.jl:40
 [26] macro expansion
    @ C:\Users\KOOLD\AlphaZero.jl\src\batchifier.jl:68 [inlined]
 [27] macro expansion
    @ C:\Users\KOOLD\AlphaZero.jl\src\util.jl:21 [inlined]
 [28] (::AlphaZero.Batchifier.var"#2#5"{Int64, AlphaZero.var"#36#37"{Int64, Bool, ResNet}, Channel{Any}})()
    @ AlphaZero.Batchifier C:\Users\KOOLD\.julia\packages\ThreadPools\ANo2I\src\macros.jl:261
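The bottom of the trace (frames [5]-[13]) is just a plain GPU broadcast being compiled, so my guess (not verified on another machine) is that the same failure would show up on a minimal broadcast outside AlphaZero entirely:

```julia
using CUDA
CUDA.versioninfo()   # prints the CUDA toolkit, driver, and Julia versions in use

# Minimal broadcast that should take the same cufunction/broadcast_kernel
# path as frames [5]-[13] above, if the problem is in CUDA.jl/GPUCompiler
# rather than in AlphaZero itself:
a = CUDA.rand(Float32, 4, 4, 4, 1)
b = CUDA.rand(Float32, 4, 4, 4, 1)
a .+ b
```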
