diff --git a/src/ReinforcementLearningZoo/Project.toml b/src/ReinforcementLearningZoo/Project.toml index a9e21fa2f..771c56f77 100644 --- a/src/ReinforcementLearningZoo/Project.toml +++ b/src/ReinforcementLearningZoo/Project.toml @@ -19,7 +19,6 @@ Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" ReinforcementLearningBase = "e575027e-6cd6-5018-9292-cdc6200d2b44" ReinforcementLearningCore = "de1b191a-4ae0-4afa-a27b-92d07f46b2d6" -ReinforcementLearningEnvironments = "25e41dd2-4622-11e9-1641-f1adca772921" Requires = "ae029012-a4dd-5104-9daa-d747884805df" Setfield = "efcf1570-3423-57d1-acb7-fd33fddbac46" StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" @@ -40,7 +39,6 @@ IntervalSets = "0.5" MacroTools = "0.5" ReinforcementLearningBase = "0.9" ReinforcementLearningCore = "0.6.1" -ReinforcementLearningEnvironments = "0.4" Requires = "1" Setfield = "0.6, 0.7" StableRNGs = "1.0" diff --git a/src/ReinforcementLearningZoo/src/ReinforcementLearningZoo.jl b/src/ReinforcementLearningZoo/src/ReinforcementLearningZoo.jl index 78695dd72..9f8f4aa23 100644 --- a/src/ReinforcementLearningZoo/src/ReinforcementLearningZoo.jl +++ b/src/ReinforcementLearningZoo/src/ReinforcementLearningZoo.jl @@ -6,7 +6,6 @@ export RLZoo using CircularArrayBuffers using ReinforcementLearningBase using ReinforcementLearningCore -using ReinforcementLearningEnvironments using Setfield: @set using StableRNGs using Logging diff --git a/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/A2C.jl b/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/A2C.jl index aacc0f278..6c90b7c3d 100644 --- a/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/A2C.jl +++ b/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/A2C.jl @@ -110,3 +110,5 @@ function _update!(learner::A2CLearner, t::CircularArraySARTTrajectory) end update!(AC, gs) end + +RLCore.check(::QBasedPolicy{<:A2CLearner}, ::MultiThreadEnv) = nothing \ No newline at end of file diff 
--git a/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/A2CGAE.jl b/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/A2CGAE.jl index 5271cb02c..c389574b8 100644 --- a/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/A2CGAE.jl +++ b/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/A2CGAE.jl @@ -113,3 +113,5 @@ function _update!(learner::A2CGAELearner, t::CircularArraySARTTrajectory) update!(AC, gs) end + +RLCore.check(::QBasedPolicy{<:A2CGAELearner}, ::MultiThreadEnv) = nothing \ No newline at end of file diff --git a/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/MAC.jl b/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/MAC.jl index a33b8d946..dc1063c44 100644 --- a/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/MAC.jl +++ b/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/MAC.jl @@ -133,3 +133,5 @@ function _update!(learner::MACLearner, t::CircularArraySARTTrajectory) end update!(AC.critic, gs2) end + +RLCore.check(::QBasedPolicy{<:MACLearner}, ::MultiThreadEnv) = nothing \ No newline at end of file diff --git a/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/multi_thread_env.jl b/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/multi_thread_env.jl new file mode 100644 index 000000000..f8fcc8a4e --- /dev/null +++ b/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/multi_thread_env.jl @@ -0,0 +1,148 @@ +export MultiThreadEnv + +using Base.Threads:@spawn + +""" + MultiThreadEnv(envs::Vector{<:AbstractEnv}) + +Wrap multiple instances of the same environment type into one environment. +Each environment will run in parallel by leveraging `Threads.@spawn`. +So remember to set the environment variable `JULIA_NUM_THREADS`! 
+""" +struct MultiThreadEnv{E,S,R,AS,SS,L} <: AbstractEnv + envs::Vector{E} + states::S + rewards::R + terminals::BitArray{1} + action_space::AS + state_space::SS + legal_action_space_mask::L +end + +function Base.show(io::IO, t::MIME"text/markdown", env::MultiThreadEnv) + s = """ + # MultiThreadEnv($(length(env)) x $(nameof(env[1]))) + """ + show(io, t, Markdown.parse(s)) +end + +""" + MultiThreadEnv(f, n::Int) + +`f` is a lambda function which creates an `AbstractEnv` by calling `f()`. +""" +MultiThreadEnv(f, n::Int) = MultiThreadEnv([f() for _ in 1:n]) + +function MultiThreadEnv(envs::Vector{<:AbstractEnv}) + n = length(envs) + S = state_space(envs[1]) + s = state(envs[1]) + if S isa Space + S_batch = similar(S, size(S)..., n) + s_batch = similar(s, size(s)..., n) + for j in 1:n + Sₙ = state_space(envs[j]) + sₙ = state(envs[j]) + for i in CartesianIndices(size(S)) + S_batch[i, j] = Sₙ[i] + s_batch[i, j] = sₙ[i] + end + end + else + S_batch = Space(state_space.(envs)) + s_batch = state.(envs) + end + + A = action_space(envs[1]) + if A isa Space + A_batch = similar(A, size(A)..., n) + for j in 1:n + Aⱼ = action_space(envs[j]) + for i in CartesianIndices(size(A)) + A_batch[i, j] = Aⱼ[i] + end + end + else + A_batch = Space(action_space.(envs)) + end + + r_batch = reward.(envs) + t_batch = is_terminated.(envs) + if ActionStyle(envs[1]) === FULL_ACTION_SET + m_batch = BitArray(undef, size(A_batch)) + for j in 1:n + L = legal_action_space_mask(envs[j]) + for i in CartesianIndices(size(A)) + m_batch[i, j] = L[i] + end + end + else + m_batch = nothing + end + MultiThreadEnv(envs, s_batch, r_batch, t_batch, A_batch, S_batch, m_batch) +end + +MacroTools.@forward MultiThreadEnv.envs Base.getindex, Base.length, Base.iterate + +function (env::MultiThreadEnv)(actions) + @sync for i in 1:length(env) + @spawn begin + env[i](actions[i]) + end + end +end + +function RLBase.reset!(env::MultiThreadEnv; is_force = false) + if is_force + for i in 1:length(env) + reset!(env[i]) + end 
+ else + @sync for i in 1:length(env) + if is_terminated(env[i]) + @spawn begin + reset!(env[i]) + end + end + end + end +end + +const MULTI_THREAD_ENV_CACHE = IdDict{AbstractEnv,Dict{Symbol,Array}}() + +function RLBase.state(env::MultiThreadEnv) + N = ndims(env.states) + @sync for i in 1:length(env) + @spawn selectdim(env.states, N, i) .= state(env[i]) + end + env.states +end + +function RLBase.reward(env::MultiThreadEnv) + env.rewards .= reward.(env.envs) + env.rewards +end + +function RLBase.is_terminated(env::MultiThreadEnv) + env.terminals .= is_terminated.(env.envs) + env.terminals +end + +function RLBase.legal_action_space_mask(env::MultiThreadEnv) + @sync for i in 1:length(env) + @spawn selectdim(env.legal_action_space_mask, ndims(env.legal_action_space_mask), i) .= + legal_action_space_mask(env[i]) + end + env.legal_action_space_mask +end + +RLBase.action_space(env::MultiThreadEnv) = env.action_space +RLBase.state_space(env::MultiThreadEnv) = env.state_space +RLBase.legal_action_space(env::MultiThreadEnv) = Space(legal_action_space.(env.envs)) +# RLBase.current_player(env::MultiThreadEnv) = current_player.(env.envs) + +for f in RLBase.ENV_API + if endswith(String(f), "Style") + @eval RLBase.$f(x::MultiThreadEnv) = $f(x[1]) + end +end diff --git a/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/run.jl b/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/run.jl index 66c29f13e..7d9af366b 100644 --- a/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/run.jl +++ b/src/ReinforcementLearningZoo/src/algorithms/policy_gradient/run.jl @@ -1,3 +1,5 @@ +include("multi_thread_env.jl") + """ Many policy gradient based algorithms require that the `env` is a `MultiThreadEnv` to increase the diversity during training. So the training