Drop dependency of RLEnvs (#136)
* drop dependency of RLEnvs

* remove dependency on RLEnvs

* minor fix

* fix warnings
findmyway authored Dec 21, 2020
1 parent a7c639d commit 94a4470
Showing 7 changed files with 156 additions and 3 deletions.
2 changes: 0 additions & 2 deletions src/ReinforcementLearningZoo/Project.toml
@@ -19,7 +19,6 @@ Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
ReinforcementLearningBase = "e575027e-6cd6-5018-9292-cdc6200d2b44"
ReinforcementLearningCore = "de1b191a-4ae0-4afa-a27b-92d07f46b2d6"
ReinforcementLearningEnvironments = "25e41dd2-4622-11e9-1641-f1adca772921"
Requires = "ae029012-a4dd-5104-9daa-d747884805df"
Setfield = "efcf1570-3423-57d1-acb7-fd33fddbac46"
StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
@@ -40,7 +39,6 @@ IntervalSets = "0.5"
MacroTools = "0.5"
ReinforcementLearningBase = "0.9"
ReinforcementLearningCore = "0.6.1"
ReinforcementLearningEnvironments = "0.4"
Requires = "1"
Setfield = "0.6, 0.7"
StableRNGs = "1.0"
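
The practical effect of the two removals above is that ReinforcementLearningEnvironments is no longer pulled in through RLZoo. Below is a minimal downstream sketch, under the assumption that a user script combines environments from RLEnvs with algorithms from RLZoo; both packages now have to be added to and loaded by the user's own project. The environment name is only an illustration.

using ReinforcementLearningBase
using ReinforcementLearningEnvironments   # explicit dependency of the user project from now on
using ReinforcementLearningZoo

env = CartPoleEnv()                       # provided by RLEnvs, not by RLZoo
state(env), action_space(env)             # the RLBase API is unchanged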
@@ -6,7 +6,6 @@ export RLZoo
using CircularArrayBuffers
using ReinforcementLearningBase
using ReinforcementLearningCore
using ReinforcementLearningEnvironments
using Setfield: @set
using StableRNGs
using Logging
@@ -110,3 +110,5 @@ function _update!(learner::A2CLearner, t::CircularArraySARTTrajectory)
    end
    update!(AC, gs)
end

RLCore.check(::QBasedPolicy{<:A2CLearner}, ::MultiThreadEnv) = nothing
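
This added method, together with the identical ones for `A2CGAELearner` and `MACLearner` below, presumably opts these policies out of the generic pre-run compatibility check when they are paired with a `MultiThreadEnv`, which matches the "fix warnings" item in the commit message. A sketch of the same pattern for a hypothetical custom learner (the name `MyLearner` is illustrative and not part of this commit):

using ReinforcementLearningBase, ReinforcementLearningCore, ReinforcementLearningZoo

# Purely illustrative learner type.
struct MyLearner <: AbstractLearner end

# No-op method: skip the generic policy/environment check when a QBasedPolicy
# wrapping MyLearner is run on a MultiThreadEnv.
RLCore.check(::QBasedPolicy{<:MyLearner}, ::MultiThreadEnv) = nothing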
@@ -113,3 +113,5 @@ function _update!(learner::A2CGAELearner, t::CircularArraySARTTrajectory)

    update!(AC, gs)
end

RLCore.check(::QBasedPolicy{<:A2CGAELearner}, ::MultiThreadEnv) = nothing
@@ -133,3 +133,5 @@ function _update!(learner::MACLearner, t::CircularArraySARTTrajectory)
    end
    update!(AC.critic, gs2)
end

RLCore.check(::QBasedPolicy{<:MACLearner}, ::MultiThreadEnv) = nothing
@@ -0,0 +1,148 @@
export MultiThreadEnv

using Base.Threads: @spawn

"""
MultiThreadEnv(envs::Vector{<:AbstractEnv})
Wrap multiple instances of the same environment type into one environment.
Each environment will run in parallel by leveraging `Threads.@spawn`.
So remember to set the environment variable `JULIA_NUM_THREADS`!
"""
struct MultiThreadEnv{E,S,R,AS,SS,L} <: AbstractEnv
    envs::Vector{E}
    states::S
    rewards::R
    terminals::BitArray{1}
    action_space::AS
    state_space::SS
    legal_action_space_mask::L
end

function Base.show(io::IO, t::MIME"text/markdown", env::MultiThreadEnv)
    s = """
    # MultiThreadEnv($(length(env)) x $(nameof(env[1])))
    """
    show(io, t, Markdown.parse(s))
end

"""
MultiThreadEnv(f, n::Int)
`f` is a lambda function which creates an `AbstractEnv` by calling `f()`.
"""
MultiThreadEnv(f, n::Int) = MultiThreadEnv([f() for _ in 1:n])

function MultiThreadEnv(envs::Vector{<:AbstractEnv})
    n = length(envs)
    S = state_space(envs[1])
    s = state(envs[1])
    if S isa Space
        S_batch = similar(S, size(S)..., n)
        s_batch = similar(s, size(s)..., n)
        for j in 1:n
            Sₙ = state_space(envs[j])
            sₙ = state(envs[j])
            for i in CartesianIndices(size(S))
                S_batch[i, j] = Sₙ[i]
                s_batch[i, j] = sₙ[i]
            end
        end
    else
        S_batch = Space(state_space.(envs))
        s_batch = state.(envs)
    end

    A = action_space(envs[1])
    if A isa Space
        A_batch = similar(A, size(A)..., n)
        for j in 1:n
            Aⱼ = action_space(envs[j])
            for i in CartesianIndices(size(A))
                A_batch[i, j] = Aⱼ[i]
            end
        end
    else
        A_batch = Space(action_space.(envs))
    end

    r_batch = reward.(envs)
    t_batch = is_terminated.(envs)
    if ActionStyle(envs[1]) === FULL_ACTION_SET
        m_batch = BitArray(undef, size(A_batch))
        for j in 1:n
            L = legal_action_space_mask(envs[j])
            for i in CartesianIndices(size(A))
                m_batch[i, j] = L[i]
            end
        end
    else
        m_batch = nothing
    end
    MultiThreadEnv(envs, s_batch, r_batch, t_batch, A_batch, S_batch, m_batch)
end

MacroTools.@forward MultiThreadEnv.envs Base.getindex, Base.length, Base.iterate

function (env::MultiThreadEnv)(actions)
    @sync for i in 1:length(env)
        @spawn begin
            env[i](actions[i])
        end
    end
end

function RLBase.reset!(env::MultiThreadEnv; is_force = false)
    if is_force
        for i in 1:length(env)
            reset!(env[i])
        end
    else
        @sync for i in 1:length(env)
            if is_terminated(env[i])
                @spawn begin
                    reset!(env[i])
                end
            end
        end
    end
end

const MULTI_THREAD_ENV_CACHE = IdDict{AbstractEnv,Dict{Symbol,Array}}()

function RLBase.state(env::MultiThreadEnv)
    N = ndims(env.states)
    @sync for i in 1:length(env)
        @spawn selectdim(env.states, N, i) .= state(env[i])
    end
    env.states
end

function RLBase.reward(env::MultiThreadEnv)
    env.rewards .= reward.(env.envs)
    env.rewards
end

function RLBase.is_terminated(env::MultiThreadEnv)
    env.terminals .= is_terminated.(env.envs)
    env.terminals
end

function RLBase.legal_action_space_mask(env::MultiThreadEnv)
    N = ndims(env.legal_action_space_mask)
    @sync for i in 1:length(env)
        @spawn selectdim(env.legal_action_space_mask, N, i) .=
            legal_action_space_mask(env[i])
    end
    env.legal_action_space_mask
end

RLBase.action_space(env::MultiThreadEnv) = env.action_space
RLBase.state_space(env::MultiThreadEnv) = env.state_space
RLBase.legal_action_space(env::MultiThreadEnv) = Space(legal_action_space.(env.envs))
# RLBase.current_player(env::MultiThreadEnv) = current_player.(env.envs)

for f in RLBase.ENV_API
    if endswith(String(f), "Style")
        @eval RLBase.$f(x::MultiThreadEnv) = $f(x[1])
    end
end
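
A minimal usage sketch of the API defined in this new file. The environment type and thread count are illustrative; it assumes `CartPoleEnv` from ReinforcementLearningEnvironments and that Julia was started with several threads (e.g. `JULIA_NUM_THREADS=4`):

using ReinforcementLearningBase
using ReinforcementLearningEnvironments
using ReinforcementLearningZoo

# Build 4 independent copies of the same environment via the `(f, n)` constructor.
env = MultiThreadEnv(() -> CartPoleEnv(), 4)

reset!(env; is_force = true)       # force-reset every sub-environment
s = state(env)                     # batched states; the last dimension indexes the sub-envs
actions = [rand(action_space(env[i])) for i in 1:length(env)]
env(actions)                       # step all sub-environments, one spawned task per env
reward(env), is_terminated(env)    # batched rewards and termination flags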
@@ -1,3 +1,5 @@
include("multi_thread_env.jl")

"""
Many policy gradient based algorithms require that the `env` is a
`MultiThreadEnv` to increase the diversity during training. So the training
