Merge remote-tracking branch 'origin/main' into nested-tensor-splittraj
vmoens committed Jun 28, 2024
2 parents 0966659 + 1083b35 commit f635b1a
Showing 236 changed files with 15,394 additions and 5,338 deletions.
2 changes: 1 addition & 1 deletion .github/scripts/m1_script.sh
@@ -1,5 +1,5 @@
#!/bin/bash

-export BUILD_VERSION=0.4.0
+export TORCHRL_BUILD_VERSION=0.4.0

${CONDA_RUN} pip install git+https://github.com/pytorch/tensordict.git -U
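
For context, the change above renames the generic BUILD_VERSION override to a TorchRL-specific variable. A minimal sketch of how such a variable is typically exercised, assuming (this diff does not show it) that the repository's setup.py reads TORCHRL_BUILD_VERSION to stamp the wheel version:

#!/bin/bash
# Hedged sketch: pin the build version, then build a wheel.
# That setup.py consumes TORCHRL_BUILD_VERSION is an assumption for illustration.
export TORCHRL_BUILD_VERSION=0.4.0
python setup.py bdist_wheel   # would yield torchrl-0.4.0-*.whl if the assumption holds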
2 changes: 1 addition & 1 deletion .github/unittest/linux/scripts/run_all.sh
@@ -9,7 +9,7 @@ set -v

if [[ $OSTYPE != 'darwin'* ]]; then
apt-get update && apt-get upgrade -y
-apt-get install -y vim git wget
+apt-get install -y vim git wget libsdl2-dev libsdl2-2.0-0

apt-get install -y libglfw3 libgl1-mesa-glx libosmesa6 libglew-dev
apt-get install -y libglvnd0 libgl1 libglx0 libegl1 libgles2
90 changes: 24 additions & 66 deletions .github/unittest/linux_examples/scripts/run_test.sh
@@ -36,24 +36,20 @@ python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/de
optim.pretrain_gradient_steps=55 \
optim.updates_per_episode=3 \
optim.warmup_steps=10 \
-optim.device=cuda:0 \
logger.backend= \
env.backend=gymnasium \
env.name=HalfCheetah-v4
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/decision_transformer/online_dt.py \
optim.pretrain_gradient_steps=55 \
optim.updates_per_episode=3 \
optim.warmup_steps=10 \
-optim.device=cuda:0 \
env.backend=gymnasium \
logger.backend=
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/iql/iql_offline.py \
optim.gradient_steps=55 \
-optim.device=cuda:0 \
logger.backend=
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/cql/cql_offline.py \
optim.gradient_steps=55 \
-optim.device=cuda:0 \
logger.backend=

# ==================================================================================== #
@@ -86,14 +82,10 @@ python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/dd
optim.batch_size=10 \
collector.frames_per_batch=16 \
collector.env_per_collector=2 \
-collector.device=cuda:0 \
-network.device=cuda:0 \
optim.utd_ratio=1 \
replay_buffer.size=120 \
env.name=Pendulum-v1 \
logger.backend=
-# record_video=True \
-# record_frames=4 \
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/a2c/a2c_mujoco.py \
env.env_name=HalfCheetah-v4 \
collector.total_frames=40 \
@@ -112,7 +104,6 @@ python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/dq
collector.init_random_frames=10 \
collector.frames_per_batch=16 \
buffer.batch_size=10 \
-device=cuda:0 \
loss.num_updates=1 \
logger.backend= \
buffer.buffer_size=120
@@ -122,7 +113,6 @@ python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/cq
optim.batch_size=10 \
collector.frames_per_batch=16 \
collector.env_per_collector=2 \
-collector.device=cuda:0 \
replay_buffer.size=120 \
logger.backend=
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/redq/redq.py \
@@ -131,10 +121,9 @@ python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/re
collector.init_random_frames=10 \
collector.frames_per_batch=16 \
collector.env_per_collector=2 \
-collector.device=cuda:0 \
buffer.batch_size=10 \
optim.steps_per_batch=1 \
-logger.record_video=True \
+logger.video=True \
logger.record_frames=4 \
buffer.size=120 \
logger.backend=
@@ -143,53 +132,42 @@ python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/sa
collector.init_random_frames=10 \
collector.frames_per_batch=16 \
collector.env_per_collector=2 \
-collector.device=cuda:0 \
optim.batch_size=10 \
optim.utd_ratio=1 \
replay_buffer.size=120 \
env.name=Pendulum-v1 \
-network.device=cuda:0 \
logger.backend=
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/discrete_sac/discrete_sac.py \
collector.total_frames=48 \
collector.init_random_frames=10 \
collector.frames_per_batch=16 \
collector.env_per_collector=1 \
-collector.device=cuda:0 \
optim.batch_size=10 \
optim.utd_ratio=1 \
-network.device=cuda:0 \
optim.batch_size=10 \
optim.utd_ratio=1 \
replay_buffer.size=120 \
env.name=CartPole-v1 \
logger.backend=
-# logger.record_video=True \
-# logger.record_frames=4 \
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/dreamer/dreamer.py \
-total_frames=200 \
-init_random_frames=10 \
-batch_size=10 \
-frames_per_batch=200 \
-num_workers=4 \
-env_per_collector=2 \
-collector_device=cuda:0 \
-model_device=cuda:0 \
-optim_steps_per_batch=1 \
-record_video=True \
-record_frames=4 \
-buffer_size=120 \
-rssm_hidden_dim=17
+collector.total_frames=200 \
+collector.init_random_frames=10 \
+collector.frames_per_batch=200 \
+env.n_parallel_envs=4 \
+optimization.optim_steps_per_batch=1 \
+logger.video=True \
+logger.backend=csv \
+replay_buffer.buffer_size=120 \
+replay_buffer.batch_size=24 \
+replay_buffer.batch_length=12 \
+networks.rssm_hidden_dim=17
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/td3/td3.py \
collector.total_frames=48 \
collector.init_random_frames=10 \
optim.batch_size=10 \
collector.frames_per_batch=16 \
collector.num_workers=4 \
collector.env_per_collector=2 \
-collector.device=cuda:0 \
-collector.device=cuda:0 \
-network.device=cuda:0 \
logger.mode=offline \
env.name=Pendulum-v1 \
logger.backend=
@@ -198,64 +176,51 @@ python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/iq
optim.batch_size=10 \
collector.frames_per_batch=16 \
env.train_num_envs=2 \
-optim.device=cuda:0 \
-collector.device=cuda:0 \
logger.mode=offline \
logger.backend=
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/iql/discrete_iql.py \
collector.total_frames=48 \
optim.batch_size=10 \
collector.frames_per_batch=16 \
env.train_num_envs=2 \
-optim.device=cuda:0 \
-collector.device=cuda:0 \
logger.mode=offline \
logger.backend=
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/cql/cql_online.py \
collector.total_frames=48 \
optim.batch_size=10 \
collector.frames_per_batch=16 \
env.train_num_envs=2 \
-collector.device=cuda:0 \
-optim.device=cuda:0 \
logger.mode=offline \
logger.backend=

# With single envs
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/dreamer/dreamer.py \
-total_frames=200 \
-init_random_frames=10 \
-batch_size=10 \
-frames_per_batch=200 \
-num_workers=2 \
-env_per_collector=1 \
-collector_device=cuda:0 \
-model_device=cuda:0 \
-optim_steps_per_batch=1 \
-record_video=True \
-record_frames=4 \
-buffer_size=120 \
-rssm_hidden_dim=17
+collector.total_frames=200 \
+collector.init_random_frames=10 \
+collector.frames_per_batch=200 \
+env.n_parallel_envs=1 \
+optimization.optim_steps_per_batch=1 \
+logger.backend=csv \
+logger.video=True \
+replay_buffer.buffer_size=120 \
+replay_buffer.batch_size=24 \
+replay_buffer.batch_length=12 \
+networks.rssm_hidden_dim=17
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/ddpg/ddpg.py \
collector.total_frames=48 \
collector.init_random_frames=10 \
optim.batch_size=10 \
collector.frames_per_batch=16 \
collector.env_per_collector=1 \
-collector.device=cuda:0 \
-network.device=cuda:0 \
optim.utd_ratio=1 \
replay_buffer.size=120 \
env.name=Pendulum-v1 \
logger.backend=
-# record_video=True \
-# record_frames=4 \
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/dqn/dqn_atari.py \
collector.total_frames=48 \
collector.init_random_frames=10 \
collector.frames_per_batch=16 \
buffer.batch_size=10 \
-device=cuda:0 \
loss.num_updates=1 \
logger.backend= \
buffer.buffer_size=120
@@ -266,9 +231,8 @@ python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/re
collector.frames_per_batch=16 \
collector.env_per_collector=1 \
buffer.batch_size=10 \
-collector.device=cuda:0 \
optim.steps_per_batch=1 \
-logger.record_video=True \
+logger.video=True \
logger.record_frames=4 \
buffer.size=120 \
logger.backend=
@@ -278,29 +242,23 @@ python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/iq
collector.frames_per_batch=16 \
env.train_num_envs=1 \
logger.mode=offline \
-optim.device=cuda:0 \
-collector.device=cuda:0 \
logger.backend=
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/cql/cql_online.py \
collector.total_frames=48 \
optim.batch_size=10 \
collector.frames_per_batch=16 \
collector.env_per_collector=1 \
logger.mode=offline \
-optim.device=cuda:0 \
-collector.device=cuda:0 \
logger.backend=
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/td3/td3.py \
collector.total_frames=48 \
collector.init_random_frames=10 \
collector.frames_per_batch=16 \
collector.num_workers=2 \
collector.env_per_collector=1 \
-collector.device=cuda:0 \
logger.mode=offline \
optim.batch_size=10 \
env.name=Pendulum-v1 \
-network.device=cuda:0 \
logger.backend=
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/multiagent/mappo_ippo.py \
collector.n_iters=2 \
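
All of the invocations above follow one pattern: each training script is launched through the coverage helper and configured purely with Hydra-style key=value overrides, where a trailing equals sign (logger.backend=) passes an empty value that the scripts treat as "no logger". The removed *.device=cuda:0 overrides suggest the scripts now resolve devices themselves. A minimal sketch of the invocation shape, reusing the td3 entry with a reduced override set:

# Hedged sketch of the override pattern used throughout this file.
# logger.backend= (empty value) disables logging output, as inferred from usage above.
python .github/unittest/helpers/coverage_run_parallel.py sota-implementations/td3/td3.py \
  collector.total_frames=48 \
  optim.batch_size=10 \
  env.name=Pendulum-v1 \
  logger.backend=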
1 change: 1 addition & 0 deletions .github/unittest/linux_libs/scripts_brax/environment.yml
@@ -17,4 +17,5 @@ dependencies:
- pyyaml
- scipy
- hydra-core
+- jax[cuda12]
- brax
3 changes: 3 additions & 0 deletions .github/unittest/linux_libs/scripts_gym/batch_scripts.sh
@@ -52,6 +52,7 @@ do
echo "Testing gym version: ${GYM_VERSION}"
# handling https://github.com/openai/gym/issues/3202
pip3 install wheel==0.38.4
pip3 install "pip<24.1"
pip3 install gym==$GYM_VERSION
$DIR/run_test.sh

@@ -70,6 +71,7 @@ do

echo "Testing gym version: ${GYM_VERSION}"
pip3 install wheel==0.38.4
pip3 install "pip<24.1"
pip3 install 'gym[atari]'==$GYM_VERSION
pip3 install ale-py==0.7
$DIR/run_test.sh
@@ -88,6 +90,7 @@ do

echo "Testing gym version: ${GYM_VERSION}"
pip3 install 'gym[atari]'==$GYM_VERSION
+pip3 install pip -U
$DIR/run_test.sh

# delete the conda copy
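
The new "pip<24.1" pins deserve a note: recent pip releases refuse the legacy, non-PEP-440 metadata that old gym sdists ship, so installing pinned historical gym versions fails unless pip itself is first held back (and upgraded again afterwards, as the last loop does). A condensed sketch of that dance, with GYM_VERSION as a placeholder:

# Hedged sketch of the pin/install/restore pattern added above.
GYM_VERSION=0.23.1             # placeholder; the real script loops over many versions
pip3 install wheel==0.38.4     # handling https://github.com/openai/gym/issues/3202
pip3 install "pip<24.1"        # older pip still accepts gym's legacy metadata
pip3 install gym==$GYM_VERSION
pip3 install pip -U            # restore an up-to-date pip for later steps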
4 changes: 2 additions & 2 deletions .github/unittest/linux_libs/scripts_gym/install.sh
@@ -37,9 +37,9 @@ git submodule sync && git submodule update --init --recursive

printf "Installing PyTorch with %s\n" "${CU_VERSION}"
if [ "${CU_VERSION:-}" == cpu ] ; then
-conda install pytorch==1.13.1 torchvision==0.14.1 cpuonly -c pytorch
+conda install pytorch==2.0 torchvision==0.15 cpuonly -c pytorch -y
else
-conda install pytorch==1.13.1 torchvision==0.14.1 pytorch-cuda=11.6 -c pytorch -c nvidia -y
+conda install pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 pytorch-cuda=11.8 -c pytorch -c nvidia -y
fi

# Solving circular import: https://stackoverflow.com/questions/75501048/how-to-fix-attributeerror-partially-initialized-module-charset-normalizer-has
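
After a pinned conda install like the one above, a quick import check is a cheap way to confirm the requested versions actually resolved; this one-liner is a suggested sanity check, not part of the diff:

# Hedged sanity check: report resolved versions and CUDA visibility.
python -c "import torch, torchvision; print(torch.__version__, torchvision.__version__, torch.cuda.is_available())"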
8 changes: 5 additions & 3 deletions .github/unittest/linux_libs/scripts_habitat/setup_env.sh
@@ -39,9 +39,11 @@ if [ ! -d "${env_dir}" ]; then
conda create --prefix "${env_dir}" -y python="$PYTHON_VERSION"
fi
conda activate "${env_dir}"
-#pip3 uninstall cython -y
+#pip uninstall cython -y
#conda uninstall cython -y

+# set debug variables
+conda env config vars set MAGNUM_LOG=debug HABITAT_SIM_LOG=debug
+conda deactivate && conda activate "${env_dir}"

pip3 install "cython<3"
conda install -c anaconda cython="<3.0.0" -y

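
One detail worth spelling out in the habitat change: conda env config vars set persists variables on the environment itself, but they only reach the process environment at the next activation, which is why the script immediately deactivates and re-activates. A minimal reproduction of that behavior:

# Hedged sketch: persisted env vars take effect only after re-activation.
conda env config vars set MAGNUM_LOG=debug HABITAT_SIM_LOG=debug
conda deactivate && conda activate "${env_dir}"
python -c 'import os; print(os.environ["MAGNUM_LOG"])'   # prints: debug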
8 changes: 4 additions & 4 deletions .github/unittest/linux_libs/scripts_jumanji/install.sh
@@ -28,15 +28,15 @@ git submodule sync && git submodule update --init --recursive
printf "Installing PyTorch with cu121"
if [[ "$TORCH_VERSION" == "nightly" ]]; then
if [ "${CU_VERSION:-}" == cpu ] ; then
-pip3 install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu -U
+pip3 install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/cpu -U
else
-pip3 install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu121 -U
+pip3 install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/cu121 -U
fi
elif [[ "$TORCH_VERSION" == "stable" ]]; then
if [ "${CU_VERSION:-}" == cpu ] ; then
-pip3 install torch --index-url https://download.pytorch.org/whl/cpu
+pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cpu
else
-pip3 install torch --index-url https://download.pytorch.org/whl/cu121
+pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cu121
fi
else
printf "Failed to install pytorch"
2 changes: 1 addition & 1 deletion .github/unittest/linux_libs/scripts_jumanji/run_test.sh
@@ -29,6 +29,6 @@ export MAGNUM_LOG=verbose MAGNUM_GPU_VALIDATION=ON
# this workflow only tests the libs
python -c "import jumanji"

-python .github/unittest/helpers/coverage_run_parallel.py -m pytest test/test_libs.py --instafail -v --durations 200 --capture no -k TestJumanji --error-for-skips
+python .github/unittest/helpers/coverage_run_parallel.py -m pytest test/test_libs.py --instafail -v --durations 200 --capture no -k TestJumanji --error-for-skips --runslow
coverage combine
coverage xml -i
15 changes: 15 additions & 0 deletions .github/unittest/linux_libs/scripts_meltingpot/environment.yml
@@ -0,0 +1,15 @@
+channels:
+  - pytorch
+  - defaults
+dependencies:
+  - pip
+  - pip:
+      - cloudpickle
+      - torch
+      - pytest
+      - pytest-cov
+      - pytest-mock
+      - pytest-instafail
+      - pytest-rerunfailures
+      - pytest-error-for-skips
+      - expecttest
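
The new meltingpot environment file declares only pip dependencies under the pytorch and defaults channels. A usage sketch for creating and smoke-testing the environment; the name meltingpot-test is an assumption, since the file declares none:

# Hedged usage sketch for the new environment.yml above.
conda env create -n meltingpot-test -f .github/unittest/linux_libs/scripts_meltingpot/environment.yml
conda activate meltingpot-test
python -c "import torch, cloudpickle; print(torch.__version__)"   # smoke-test the pip deps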
(diffs for the remaining changed files were not loaded on this page)
