[BugFix] Make sure ParallelEnv does not overflow mem when policy requires grad #3335

Workflow file for this run

name: Continuous Benchmark (PR)
on:
  pull_request:
permissions: write-all
concurrency:
  # Documentation suggests ${{ github.head_ref }}, but that is only available on pull_request/pull_request_target triggers, so we use ${{ github.ref }}.
  # On the main branch we want every build to run to completion, even when merges land quickly, so it is easier to pinpoint which commit broke something.
  group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && format('ci-master-{0}', github.sha) || format('ci-{0}', github.ref) }}
  cancel-in-progress: true
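  # Net effect of the group expression above: builds on main get a per-commit group (so they are never
  # cancelled), while PR builds share a per-ref group, so cancel-in-progress only cancels superseded
  # runs on pull requests.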
jobs:
  benchmark_cpu:
    name: CPU Pytest benchmark
    runs-on: ubuntu-20.04
    steps:
      - name: Who triggered this?
        run: |
          echo "Action triggered by ${{ github.event.pull_request.html_url }}"
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 50 # this is to make sure we obtain the target base commit
      - name: Python Setup
        uses: actions/setup-python@v4
        with:
          python-version: 3.8
      - name: Setup Environment
        run: |
          python3 -m pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
          python3 -m pip install git+https://github.com/pytorch/tensordict
          python3 setup.py develop
          python3 -m pip install pytest pytest-benchmark
          python3 -m pip install "gym[accept-rom-license,atari]"
          python3 -m pip install dm_control
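      # The repository is installed with `setup.py develop` (an editable install), so the benchmarks
      # below import the checked-out source; the `git checkout` calls in the "Run benchmarks" step are
      # presumably enough to switch the Python code under test without reinstalling.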
      - name: Setup benchmarks
        run: |
          echo "BASE_SHA=$(echo ${{ github.event.pull_request.base.sha }} | cut -c1-8)" >> $GITHUB_ENV
          echo "HEAD_SHA=$(echo ${{ github.event.pull_request.head.sha }} | cut -c1-8)" >> $GITHUB_ENV
          echo "BASELINE_JSON=$(mktemp)" >> $GITHUB_ENV
          echo "CONTENDER_JSON=$(mktemp)" >> $GITHUB_ENV
          echo "PR_COMMENT=$(mktemp)" >> $GITHUB_ENV
      - name: Run benchmarks
        run: |
          cd benchmarks/
          RUN_BENCHMARK="pytest --rank 0 --benchmark-json "
          git checkout ${{ github.event.pull_request.base.sha }}
          $RUN_BENCHMARK ${{ env.BASELINE_JSON }}
          git checkout ${{ github.event.pull_request.head.sha }}
          $RUN_BENCHMARK ${{ env.CONTENDER_JSON }}
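      # The commenter action below posts a PR comment with the name/max/mean/ops metrics of the
      # contender run and compares it against the baseline on the 'ops' metric (higher is better),
      # using the comparison threshold of 5 configured here.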
      - name: Publish results
        uses: apbard/pytest-benchmark-commenter@v3
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          benchmark-file: ${{ env.CONTENDER_JSON }}
          comparison-benchmark-file: ${{ env.BASELINE_JSON }}
          benchmark-metrics: 'name,max,mean,ops'
          comparison-benchmark-metric: 'ops'
          comparison-higher-is-better: true
          comparison-threshold: 5
          benchmark-title: 'Result of CPU Benchmark Tests'
  benchmark_gpu:
    name: GPU Pytest benchmark
    runs-on: linux.g5.4xlarge.nvidia.gpu
    defaults:
      run:
        shell: bash -l {0}
    container:
      image: nvidia/cuda:12.3.0-base-ubuntu22.04
      options: --gpus all
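    # The job runs inside a minimal CUDA base image with `--gpus all`, which exposes the host GPUs to
    # the container; the PyTorch cu121 nightly wheels installed below ship the CUDA libraries they need,
    # which is presumably why a `base` image (rather than a full CUDA devel image) is sufficient.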
    steps:
      - name: Who triggered this?
        run: |
          echo "Action triggered by ${{ github.event.pull_request.html_url }}"
      - name: Install deps
        run: |
          export TZ=Europe/London
          export DEBIAN_FRONTEND=noninteractive # prevent tzdata from prompting interactively
          apt-get update -y
          apt-get install software-properties-common -y
          add-apt-repository ppa:git-core/candidate -y
          apt-get update -y
          apt-get upgrade -y
          apt-get -y install libglu1-mesa libgl1-mesa-glx libosmesa6 gcc curl g++ unzip wget libglfw3-dev libgles2-mesa-dev libglew-dev sudo git cmake libz-dev
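      # These apt packages are mostly rendering and build dependencies (OSMesa, GLFW, GLEW, GLES, cmake)
      # that dm_control and the Atari/gym environments presumably need for headless rendering and
      # compilation on the GPU runner.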
      - name: Check ldd --version
        run: ldd --version
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 50 # this is to make sure we obtain the target base commit
      - name: Python Setup
        uses: actions/setup-python@v4
        with:
          python-version: 3.8
      - name: Setup git
        run: git config --global --add safe.directory /__w/rl/rl
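      # Marking the workspace as a safe directory avoids git's "dubious ownership" error, which occurs
      # when the repository checkout is owned by a different user than the one running git inside the
      # container.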
      - name: Setup Path
        run: |
          echo /usr/local/bin >> $GITHUB_PATH
      - name: Setup Environment
        run: |
          python3 -m pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121
          python3 -m pip install git+https://github.com/pytorch/tensordict
          python3 setup.py develop
          python3 -m pip install pytest pytest-benchmark
          python3 -m pip install "gym[accept-rom-license,atari]"
          python3 -m pip install dm_control
      - name: Check GPU presence
        run: |
          python3 -c "import torch; assert torch.cuda.device_count()"
      - name: Setup benchmarks
        run: |
          echo "BASE_SHA=$(echo ${{ github.event.pull_request.base.sha }} | cut -c1-8)" >> $GITHUB_ENV
          echo "HEAD_SHA=$(echo ${{ github.event.pull_request.head.sha }} | cut -c1-8)" >> $GITHUB_ENV
          echo "BASELINE_JSON=$(mktemp)" >> $GITHUB_ENV
          echo "CONTENDER_JSON=$(mktemp)" >> $GITHUB_ENV
          echo "PR_COMMENT=$(mktemp)" >> $GITHUB_ENV
      - name: Run benchmarks
        run: |
          cd benchmarks/
          RUN_BENCHMARK="pytest --rank 0 --benchmark-json "
          git checkout ${{ github.event.pull_request.base.sha }}
          $RUN_BENCHMARK ${{ env.BASELINE_JSON }}
          git checkout ${{ github.event.pull_request.head.sha }}
          $RUN_BENCHMARK ${{ env.CONTENDER_JSON }}
      - name: Publish results
        uses: apbard/pytest-benchmark-commenter@v3
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          benchmark-file: ${{ env.CONTENDER_JSON }}
          comparison-benchmark-file: ${{ env.BASELINE_JSON }}
          benchmark-metrics: 'name,max,mean,ops'
          comparison-benchmark-metric: 'ops'
          comparison-higher-is-better: true
          comparison-threshold: 5
          benchmark-title: 'Result of GPU Benchmark Tests'