Skip to content

Commit

Permalink
Add Enforce Eager Flag (#499)
Browse files Browse the repository at this point in the history
* first stab at pipeline parallel for vllm

* pipeline didn't work, enforce eager time

* Revert "first stab at pipeline parallel for vllm"

This reverts commit 29a0d31.
  • Loading branch information
hamishivi authored Jan 6, 2025
1 parent 5eb8cfe commit d918192
Show file tree
Hide file tree
Showing 2 changed files with 5 additions and 0 deletions.
3 changes: 3 additions & 0 deletions open_instruct/ppo_vllm_thread_ray_gtrl.py
Original file line number Diff line number Diff line change
Expand Up @@ -251,6 +251,8 @@ class Args:
"""number of vLLM Engines, set to 0 to disable vLLM"""
vllm_tensor_parallel_size: int = 1
"""tensor parallel size of vLLM Engine for multi-GPU inference"""
vllm_enforce_eager: bool = False
"""whether to enforce eager mode for vLLM -- slow inference but needed for multi-node"""
vllm_sync_backend: str = "nccl"
"""DeepSpeed -> vLLM weight sync backend"""
enable_prefix_caching: bool = False
Expand Down Expand Up @@ -1683,6 +1685,7 @@ def main(args: Args, dataset_config: DatasetConfig, model_config: ModelConfig):
vllm_engines = create_vllm_engines(
args.vllm_num_engines,
args.vllm_tensor_parallel_size,
args.vllm_enforce_eager,
model_config.model_name_or_path,
model_config.model_revision,
args.seed,
Expand Down
2 changes: 2 additions & 0 deletions open_instruct/vllm_utils2.py
Original file line number Diff line number Diff line change
Expand Up @@ -192,6 +192,7 @@ def stop_remote_worker_execution_loop(self):
def create_vllm_engines(
num_engines: int,
tensor_parallel_size: int,
enforce_eager: bool,
pretrain: str,
revision: str,
seed: int,
Expand Down Expand Up @@ -224,6 +225,7 @@ def create_vllm_engines(
tokenizer_revision=revision,
trust_remote_code=True,
tensor_parallel_size=tensor_parallel_size,
enforce_eager=enforce_eager,
dtype="bfloat16",
seed=seed + i,
enable_prefix_caching=enable_prefix_caching,
Expand Down

0 comments on commit d918192

Please sign in to comment.