Skip to content

Commit d91085a

Browse files
committed
fix
1 parent 007c988 commit d91085a

File tree

2 files changed: +3 −3 lines changed

tests/e2e_test/test_e2e.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ def generate_launch_command(result_filename: str = "", launch_ray_cluster: bool
     f"--trust-remote-code "
     f"--request-migration-policy LCFS "
    f"--migration-backend {migration_backend} "
-    f"--migration-cache-blocks 32 "
+    f"--migration-buffer-blocks 32 "
    f"--tensor-parallel-size 1 "
    f"--request-output-queue-port {1234+port} "
    f"{'--enable-pd-disagg ' if enable_pd_disagg else ''} "

tests/unit_test/llumlet/test_engine_step_exception.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ def __init__(self, *args, **kwargs) -> None:
     def set_error_step(self, broken: bool):
         self.backend_engine._stop_event.set()

-        time.sleep(3)
+        time.sleep(30)

         assert self.backend_engine.state == EngineState.STOPPED
4343

@@ -55,7 +55,7 @@ async def raise_error_step():
 @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Need at least 1 GPU to run the test.")
 def test_engine_step_exception(setup_ray_env):
     engine_args = EngineArgs(model="facebook/opt-125m", max_model_len=8, worker_use_ray=True)
-    migration_config = MigrationConfig("LCFS", "rpc", 16, 1, 4, 5, 20, 1)
+    migration_config = MigrationConfig("LCFS", "rpc", 16, 1, 4, 5, 20, 2)
     node_id = ray.get_runtime_context().get_node_id()
     scheduling_strategy = NodeAffinitySchedulingStrategy(node_id=node_id, soft=False)
6161

0 commit comments

Comments (0)