From acea9bcec052400a6dfe2565b351c80147466d3a Mon Sep 17 00:00:00 2001
From: Travis Addair
Date: Thu, 21 Mar 2024 16:25:20 -0700
Subject: [PATCH] Added logging

---
 server/lorax_server/models/flash_causal_lm.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/server/lorax_server/models/flash_causal_lm.py b/server/lorax_server/models/flash_causal_lm.py
index 80d50d1f6..7f8355483 100644
--- a/server/lorax_server/models/flash_causal_lm.py
+++ b/server/lorax_server/models/flash_causal_lm.py
@@ -784,11 +784,12 @@ def warmup(self, batch: FlashCausalLMBatch, max_new_tokens: int):
             )
             with warmup_mode():
+                logger.info("Warming up to max_total_tokens: {}", max_new_tokens)
                 with tqdm(total=max_new_tokens, desc="Warmup to max_total_tokens") as pbar:
-                    for i in range(max_new_tokens):
+                    for _ in range(max_new_tokens):
                         _, batch = self.generate_token(batch, is_warmup=True)
-                        logger.info("Warmed up to token {}", i)
                         pbar.update(1)
+                logger.info("Finished generating warmup tokens")
         except RuntimeError as e:
             if "CUDA out of memory" in str(e) or isinstance(e, torch.cuda.OutOfMemoryError):
                 raise RuntimeError(
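
For context, a minimal standalone sketch of the logging pattern this patch adopts: one log line before the warmup loop, a tqdm progress bar for per-token progress, and one log line after the loop, instead of a logger.info call on every iteration. It assumes loguru and tqdm are installed; warmup_loop and fake_generate_token are hypothetical stand-ins for the warmup method and FlashCausalLM.generate_token, not part of the lorax_server code.

# Illustrative sketch only, not part of the patch.
from loguru import logger
from tqdm import tqdm


def fake_generate_token(batch):
    # Stand-in for self.generate_token(batch, is_warmup=True).
    return None, batch


def warmup_loop(batch, max_new_tokens: int):
    # Loguru accepts brace-style positional formatting, matching the patch.
    logger.info("Warming up to max_total_tokens: {}", max_new_tokens)
    with tqdm(total=max_new_tokens, desc="Warmup to max_total_tokens") as pbar:
        for _ in range(max_new_tokens):
            _, batch = fake_generate_token(batch)
            pbar.update(1)
    logger.info("Finished generating warmup tokens")


if __name__ == "__main__":
    warmup_loop(batch={}, max_new_tokens=8)

Logging once before and once after the loop keeps the server log readable while the tqdm bar still reports per-token progress during warmup.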