diff --git a/server/lorax_server/models/flash_causal_lm.py b/server/lorax_server/models/flash_causal_lm.py
index 80d50d1f6..7f8355483 100644
--- a/server/lorax_server/models/flash_causal_lm.py
+++ b/server/lorax_server/models/flash_causal_lm.py
@@ -784,11 +784,12 @@ def warmup(self, batch: FlashCausalLMBatch, max_new_tokens: int):
             )
 
             with warmup_mode():
+                logger.info("Warming up to max_total_tokens: {}", max_new_tokens)
                 with tqdm(total=max_new_tokens, desc="Warmup to max_total_tokens") as pbar:
-                    for i in range(max_new_tokens):
+                    for _ in range(max_new_tokens):
                         _, batch = self.generate_token(batch, is_warmup=True)
-                        logger.info("Warmed up to token {}", i)
                         pbar.update(1)
+                logger.info("Finished generating warmup tokens")
         except RuntimeError as e:
             if "CUDA out of memory" in str(e) or isinstance(e, torch.cuda.OutOfMemoryError):
                 raise RuntimeError(
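The sketch below illustrates, in isolation, the progress-reporting pattern this patch switches to: a single log line before the warmup loop, a tqdm bar updated once per generated token, and a single log line after the loop, instead of a `logger.info` call on every iteration. `DummyModel` and its `generate_token` are hypothetical stand-ins for `FlashCausalLM.generate_token`; `loguru`'s `logger` and `tqdm` mirror what the real module already imports. This is a minimal sketch under those assumptions, not the actual warmup implementation.

```python
# Minimal sketch of the warmup progress-reporting pattern (assumptions noted above).
from loguru import logger
from tqdm import tqdm


class DummyModel:
    """Hypothetical stand-in for the model class; not part of the patch."""

    def generate_token(self, batch, is_warmup=False):
        # Stand-in for the real decode step; returns (generations, next batch).
        return None, batch


def warmup_loop(model, batch, max_new_tokens):
    # One log line up front instead of logging inside the loop.
    logger.info("Warming up to max_total_tokens: {}", max_new_tokens)
    with tqdm(total=max_new_tokens, desc="Warmup to max_total_tokens") as pbar:
        for _ in range(max_new_tokens):
            _, batch = model.generate_token(batch, is_warmup=True)
            pbar.update(1)  # per-token progress goes to the bar, not the log
    logger.info("Finished generating warmup tokens")


if __name__ == "__main__":
    warmup_loop(DummyModel(), batch=None, max_new_tokens=8)
```

The design choice here is to keep per-token feedback visible (via the progress bar) while cutting the log output from one line per warmup token to two lines per warmup run.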