From d0f27d2c8bbbff4da739e876cf051117eeb4d7e8 Mon Sep 17 00:00:00 2001
From: Marc Sun
Date: Fri, 29 Sep 2023 08:48:49 +0000
Subject: [PATCH] change to kwargs

---
 optimum/bettertransformer/models/attention.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/optimum/bettertransformer/models/attention.py b/optimum/bettertransformer/models/attention.py
index 829609cdcbd..a7c82569cf3 100644
--- a/optimum/bettertransformer/models/attention.py
+++ b/optimum/bettertransformer/models/attention.py
@@ -583,7 +583,7 @@ def llama_forward(
     past_key_value: Optional[Tuple[torch.Tensor]] = None,
     output_attentions: bool = False,
     use_cache: bool = False,
-    padding_mask: Optional[torch.LongTensor] = None,
+    **kwargs,
 ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
     if output_attentions is True:
         raise ValueError("output_attentions=True can not be supported with BetterTransformer.")
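
A minimal sketch of the pattern this one-line change applies, for illustration only:
replacing the explicit padding_mask parameter with **kwargs lets a monkey-patched
forward absorb keyword arguments that newer transformers releases may pass to the
attention module, rather than pinning the patched signature to one specific name.
The function below is a hypothetical stand-in, not the real BetterTransformer
llama_forward; its body is a placeholder.

    import torch
    from typing import Optional, Tuple

    def patched_forward(
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,  # absorbs padding_mask and any keyword added upstream later
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        if output_attentions is True:
            raise ValueError("output_attentions=True can not be supported with BetterTransformer.")
        # Placeholder body; the real patched forward computes attention via
        # torch.nn.functional.scaled_dot_product_attention.
        return hidden_states, None

    # New keywords introduced by a transformers upgrade no longer require
    # editing the patched signature: the call is accepted as-is.
    out, _ = patched_forward(torch.zeros(1, 4, 8), padding_mask=None)

With the previous signature, each new upstream keyword had to be mirrored
explicitly (as padding_mask was); **kwargs makes the patch robust to such
signature churn at the cost of silently ignoring arguments it does not use.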