Skip to content

Commit

Permalink
Change int32 to int when creating and casting the padding mask.
Browse files Browse the repository at this point in the history
  • Loading branch information
SamanehSaadat committed May 22, 2024
1 parent 2fe2967 commit 7ecd466
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions keras_nlp/src/models/falcon/falcon_transformer_decoder.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ def call(
mask = decoder_padding_mask
if mask is None:
batch_size, seq_length = ops.shape(inputs)[:2]
mask = ops.ones((batch_size, seq_length), dtype="int32")
mask = ops.ones((batch_size, seq_length), dtype="int")
alibi = self._build_alibi_tensor(self.num_attention_heads, mask)

# Attention block.
Expand Down Expand Up @@ -225,7 +225,7 @@ def _build_alibi_tensor(self, num_heads, attention_mask):
self._get_slopes(num_heads),
dtype=self.compute_dtype,
) # num_heads
attention_mask = ops.cast(attention_mask, dtype="int32")
attention_mask = ops.cast(attention_mask, dtype="int")
arange_tensor = (
((ops.cumsum(attention_mask, axis=-1) - 1) * attention_mask)
)[:, None, :]
Expand Down

0 comments on commit 7ecd466

Please sign in to comment.