Commit bd2ea41
replace with custom sdpa before export
metascroy committed Apr 17, 2024
1 parent 0028ccf commit bd2ea41
Showing 2 changed files with 14 additions and 1 deletion.
export_et.py: 4 additions & 0 deletions
@@ -24,6 +24,7 @@
 #     XnnpackDynamicallyQuantizedPartitioner,
 # )
 from executorch_portable_utils import export_to_edge
+from export_et_util import replace_attention_with_sdpa_attention
 
 from quantize import get_precision
 from torch._export import capture_pre_autograd_graph
@@ -106,6 +107,9 @@ def export_model(model, device, output_path, args=None) -> str:  # noqa: C901
     else:
         raise ValueError(f"Unsupported dtype for ET export: {target_precision}")
 
+
+
+    replace_attention_with_sdpa_attention(export_model)
     with torch.nn.attention.sdpa_kernel(
         [torch.nn.attention.SDPBackend.MATH]
     ), torch.no_grad():
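Why the call site sits where it does: graph capture freezes whatever modules the eager model contains, so the attention swap has to run before capture. A minimal sketch of that ordering, reusing the commit's helper and context managers; the wrapper name export_with_swapped_attention and the example_inputs tuple are illustrative, not part of this commit:

import torch
from torch._export import capture_pre_autograd_graph

from export_et_util import replace_attention_with_sdpa_attention

def export_with_swapped_attention(model, example_inputs):
    # Swap eager Attention modules first so the custom SDPA is what gets traced.
    replace_attention_with_sdpa_attention(model)
    # Mirror the commit's context managers: math-only SDPA backend, no autograd.
    with torch.nn.attention.sdpa_kernel(
        [torch.nn.attention.SDPBackend.MATH]
    ), torch.no_grad():
        # example_inputs is a tuple of sample positional args for the model.
        return capture_pre_autograd_graph(model, example_inputs)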
export_et_util.py: 10 additions & 1 deletion
@@ -1,7 +1,8 @@
 from executorch.examples.models.llama2.custom_ops import sdpa_with_kv_cache
+from build.model import Attention
 from torch import nn
 
-class AttentionWithSDPA(nn.Module):
+class SDPAAttention(nn.Module):
     def __init__(self, attention: Attention):
         super().__init__()
 
@@ -51,3 +52,11 @@ def forward(
         )
         output = output.view(bsz, seqlen, self.dim)
         return self.wo(output)
+
+
+def replace_attention_with_sdpa_attention(module: nn.Module):
+    for name, child in module.named_children():
+        if isinstance(child, Attention):
+            setattr(module, name, SDPAAttention(child))
+        else:
+            replace_attention_with_sdpa_attention(child)
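The helper recurses with named_children and swaps matches in place via setattr; because the swap overwrites an existing key in the module's _modules dict rather than adding or removing one, iterating while replacing is safe. A self-contained toy sketch of the same pattern; ToyAttention and WrappedAttention are hypothetical stand-ins for the repo's Attention and SDPAAttention:

from torch import nn

class ToyAttention(nn.Module):
    # Stand-in for build.model.Attention.
    def forward(self, x):
        return x

class WrappedAttention(nn.Module):
    # Stand-in for SDPAAttention: wraps the module it replaces.
    def __init__(self, inner: nn.Module):
        super().__init__()
        self.inner = inner

    def forward(self, x):
        return self.inner(x)

def replace_attention(module: nn.Module):
    # Same shape as replace_attention_with_sdpa_attention above:
    # swap matching children in place, recurse into everything else.
    for name, child in module.named_children():
        if isinstance(child, ToyAttention):
            setattr(module, name, WrappedAttention(child))
        else:
            replace_attention(child)

model = nn.Sequential(nn.Linear(4, 4), nn.Sequential(ToyAttention()))
replace_attention(model)
assert isinstance(model[1][0], WrappedAttention)  # nested swap applied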
