build a "linearized" unet #19

Open · wants to merge 2 commits into base: main
README.md (2 changes: 1 addition & 1 deletion)

````diff
@@ -15,7 +15,7 @@ Early days of a lightweight MLIR Python frontend with support for PyTorch (throu
 Just

 ```shell
-pip install - requirements.txt
+pip install -r requirements.txt
 pip install . --no-build-isolation
 ```
````
examples/unet.py (113 changes: 89 additions & 24 deletions)

```diff
@@ -1,27 +1,92 @@
-import pi
-from pi import nn
-from pi.mlir.utils import pipile
-from pi.utils.annotations import annotate_args
-from pi.models.unet import UNet2DConditionModel
-
-
-class MyUNet(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.unet = UNet2DConditionModel()
-
-    @annotate_args(
-        [
-            None,
-            ([-1, -1, -1, -1], pi.float32, True),
-        ]
+import inspect
+import re
+
+import numpy as np
+import torch
+
+from pi.lazy_importer.run_lazy_imports import do_package_imports, do_hand_imports
+from pi.lazy_importer import lazy_imports
+
+
+#
+
+
+def floats_tensor(shape, scale=1.0, rng=None, name=None):
+    total_dims = 1
+    for dim in shape:
+        total_dims *= dim
+    values = []
+    for _ in range(total_dims):
+        values.append(np.random.random() * scale)
+    return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()
+
+
+def run(
+    CTor,
+    down_block_types=("CrossAttnDownBlock2D", "ResnetDownsampleBlock2D"),
+    up_block_types=("UpBlock2D", "ResnetUpsampleBlock2D"),
+):
+    unet = CTor(
+        **{
+            "block_out_channels": (32, 64),
+            "down_block_types": down_block_types,
+            "up_block_types": up_block_types,
+            "cross_attention_dim": 32,
+            "attention_head_dim": 8,
+            "out_channels": 4,
+            "in_channels": 4,
+            "layers_per_block": 2,
+            "sample_size": 32,
+        }
     )
-    def forward(self, x):
-        y = self.resnet(x)
-        return y
+    unet.eval()
+    batch_size = 4
+    num_channels = 4
+    sizes = (32, 32)
+
+    noise = floats_tensor((batch_size, num_channels) + sizes)
+    time_step = torch.tensor([10])
+    encoder_hidden_states = floats_tensor((batch_size, 4, 32))
+    output = unet(noise, time_step, encoder_hidden_states)
+
+
+def make_linearized():
+    def filter(ret):
+        try:
+            MODULE_TARGET = lambda x: re.match(
+                r"(huggingface|torch|diffusers)", inspect.getmodule(x).__package__
+            )
+            return MODULE_TARGET(ret)
+        except:
+            return None
+
+    lazy_imports.MODULE_TARGET = filter
+
+    def _inner():
+
+        from diffusers import UNet2DConditionModel
+
+        run(
+            UNet2DConditionModel,
+            down_block_types=("CrossAttnDownBlock2D", "ResnetDownsampleBlock2D"),
+            up_block_types=("UpBlock2D", "ResnetUpsampleBlock2D"),
+        )
+        run(
+            UNet2DConditionModel,
+            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
+            up_block_types=("CrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
+        )
+
+    prefix = "from pi.models.unet.prologue import CONFIG_NAME, LORA_WEIGHT_NAME"
+    name = "unet_linearized"
+    do_package_imports(_inner, prefix, name)
+
+
+def run_linearized():
+    from pi.models.unet import linearized
+
+    run(linearized.UNet2DConditionModel)
 
 
-test_module = MyUNet()
-x = pi.randn((1, 3, 64, 64))
-mlir_module = pipile(test_module, example_args=(x,))
-print(mlir_module)
+if __name__ == "__main__":
+    make_linearized()
```
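The new example drops the hand-written `MyUNet`/`pipile` flow in favor of tracing diffusers' reference `UNet2DConditionModel` through the new `pi.lazy_importer` machinery: `run` smoke-tests a constructor with a small config, `make_linearized` traces two block-type combinations (presumably to cover more code paths) and asks `do_package_imports` to emit a flattened `unet_linearized` module, and `run_linearized` re-runs the same harness against the generated `pi.models.unet.linearized`. The `filter` installed on `lazy_imports.MODULE_TARGET` keeps tracing scoped to objects whose packages match `huggingface`, `torch`, or `diffusers`.

A minimal usage sketch, assuming the repo root is on `sys.path` and that `make_linearized()` must run before `run_linearized()` (that ordering is an inference from the code, not stated in the diff):

```python
# Sketch only: exercise both entry points of the new example in sequence.
import examples.unet as unet_example  # assumes the repo root is on sys.path

# 1. Trace diffusers' UNet2DConditionModel and emit the linearized module.
unet_example.make_linearized()

# 2. Smoke-test the generated pi.models.unet.linearized model with the same
#    run() harness used on the reference model.
unet_example.run_linearized()
```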
pi/lazy_importer/__init__.py (new, empty file; marks pi/lazy_importer as an importable package for the example's `from pi.lazy_importer import lazy_imports`)