# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md

import os

import torch
from diffusers import DiffusionPipeline
from cog import BasePredictor, Input, Path


class Predictor(BasePredictor):
    def setup(self) -> None:
        """Load the model into memory to make running multiple predictions efficient"""
        # The official LCM pipeline is supported in diffusers now:
        # self.pipe = DiffusionPipeline.from_pretrained(
        #     "SimianLuo/LCM_Dreamshaper_v7",
        #     cache_dir="model_cache",
        #     local_files_only=True,
        # )
        # To keep using the older community pipeline, pin revision="fb9c5d1":
        self.pipe = DiffusionPipeline.from_pretrained(
            "SimianLuo/LCM_Dreamshaper_v7",
            custom_pipeline="latent_consistency_txt2img",
            custom_revision="main",
            revision="fb9c5d1",
            cache_dir="model_cache",
            local_files_only=True,
        )
        self.pipe.to(torch_device="cuda", torch_dtype=torch.float32)
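        # Hedged aside (not in the original): the community pipeline's `to()`
        # also accepts torch_dtype=torch.float16, which may roughly halve GPU
        # memory use; fp16 stability for this LCM pipeline is an assumption.
        # self.pipe.to(torch_device="cuda", torch_dtype=torch.float16)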

    def predict(
        self,
        prompt: str = Input(
            description="Input prompt",
            default="Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
        ),
        width: int = Input(
            description="Width of output image. Lower the setting if out of memory.",
            default=768,
        ),
        height: int = Input(
            description="Height of output image. Lower the setting if out of memory.",
            default=768,
        ),
        num_images: int = Input(
            description="Number of images to output.",
            ge=1,
            le=4,
            default=1,
        ),
        num_inference_steps: int = Input(
            description="Number of denoising steps (1-50). LCM supports fast inference even with <= 4 steps. Recommended: 1-8 steps.",
            ge=1,
            le=50,
            default=8,
        ),
        guidance_scale: float = Input(
            description="Scale for classifier-free guidance", ge=1, le=20, default=8.0
        ),
        seed: int = Input(
            description="Random seed. Leave blank to randomize the seed", default=None
        ),
    ) -> list[Path]:
"""Run a single prediction on the model"""
if seed is None:
seed = int.from_bytes(os.urandom(2), "big")
print(f"Using seed: {seed}")
torch.manual_seed(seed)
result = self.pipe(
prompt=prompt,
width=width,
height=height,
guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps,
num_images_per_prompt=num_images,
lcm_origin_steps=50,
output_type="pil",
).images
output_paths = []
for i, sample in enumerate(result):
output_path = f"/tmp/out-{i}.png"
sample.save(output_path)
output_paths.append(Path(output_path))
return output_paths
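

# Usage sketch (an assumption, not part of the original file): with the Cog CLI
# installed, a prediction can be run from the repo root, e.g.
#   cog predict -i prompt="Self-portrait oil painting, a beautiful cyborg with golden hair, 8k" -i num_inference_steps=4
# Input names match the `Input` fields declared on `predict()` above.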