TODO
NeurIPS Applied AI Research Hackathon 2024
Latent Consistency Models trained using Direct Preference Optimization
on direct preference data
def load_pipe(use_dpo: bool = False) -> DiffusionPipeline:
    """Build an SDXL text-to-image pipeline set up for LCM-LoRA fast sampling.

    Parameters
    ----------
    use_dpo:
        When True, substitute the DPO-finetuned UNet
        ("mhdang/dpo-sdxl-text2image-v1") for the base SDXL UNet.

    Returns
    -------
    DiffusionPipeline
        A ``StableDiffusionXLPipeline`` with the LCM-LoRA adapter active,
        the LCM scheduler installed, and CPU offload + VAE tiling enabled.
    """
    # fp16-safe VAE replacement for the stock SDXL VAE.
    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
    )

    # Optionally swap in the DPO-trained UNet via kwargs expansion so the
    # from_pretrained call below is identical in both branches.
    unet_params = {}
    if use_dpo:
        unet_params = {
            "unet": UNet2DConditionModel.from_pretrained(
                "mhdang/dpo-sdxl-text2image-v1",
                subfolder="unet",
                torch_dtype=torch.float16,
            )
        }

    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        vae=vae,
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant="fp16",
        **unet_params,
    )

    # Attach and activate the LCM-LoRA adapter for few-step inference.
    pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm")
    pipe.set_adapters(["lcm"], adapter_weights=[1.0])

    # Memory savers: offload idle submodules to CPU; tile VAE decode.
    pipe.enable_model_cpu_offload()
    pipe.enable_vae_tiling()

    # LCM sampling needs the LCM scheduler; keep the existing scheduler config.
    pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
    return pipe
pipe = load_pipe(False)
https://docs.google.com/presentation/d/11yQJeaHxmU58AsHpO23_oSvRH-aJyc3j5Myx4_Lnlu0/edit#slide=id.p
- ldm_eval
- lcm_eval
- ldm_eval_accelerated
- eval server