This repository shows how to fine-tune the runwayml/stable-diffusion-inpainting checkpoint with LoRA for future frame generation, and how to run inference with the fine-tuned model.
conda create --name env_name
conda activate env_name
pip3 install diffusers["torch"] transformers
pip3 install diffusers["flax"] transformers
pip3 install accelerate
pip3 install git+https://github.com/huggingface/diffusers
pip3 install opencv-python
pip3 install pillow
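
To confirm the environment is set up correctly, a quick sanity check such as the one below can be run. This snippet is not part of the repository; it simply imports the installed packages and prints their versions.

# quick sanity check: import the installed packages and print their versions
import diffusers
import transformers
import accelerate
import cv2
import PIL

print("diffusers:", diffusers.__version__)
print("transformers:", transformers.__version__)
print("accelerate:", accelerate.__version__)
print("opencv:", cv2.__version__)
print("pillow:", PIL.__version__)
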
cd future_Frame_Generation/data_prep
python3 frame_extractor.py <path-to-folder_of_videos>
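
frame_extractor.py walks the given folder of videos and writes out individual frames for training. Its exact behaviour is defined by the script itself; the sketch below only illustrates the general idea with OpenCV, and the output folder layout and file names are assumptions.

# illustrative sketch of per-video frame extraction with OpenCV
# (the actual frame_extractor.py may use different paths, names, and sampling)
import os
import sys
import cv2

video_dir = sys.argv[1]                      # <path-to-folder_of_videos>
out_dir = os.path.join(video_dir, "frames")  # assumed output location
os.makedirs(out_dir, exist_ok=True)

for name in os.listdir(video_dir):
    path = os.path.join(video_dir, name)
    cap = cv2.VideoCapture(path)
    if not cap.isOpened():
        continue                             # skip files that are not readable videos
    idx = 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        out_name = f"{os.path.splitext(name)[0]}_{idx:05d}.png"
        cv2.imwrite(os.path.join(out_dir, out_name), frame)
        idx += 1
    cap.release()
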
chmod +x ffp_lora_double_cond.sh
./ffp_lora_double_cond.sh
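
ffp_lora_double_cond.sh launches the LoRA fine-tuning run; the actual training code lives in this repository. As a rough idea of the core step, diffusers-style LoRA training freezes the UNet of the base checkpoint and attaches low-rank adapters to its attention projections via peft (an extra dependency, installed separately), as sketched below. The rank and target modules here are illustrative, not necessarily the values used by the script.

# rough sketch of the LoRA-attachment step in diffusers-style fine-tuning
# (hyperparameters and module names are illustrative; see the repo's training script)
from diffusers import UNet2DConditionModel
from peft import LoraConfig

# load the UNet of the inpainting checkpoint and freeze its original weights
unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-inpainting", subfolder="unet"
)
unet.requires_grad_(False)

# attach low-rank adapters to the attention projections; only these are trained
lora_config = LoraConfig(
    r=4,
    lora_alpha=4,
    init_lora_weights="gaussian",
    target_modules=["to_k", "to_q", "to_v", "to_out.0"],
)
unet.add_adapter(lora_config)

trainable = sum(p.numel() for p in unet.parameters() if p.requires_grad)
print(f"trainable LoRA parameters: {trainable}")
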
python3 inference_ffp_double_cond.py
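
inference_ffp_double_cond.py performs the future-frame generation with the repository's double-conditioning setup. The minimal sketch below only shows the generic pattern of loading the base inpainting pipeline together with the trained LoRA weights and running one inpainting call; the weight path, prompt, and conditioning inputs are placeholders, not the script's actual arguments.

# minimal generic sketch: base inpainting pipeline + trained LoRA weights
# (paths, prompt, and input images are placeholders)
import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("path/to/lora_output_dir")  # directory produced by training

init_image = Image.open("last_observed_frame.png").convert("RGB").resize((512, 512))
mask_image = Image.open("region_to_predict_mask.png").convert("RGB").resize((512, 512))

result = pipe(
    prompt="next video frame",
    image=init_image,
    mask_image=mask_image,
).images[0]
result.save("predicted_frame.png")
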
References:
https://github.com/huggingface/diffusers/tree/main
https://huggingface.co/blog/lora
https://huggingface.co/runwayml/stable-diffusion-inpainting