Model
PowerPaintv1
Describe the bug
I am running PowerPaint V1 using batch processing:
$ iopaint run --device=cuda --model=Sanster/PowerPaint-V1-stable-diffusion-inpainting --image=./image --mask=./mask --output=./powerpaint
The command fails with KeyError: 'sd_cpu_textencoder' (full traceback under Screenshots below). The same model runs fine through the localhost web UI, but not with batch processing in the terminal. My environment is under System Info below.
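From the traceback, the crash appears to happen because batch mode constructs ModelManager with only name and device (batch_processing.py:71), while PowerPaint's init_model indexes kwargs["sd_cpu_textencoder"] directly (power_paint.py:59). Below is a minimal sketch of that call chain as I understand it; the *_like names are simplified stand-ins, not the actual iopaint code.

# Simplified repro of the failing call chain (stand-in names, not iopaint source).
# Batch mode passes only name/device, so web-UI options never reach init_model,
# which indexes kwargs directly instead of falling back to a default.

def init_model_like(device: str, **kwargs):
    if kwargs["sd_cpu_textencoder"]:  # mirrors power_paint.py:59 -> KeyError
        print("Run Stable Diffusion TextEncoder on CPU")

class ModelManagerLike:
    def __init__(self, name: str, device: str, **kwargs):
        # mirrors batch_processing.py:71: ModelManager(name=model, device=device),
        # so kwargs is empty here in batch mode
        init_model_like(device, **kwargs)

try:
    ModelManagerLike(
        name="Sanster/PowerPaint-V1-stable-diffusion-inpainting", device="cuda"
    )
except KeyError as e:
    print(f"Reproduced: KeyError: {e}")  # KeyError: 'sd_cpu_textencoder'

The web UI presumably works because the server start-up fills these options in before the model is created, whereas the batch path does not.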
Screenshots
If applicable, add screenshots to help explain your problem.
Loading pipeline components...: 0%| | 0/7 [00:00<?, ?it/s]
Some weights of StableDiffusionSafetyChecker were not initialized from the model checkpoint at /home/rei/.cache/huggingface/hub/models--Sanster--PowerPaint-V1-stable-diffusion-inpainting/snapshots/4104236baefdb2a1dd0f32812d8edbbfc4efd164/safety_checker and are newly initialized: ['vision_model.vision_model.embeddings.position_ids']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
Loading pipeline components...: 100%|█████████████| 7/7 [00:00<00:00, 8.04it/s]
╭───────────────────── Traceback (most recent call last) ──────────────────────╮
│ /home/rei/anaconda3/envs/gen_left_right/lib/python3.9/site-packages/iopaint/ │
│ cli.py:94 in run │
│ │
│ 91 │ │
│ 92 │ from iopaint.batch_processing import batch_inpaint │
│ 93 │ │
│ ❱ 94 │ batch_inpaint(model, device, image, mask, output, config, concat) │
│ 95 │
│ 96 │
│ 97 @typer_app.command(help="Start IOPaint server") │
│ │
│ /home/rei/anaconda3/envs/gen_left_right/lib/python3.9/site-packages/iopaint/ │
│ batch_processing.py:71 in batch_inpaint │
│ │
│ 68 │ │ │ inpaint_request = InpaintRequest(**json.load(f)) │
│ 69 │ │ logger.info(f"Using config: {inpaint_request}") │
│ 70 │ │
│ ❱ 71 │ model_manager = ModelManager(name=model, device=device) │
│ 72 │ first_mask = list(mask_paths.values())[0] │
│ 73 │ │
│ 74 │ console = Console() │
│ │
│ /home/rei/anaconda3/envs/gen_left_right/lib/python3.9/site-packages/iopaint/ │
│ model_manager.py:39 in __init__ │
│ │
│ 36 │ │ │
│ 37 │ │ self.enable_powerpaint_v2 = kwargs.get("enable_powerpaint_v2", │
│ 38 │ │ │
│ ❱ 39 │ │ self.model = self.init_model(name, device, **kwargs) │
│ 40 │ │
│ 41 │ @property │
│ 42 │ def current_model(self) -> ModelInfo: │
│ │
│ /home/rei/anaconda3/envs/gen_left_right/lib/python3.9/site-packages/iopaint/ │
│ model_manager.py:72 in init_model │
│ │
│ 69 │ │ │ return PowerPaintV2(device, **kwargs) │
│ 70 │ │ │
│ 71 │ │ if model_info.name in models: │
│ ❱ 72 │ │ │ return models[name](device, **kwargs) │
│ 73 │ │ │
│ 74 │ │ if model_info.model_type in [ │
│ 75 │ │ │ ModelType.DIFFUSERS_SD_INPAINT, │
│ │
│ /home/rei/anaconda3/envs/gen_left_right/lib/python3.9/site-packages/iopaint/ │
│ model/base.py:277 in __init__ │
│ │
│ 274 │ def __init__(self, device, **kwargs): │
│ 275 │ │ self.model_info = kwargs["model_info"] │
│ 276 │ │ self.model_id_or_path = self.model_info.path │
│ ❱ 277 │ │ super().__init__(device, **kwargs) │
│ 278 │ │
│ 279 │ @torch.no_grad() │
│ 280 │ def __call__(self, image, mask, config: InpaintRequest): │
│ │
│ /home/rei/anaconda3/envs/gen_left_right/lib/python3.9/site-packages/iopaint/ │
│ model/base.py:35 in __init__ │
│ │
│ 32 │ │ """ │
│ 33 │ │ device = switch_mps_device(self.name, device) │
│ 34 │ │ self.device = device │
│ ❱ 35 │ │ self.init_model(device, **kwargs) │
│ 36 │ │
│ 37 │ @abc.abstractmethod │
│ 38 │ def init_model(self, device, **kwargs): ... │
│ │
│ /home/rei/anaconda3/envs/gen_left_right/lib/python3.9/site-packages/iopaint/ │
│ model/power_paint/power_paint.py:59 in init_model │
│ │
│ 56 │ │ │ self.model.enable_sequential_cpu_offload(gpu_id=0) │
│ 57 │ │ else: │
│ 58 │ │ │ self.model = self.model.to(device) │
│ ❱ 59 │ │ │ if kwargs["sd_cpu_textencoder"]: │
│ 60 │ │ │ │ logger.info("Run Stable Diffusion TextEncoder on CPU") │
│ 61 │ │ │ │ self.model.text_encoder = CPUTextEncoderWrapper( │
│ 62 │ │ │ │ │ self.model.text_encoder, torch_dtype │
╰──────────────────────────────────────────────────────────────────────────────╯
KeyError: 'sd_cpu_textencoder'
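A possible workaround until this is addressed upstream: make the lookup at power_paint.py:59 tolerate a missing key. This is only a suggestion on my side, not an official fix; the surrounding lines are copied from the traceback above (with the closing parenthesis inferred), and only the kwargs lookup changes.

# iopaint/model/power_paint/power_paint.py, around line 59 (suggested edit only)
if kwargs.get("sd_cpu_textencoder", False):  # was: kwargs["sd_cpu_textencoder"]
    logger.info("Run Stable Diffusion TextEncoder on CPU")
    self.model.text_encoder = CPUTextEncoderWrapper(
        self.model.text_encoder, torch_dtype
    )

If other options are indexed the same way further down in init_model, they would presumably need the same treatment.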
System Info
This is my env