diff --git a/calculate_background.py b/calculate_background.py
index 7f6b81f..1bb0fe2 100644
--- a/calculate_background.py
+++ b/calculate_background.py
@@ -9,7 +9,7 @@
 ):
     for name in files:
         className = path.split("/")[-1]
-        if ".tif" in name:
+        if ".png" in name:
             img = np.asarray(Image.open(os.path.join(path, name)))
             cumPer += np.sum(img == 0) / (img.shape[0] * img.shape[1])
             num += 1
diff --git a/main_pseudo.py b/main_pseudo.py
index 1041f4d..6037d7c 100644
--- a/main_pseudo.py
+++ b/main_pseudo.py
@@ -810,7 +810,7 @@ def save_pil_image(img, path):
             ).squeeze(0)

             # print(pseudo_mask.shape)
-            name = f'{filenames[j].split("/")[1]}.tif'
+            name = f'{filenames[j].split("/")[1]}.png'
             save_pil_image(
                 pseudo_mask.detach().cpu().numpy().astype(np.int16),
                 os.path.join(args.prediction_path, name),
diff --git a/part_model/dataloader/part_imagenet.py b/part_model/dataloader/part_imagenet.py
index e1a75c3..930716e 100644
--- a/part_model/dataloader/part_imagenet.py
+++ b/part_model/dataloader/part_imagenet.py
@@ -125,7 +125,7 @@ def _get_data(self):
             filenames = sorted([f.strip() for f in fns.readlines()])
             images.extend([f"{img_path}/{f}.JPEG" for f in filenames])
             masks.extend(
-                [f'{part_path}/{f.split("/")[1]}.tif' for f in filenames]
+                [f'{part_path}/{f.split("/")[1]}.png' for f in filenames]
             )
             labels.extend([l] * len(filenames))
         labels = torch.tensor(labels, dtype=torch.long)
diff --git a/part_model/dataloader/part_imagenet_corrupt.py b/part_model/dataloader/part_imagenet_corrupt.py
index c379123..f6a500d 100644
--- a/part_model/dataloader/part_imagenet_corrupt.py
+++ b/part_model/dataloader/part_imagenet_corrupt.py
@@ -143,8 +143,12 @@ def _get_data(self):
         with open(f"{self.path}/{label}.txt", "r") as fns:
             filenames = sorted([f.strip() for f in fns.readlines()])
             for severity in range(1, 6):
-                images.extend([f"{img_path}/{f}_{severity}.JPEG" for f in filenames])
-                masks.extend([f'{part_path}/{f.split("/")[1]}.tif' for f in filenames])
+                images.extend(
+                    [f"{img_path}/{f}_{severity}.JPEG" for f in filenames]
+                )
+                masks.extend(
+                    [f'{part_path}/{f.split("/")[1]}.png' for f in filenames]
+                )
                 labels.extend([l] * len(filenames))
         labels = torch.tensor(labels, dtype=torch.long)
         return images, labels, masks
@@ -197,7 +201,9 @@ def get_loader_sampler(args, transform, split, distributed_sampler=True):
     # TODO: can we make this cleaner?
     PART_IMAGENET_CORRUPT["part_to_class"] = part_imagenet_dataset.part_to_class
     PART_IMAGENET_CORRUPT["num_classes"] = part_imagenet_dataset.num_classes
-    PART_IMAGENET_CORRUPT["num_seg_labels"] = part_imagenet_dataset.num_seg_labels
+    PART_IMAGENET_CORRUPT[
+        "num_seg_labels"
+    ] = part_imagenet_dataset.num_seg_labels
     setattr(args, "num_classes", part_imagenet_dataset.num_classes)

     pto = part_imagenet_dataset.part_to_object
@@ -231,7 +237,9 @@ def load_part_imagenet(args):
         ]
     )

-    train_loader, train_sampler = get_loader_sampler(args, train_transforms, "train")
+    train_loader, train_sampler = get_loader_sampler(
+        args, train_transforms, "train"
+    )
     val_loader, _ = get_loader_sampler(args, val_transforms, "val")
     test_loader, _ = get_loader_sampler(args, val_transforms, "test")

diff --git a/part_model/dataloader/part_imagenet_geirhos.py b/part_model/dataloader/part_imagenet_geirhos.py
index aa9a377..2709402 100644
--- a/part_model/dataloader/part_imagenet_geirhos.py
+++ b/part_model/dataloader/part_imagenet_geirhos.py
@@ -123,7 +123,9 @@ def _get_data(self):
         with open(f"{self.path}/{label}.txt", "r") as fns:
             filenames = sorted([f.strip() for f in fns.readlines()])
             images.extend([f"{img_path}/{f}.png" for f in filenames])
-            masks.extend([f'{part_path}/{f.split("/")[1]}.tif' for f in filenames])
+            masks.extend(
+                [f'{part_path}/{f.split("/")[1]}.png' for f in filenames]
+            )
             labels.extend([l] * len(filenames))
         labels = torch.tensor(labels, dtype=torch.long)
         return images, labels, masks
@@ -173,8 +175,12 @@ def get_loader_sampler(args, transform, split, distributed_sampler=True):
     )

     # TODO: can we make this cleaner?
-    PART_IMAGENET_GEIRHOS["part_to_class"] = part_imagenet_geirhos_dataset.part_to_class
-    PART_IMAGENET_GEIRHOS["num_classes"] = part_imagenet_geirhos_dataset.num_classes
+    PART_IMAGENET_GEIRHOS[
+        "part_to_class"
+    ] = part_imagenet_geirhos_dataset.part_to_class
+    PART_IMAGENET_GEIRHOS[
+        "num_classes"
+    ] = part_imagenet_geirhos_dataset.num_classes
     PART_IMAGENET_GEIRHOS[
         "num_seg_labels"
     ] = part_imagenet_geirhos_dataset.num_seg_labels
@@ -211,7 +217,9 @@ def load_part_imagenet_geirhos(args):
         ]
     )

-    train_loader, train_sampler = get_loader_sampler(args, train_transforms, "train")
+    train_loader, train_sampler = get_loader_sampler(
+        args, train_transforms, "train"
+    )
     val_loader, _ = get_loader_sampler(args, val_transforms, "val")
     test_loader, _ = get_loader_sampler(args, val_transforms, "test")

diff --git a/part_model/dataloader/part_imagenet_imagenet_class.py b/part_model/dataloader/part_imagenet_imagenet_class.py
index 5c93657..d96ef98 100644
--- a/part_model/dataloader/part_imagenet_imagenet_class.py
+++ b/part_model/dataloader/part_imagenet_imagenet_class.py
@@ -272,7 +272,7 @@ def _get_data(self):
             filenames = sorted([f.strip() for f in fns.readlines()])
             images.extend([f"{img_path}/{f}.JPEG" for f in filenames])
             masks.extend(
-                [f'{part_path}/{f.split("/")[1]}.tif' for f in filenames]
+                [f'{part_path}/{f.split("/")[1]}.png' for f in filenames]
             )
             labels.extend([l] * len(filenames))
         labels = torch.tensor(labels, dtype=torch.long)
diff --git a/part_model/dataloader/part_imagenet_pseudo.py b/part_model/dataloader/part_imagenet_pseudo.py
index ffd420f..e523e1c 100644
--- a/part_model/dataloader/part_imagenet_pseudo.py
+++ b/part_model/dataloader/part_imagenet_pseudo.py
@@ -126,7 +126,7 @@ def _get_data(self):
             filenames = sorted([f.strip() for f in fns.readlines()])
             images.extend([f"{img_path}/{f}.JPEG" for f in filenames])
             masks.extend(
-                [f'{part_path}/{f.split("/")[1]}.tif' for f in filenames]
+                [f'{part_path}/{f.split("/")[1]}.png' for f in filenames]
             )
             labels.extend([l] * len(filenames))
             files.extend(filenames)
diff --git a/part_model/dataloader/part_imagenet_pseudo_imagenet_class.py b/part_model/dataloader/part_imagenet_pseudo_imagenet_class.py
index 3cd2fb0..95a1bf2 100644
--- a/part_model/dataloader/part_imagenet_pseudo_imagenet_class.py
+++ b/part_model/dataloader/part_imagenet_pseudo_imagenet_class.py
@@ -273,7 +273,7 @@ def _get_data(self):
             filenames = sorted([f.strip() for f in fns.readlines()])
             images.extend([f"{img_path}/{f}.JPEG" for f in filenames])
             masks.extend(
-                [f'{part_path}/{f.split("/")[1]}.tif' for f in filenames]
+                [f'{part_path}/{f.split("/")[1]}.png' for f in filenames]
             )
             labels.extend([l] * len(filenames))
             files.extend(filenames)
diff --git a/post_prediction.py b/post_prediction.py
index 656fb02..7ce688b 100644
--- a/post_prediction.py
+++ b/post_prediction.py
@@ -96,7 +96,7 @@
 tran_val_count = 0
 for path, subdirs, files in os.walk(old_dataset_path):
     for name in files:
-        if ".tif" in name:
+        if ".png" in name:
             if "train" in path or "val" in path:
                 tran_val_count += 1
             count += 1
@@ -119,7 +119,7 @@
 mask_names = []
 for root, dirs, files in os.walk(prediction_path):
     for file in files:
-        if ".tif" in file:
+        if ".png" in file:
             mask_names.append(file.split(".")[0])

 # Step 4
@@ -156,16 +156,16 @@
         # symlink original masks from old dataset
         for fileName in step_all_list:
             os.symlink(
-                f"{relative_path_old_to_new}/{partition}/{c}/{fileName}.tif",  # calculate relative path
-                f"{new_dataset_path}/{partition}/{c}/{fileName}.tif",
+                f"{relative_path_old_to_new}/{partition}/{c}/{fileName}.png",  # calculate relative path
+                f"{new_dataset_path}/{partition}/{c}/{fileName}.png",
             )

         # Step 6
         # copy newly generated masks over
         for fileName in partition_generated[partition]:
             shutil.copyfile(
-                f"{prediction_path}/{fileName}.tif",
-                f"{new_dataset_path}/{partition}/{c}/{fileName}.tif",
+                f"{prediction_path}/{fileName}.png",
+                f"{new_dataset_path}/{partition}/{c}/{fileName}.png",
             )
         # write .txt file
         with open(f"{new_dataset_path}/{partition}/{c}.txt", "w") as f:
diff --git a/post_prediction_imagenet.py b/post_prediction_imagenet.py
index 07cdc7d..1602fa2 100644
--- a/post_prediction_imagenet.py
+++ b/post_prediction_imagenet.py
@@ -76,7 +76,7 @@
 tran_val_count = 0
 for path, subdirs, files in os.walk(old_dataset_path):
     for name in files:
-        if ".tif" in name:
+        if ".png" in name:
             if "train" in path or "val" in path:
                 tran_val_count += 1
             count += 1
@@ -101,7 +101,7 @@
 mask_names = []
 for root, dirs, files in os.walk(prediction_path):
     for file in files:
-        if ".tif" in file:
+        if ".png" in file:
             mask_names.append(file.split(".")[0])

 print(len(mask_names), len(set(mask_names)))
@@ -160,20 +160,20 @@
     # symlink
     for fileName in train_lines:
         os.symlink(
-            f"{relative_path_old_to_new}/train/{c}/{fileName}.tif",  # calculate relative path
-            f"{new_dataset_path}/train/{c}/{fileName}.tif",
+            f"{relative_path_old_to_new}/train/{c}/{fileName}.png",  # calculate relative path
+            f"{new_dataset_path}/train/{c}/{fileName}.png",
         )
     for fileName in val_lines:
         os.symlink(
-            f"{relative_path_old_to_new}/val/{c}/{fileName}.tif",  # calculate relative path
-            f"{new_dataset_path}/train/{c}/{fileName}.tif",
+            f"{relative_path_old_to_new}/val/{c}/{fileName}.png",  # calculate relative path
+            f"{new_dataset_path}/train/{c}/{fileName}.png",
         )

     # copy newly generated masks over
     for fileName in class_train_masks:
         shutil.copyfile(
-            f"{prediction_path}/{fileName}.tif",
-            f"{new_dataset_path}/train/{c}/{fileName}.tif",
+            f"{prediction_path}/{fileName}.png",
+            f"{new_dataset_path}/train/{c}/{fileName}.png",
         )

 # Step 6
@@ -192,8 +192,8 @@
         # copy newly generated masks over
         for fileName in new_partition_list:
             shutil.copyfile(
-                f"{prediction_path}/{fileName}.tif",
-                f"{new_dataset_path}/{partition}/{c}/{fileName}.tif",
+                f"{prediction_path}/{fileName}.png",
+                f"{new_dataset_path}/{partition}/{c}/{fileName}.png",
             )
         # write .txt file
         with open(f"{new_dataset_path}/{partition}/{c}.txt", "w") as f:
diff --git a/pre_prediction.py b/pre_prediction.py
index 7be4de1..305e936 100644
--- a/pre_prediction.py
+++ b/pre_prediction.py
@@ -91,7 +91,7 @@
 tran_val_count = 0
 for path, subdirs, files in os.walk(old_dataset_path):
     for name in files:
-        if ".tif" in name:
+        if ".png" in name:
             if "train" in path or "val" in path:
                 tran_val_count += 1
             count += 1
@@ -152,7 +152,7 @@
     pass


-# Create .tif file of correct dimensions inside test dir
+# Create .png file of correct dimensions inside test dir
 def save_pil_image(img, path):
     image_path = os.path.join(path)
     pil_img = Image.fromarray(img)
@@ -168,7 +168,7 @@ def save_pil_image(img, path):
             tif = np.zeros(img.size)
             print(img.size)
             save_pil_image(
-                tif, f"{new_temp_mask_dataset_path}/test/{c}/{name[:-1]}.tif"
+                tif, f"{new_temp_mask_dataset_path}/test/{c}/{name[:-1]}.png"
             )

 # Step 6
diff --git a/pre_prediction_imagenet.py b/pre_prediction_imagenet.py
index d0fc48b..16a3f24 100644
--- a/pre_prediction_imagenet.py
+++ b/pre_prediction_imagenet.py
@@ -85,7 +85,7 @@
 tran_val_count = 0
 for path, subdirs, files in os.walk(old_dataset_path):
     for name in files:
-        if ".tif" in name:
+        if ".png" in name:
             if "train" in path or "val" in path:
                 tran_val_count += 1
             count += 1
@@ -143,7 +143,7 @@
 ) as f:
     pass

-# Create .tif file of correct dimensions inside test dir
+# Create .png file of correct dimensions inside test dir
 def save_pil_image(img, path):
     image_path = os.path.join(path)
     pil_img = Image.fromarray(img)
@@ -159,7 +159,7 @@ def save_pil_image(img, path):
             tif = np.zeros(img.size)
             print(img.size)
             save_pil_image(
-                tif, f"{new_temp_mask_dataset_path}/test/{c}/{name[:-1]}.tif"
+                tif, f"{new_temp_mask_dataset_path}/test/{c}/{name[:-1]}.png"
             )

 # Step 6
diff --git a/to_class_specific.py b/to_class_specific.py
index c1d0c18..b67a653 100644
--- a/to_class_specific.py
+++ b/to_class_specific.py
@@ -51,7 +51,7 @@
 # Step 1
 for path, subdirs, files in os.walk(metaclass_dataset_dir):
     for name in files:
-        if ".tif" in name:
+        if ".png" in name:
             metaclass = path.split("/")[-1]
             imagenet_class = name.split("_")[0]
             metaclass_to_class[metaclass].add(imagenet_class)
@@ -115,7 +115,7 @@ def save_pil_image(img, path):
 for path, subdirs, files in os.walk(metaclass_dataset_dir):
     for name in files:
         className = path.split("/")[-1]
-        if ".tif" in name:
+        if ".png" in name:
             img = np.asarray(Image.open(os.path.join(path, name)))
             imagenet_className = name.split("_")[0]