diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..1b80620 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,14 @@ +fail_fast: false + +repos: + - repo: https://github.com/psf/black + rev: 23.10.1 + hooks: + - id: black + args: [--line-length=120] + + - repo: https://github.com/PyCQA/flake8 + rev: 7.0.0 + hooks: + - id: flake8 + args: [--max-line-length=120, "--ignore=W291,E731"] \ No newline at end of file diff --git a/README.md b/README.md index 5705c2f..277c007 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,12 @@ List of each experiment as in paper and how to reproduce it ```shell pip3 install black==23.10 cd hitchhiking_rotations && black --line-length 120 ./ + +# Using precommit +pip3 install pre-commit +cd hitchhiking_rotations && python3 -m pre_commit install +cd hitchhiking_rotations && python3 -m pre_commit run + ``` ### Add License Headers ```shell diff --git a/hitchhiking_rotations/cfgs/cfg_cube_image_to_pose.py b/hitchhiking_rotations/cfgs/cfg_cube_image_to_pose.py index 65cde46..b38b094 100644 --- a/hitchhiking_rotations/cfgs/cfg_cube_image_to_pose.py +++ b/hitchhiking_rotations/cfgs/cfg_cube_image_to_pose.py @@ -7,7 +7,7 @@ def get_cfg_cube_image_to_pose(device): shared_trainer_cfg = { "_target_": "hitchhiking_rotations.utils.Trainer", "lr": 0.001, - "optimizer": "SGD", + "optimizer": "Adam", "logger": "${logger}", "verbose": "${verbose}", "device": device, @@ -17,7 +17,7 @@ def get_cfg_cube_image_to_pose(device): return { "verbose": True, "batch_size": 32, - "epochs": 100, + "epochs": 1000, "training_data": { "_target_": "hitchhiking_rotations.datasets.CubeImageToPoseDataset", "mode": "train", @@ -45,7 +45,7 @@ def get_cfg_cube_image_to_pose(device): "metrics": ["l1", "l2", "geodesic_distance", "chordal_distance"], }, "trainers": { - "r9_l1": { + "r9_svd_l1": { **shared_trainer_cfg, **{ "preprocess_target": "${u:passthrough}", @@ -55,7 +55,7 @@ def get_cfg_cube_image_to_pose(device): "model": "${model9}", }, }, - "r9_l2": { + "r9_svd_l2": { **shared_trainer_cfg, **{ "preprocess_target": "${u:passthrough}", @@ -65,7 +65,7 @@ def get_cfg_cube_image_to_pose(device): "model": "${model9}", }, }, - "r9_geodesic_distance": { + "r9_svd_geodesic_distance": { **shared_trainer_cfg, **{ "preprocess_target": "${u:passthrough}", @@ -75,7 +75,7 @@ def get_cfg_cube_image_to_pose(device): "model": "${model9}", }, }, - "r9_chordal_distance": { + "r9_svd_chordal_distance": { **shared_trainer_cfg, **{ "preprocess_target": "${u:passthrough}", @@ -85,7 +85,67 @@ def get_cfg_cube_image_to_pose(device): "model": "${model9}", }, }, + "r9_l1": { + **shared_trainer_cfg, + **{ + "preprocess_target": "${u:flatten}", + "postprocess_pred_loss": "${u:flatten}", + "postprocess_pred_logging": "${u:procrustes_to_rotmat}", + "loss": "${u:l1}", + "model": "${model9}", + }, + }, + "r9_l2": { + **shared_trainer_cfg, + **{ + "preprocess_target": "${u:flatten}", + "postprocess_pred_loss": "${u:flatten}", + "postprocess_pred_logging": "${u:procrustes_to_rotmat}", + "loss": "${u:l2}", + "model": "${model9}", + }, + }, + "r9_geodesic_distance": { + **shared_trainer_cfg, + **{ + "preprocess_target": "${u:passthrough}", + "postprocess_pred_loss": "${u:n_3x3}", + "postprocess_pred_logging": "${u:procrustes_to_rotmat}", + "loss": "${u:geodesic_distance}", + "model": "${model9}", + }, + }, + "r9_chordal_distance": { + **shared_trainer_cfg, + **{ + "preprocess_target": "${u:passthrough}", + "postprocess_pred_loss": "${u:n_3x3}", + "postprocess_pred_logging": 
"${u:procrustes_to_rotmat}", + "loss": "${u:chordal_distance}", + "model": "${model9}", + }, + }, "r6_l1": { + **shared_trainer_cfg, + **{ + "preprocess_target": "${u:rotmat_to_gramschmidt_f}", + "postprocess_pred_loss": "${u:flatten}", + "postprocess_pred_logging": "${u:gramschmidt_to_rotmat}", + "loss": "${u:l1}", + "model": "${model6}", + }, + }, + "r6_l2": { + **shared_trainer_cfg, + **{ + "preprocess_target": "${u:rotmat_to_gramschmidt_f}", + "postprocess_pred_loss": "${u:flatten}", + "postprocess_pred_logging": "${u:gramschmidt_to_rotmat}", + "loss": "${u:l2}", + "model": "${model6}", + }, + }, + "r6_gso_l1": { **shared_trainer_cfg, **{ "preprocess_target": "${u:passthrough}", @@ -95,7 +155,7 @@ def get_cfg_cube_image_to_pose(device): "model": "${model6}", }, }, - "r6_l2": { + "r6_gso_l2": { **shared_trainer_cfg, **{ "preprocess_target": "${u:passthrough}", @@ -105,7 +165,7 @@ def get_cfg_cube_image_to_pose(device): "model": "${model6}", }, }, - "r6_geodesic_distance": { + "r6_gso_geodesic_distance": { **shared_trainer_cfg, **{ "preprocess_target": "${u:passthrough}", @@ -115,7 +175,7 @@ def get_cfg_cube_image_to_pose(device): "model": "${model6}", }, }, - "r6_chordal_distance": { + "r6_gso_chordal_distance": { **shared_trainer_cfg, **{ "preprocess_target": "${u:passthrough}", @@ -125,6 +185,16 @@ def get_cfg_cube_image_to_pose(device): "model": "${model6}", }, }, + "quat_c_geodesic_distance": { + **shared_trainer_cfg, + **{ + "preprocess_target": "${u:passthrough}", + "postprocess_pred_loss": "${u:quaternion_to_rotmat}", + "postprocess_pred_logging": "${u:quaternion_to_rotmat}", + "loss": "${u:geodesic_distance}", + "model": "${model4}", + }, + }, "quat_c_chordal_distance": { **shared_trainer_cfg, **{ @@ -175,6 +245,46 @@ def get_cfg_cube_image_to_pose(device): "model": "${model4}", }, }, + "quat_rf_cosine_distance": { + **shared_trainer_cfg, + **{ + "preprocess_target": "${u:rotmat_to_quaternion_rand_flip}", + "postprocess_pred_loss": "${u:passthrough}", + "postprocess_pred_logging": "${u:quaternion_to_rotmat}", + "loss": "${u:cosine_distance}", + "model": "${model4}", + }, + }, + "quat_rf_l2": { + **shared_trainer_cfg, + **{ + "preprocess_target": "${u:rotmat_to_quaternion_rand_flip}", + "postprocess_pred_loss": "${u:passthrough}", + "postprocess_pred_logging": "${u:quaternion_to_rotmat}", + "loss": "${u:l2}", + "model": "${model4}", + }, + }, + "quat_rf_l1": { + **shared_trainer_cfg, + **{ + "preprocess_target": "${u:rotmat_to_quaternion_rand_flip}", + "postprocess_pred_loss": "${u:passthrough}", + "postprocess_pred_logging": "${u:quaternion_to_rotmat}", + "loss": "${u:l1}", + "model": "${model4}", + }, + }, + "quat_rf_l2_dp": { + **shared_trainer_cfg, + **{ + "preprocess_target": "${u:rotmat_to_quaternion_rand_flip}", + "postprocess_pred_loss": "${u:passthrough}", + "postprocess_pred_logging": "${u:quaternion_to_rotmat}", + "loss": "${u:l2_dp}", + "model": "${model4}", + }, + }, "rotvec_l1": { **shared_trainer_cfg, **{ diff --git a/hitchhiking_rotations/cfgs/cfg_pose_to_cube_image.py b/hitchhiking_rotations/cfgs/cfg_pose_to_cube_image.py index 503611f..083785b 100644 --- a/hitchhiking_rotations/cfgs/cfg_pose_to_cube_image.py +++ b/hitchhiking_rotations/cfgs/cfg_pose_to_cube_image.py @@ -20,7 +20,7 @@ def get_cfg_pose_to_cube_image(device): return { "verbose": False, "batch_size": 128, - "epochs": 100, + "epochs": 1000, "training_data": { "_target_": "hitchhiking_rotations.datasets.PoseToCubeImageDataset", "mode": "train", diff --git 
a/hitchhiking_rotations/datasets/cube_data_generator.py b/hitchhiking_rotations/datasets/cube_data_generator.py index 03282bd..06b2f65 100644 --- a/hitchhiking_rotations/datasets/cube_data_generator.py +++ b/hitchhiking_rotations/datasets/cube_data_generator.py @@ -4,7 +4,6 @@ # See LICENSE file in the project root for details. # import mujoco -import torch import numpy as np from PIL import Image @@ -15,15 +14,15 @@ def __init__(self, height: int, width: int): - + - - - - - - - + + + + + + + diff --git a/hitchhiking_rotations/datasets/cube_dataset.py b/hitchhiking_rotations/datasets/cube_dataset.py index 5fbbf70..9e7731a 100644 --- a/hitchhiking_rotations/datasets/cube_dataset.py +++ b/hitchhiking_rotations/datasets/cube_dataset.py @@ -7,7 +7,6 @@ from hitchhiking_rotations.utils import save_pickle, load_pickle import os from os.path import join -import pickle from scipy.spatial.transform import Rotation import torch import roma @@ -55,3 +54,14 @@ def __init__(self, mode, dataset_size, device): def __getitem__(self, idx): return roma.unitquat_to_rotmat(self.quats[idx]).type(torch.float32), self.imgs[idx].type(torch.float32) / 255 + + +if __name__ == "__main__": + from PIL import Image + import numpy as np + + dataset = CubeImageToPoseDataset("train", 2048, "cpu") + for i in range(10): + img, quat = dataset[i] + img = Image.fromarray(np.uint8(img.cpu().numpy() * 255)) + img.save(join(HITCHHIKING_ROOT_DIR, "results", f"example_img_{i}.png")) diff --git a/hitchhiking_rotations/models/models.py b/hitchhiking_rotations/models/models.py index a3dd05c..c717b8d 100644 --- a/hitchhiking_rotations/models/models.py +++ b/hitchhiking_rotations/models/models.py @@ -3,10 +3,8 @@ # All rights reserved. Licensed under the MIT license. # See LICENSE file in the project root for details. 
# -import numpy as np import torch from torch import nn -import torch.nn.functional as F class MLP(nn.Module): @@ -31,8 +29,6 @@ def __init__(self, input_dim, width, height): IMAGE_CHANNEL = 3 Z_DIM = 10 G_HIDDEN = 64 - X_DIM = 64 - D_HIDDEN = 64 self.INP_SIZE = 5 self.input_dim = input_dim diff --git a/hitchhiking_rotations/utils/__init__.py b/hitchhiking_rotations/utils/__init__.py index 5160e9f..07ae532 100644 --- a/hitchhiking_rotations/utils/__init__.py +++ b/hitchhiking_rotations/utils/__init__.py @@ -9,5 +9,5 @@ from .logger import OrientationLogger from .trainer import Trainer from .loading import * -from .helper import passthrough, flatten +from .helper import passthrough, flatten, n_3x3 from .notation import RotRep diff --git a/hitchhiking_rotations/utils/conversions.py b/hitchhiking_rotations/utils/conversions.py index 790b5e3..8b40d00 100644 --- a/hitchhiking_rotations/utils/conversions.py +++ b/hitchhiking_rotations/utils/conversions.py @@ -7,8 +7,6 @@ import roma import torch -# x to rotmat - def euler_to_rotmat(inp: torch.Tensor) -> torch.Tensor: return euler_angles_to_matrix(inp.reshape(-1, 3), convention="XZY") diff --git a/hitchhiking_rotations/utils/helper.py b/hitchhiking_rotations/utils/helper.py index 111d297..7fb8bc1 100644 --- a/hitchhiking_rotations/utils/helper.py +++ b/hitchhiking_rotations/utils/helper.py @@ -11,3 +11,7 @@ def passthrough(*x): def flatten(x): return x.reshape(x.shape[0], -1) + + +def n_3x3(x): + return x.reshape(-1, 3, 3) diff --git a/hitchhiking_rotations/utils/loading.py b/hitchhiking_rotations/utils/loading.py index 7ccd3d2..ee97bfe 100644 --- a/hitchhiking_rotations/utils/loading.py +++ b/hitchhiking_rotations/utils/loading.py @@ -6,6 +6,7 @@ import os import yaml import pickle +from omegaconf import OmegaConf __all__ = ["file_path", "load_yaml", "load_pickle", "save_pickle", "save_omega_cfg"] @@ -46,18 +47,15 @@ def load_yaml(path: str) -> dict: def load_pickle(path: str) -> dict: - """Loads yaml file - + """Load pickle file Args: path (str): File path - Returns: (dict): Returns content of file """ - with open(path) as file: + + with open(path, "rb") as file: res = pickle.load(file) - if res is None: - res = {} return res @@ -73,21 +71,6 @@ def save_pickle(cfg, path: str): pickle.dump(cfg, file, protocol=pickle.HIGHEST_PROTOCOL) -def load_pickle(path: str) -> dict: - """Load pickle file - - Args: - path (str): File path - - Returns: - (dict): Returns content of file - """ - - with open(path, "rb") as file: - res = pickle.load(file) - return res - - def save_omega_cfg(cfg, path): """ Args: diff --git a/hitchhiking_rotations/utils/notation.py b/hitchhiking_rotations/utils/notation.py index 4748f58..044b875 100644 --- a/hitchhiking_rotations/utils/notation.py +++ b/hitchhiking_rotations/utils/notation.py @@ -11,8 +11,11 @@ class RotRep(Enum): SVD = "$\mathbb{R}^9$+SVD" QUAT_C = "Quat$^+$" QUAT = "Quat" + QUAT_RF = "Quat+RF" EULER = "Euler" EXP = "Exp" + ROTMAT = "$\mathbb{R}^9$" + RSIX = "$\mathbb{R}^6$" def __str__(self): return "%s" % self.value diff --git a/hitchhiking_rotations/utils/training_helper.py b/hitchhiking_rotations/utils/training_helper.py index 55f3438..8295a5e 100644 --- a/hitchhiking_rotations/utils/training_helper.py +++ b/hitchhiking_rotations/utils/training_helper.py @@ -7,6 +7,7 @@ import numpy as np import argparse import torch +from torch.utils.data import Dataset def default(*x): @@ -142,7 +143,7 @@ def get_loss_and_fout(loss: TrainingLoss, representation: Representation): TrainingLoss.QUAT_CP, ], f"Loss {loss} not 
supported for representation {representation}" if loss == TrainingLoss.CHORDIAL: - print(f"Cordial loss for rotations is the same as L2 loss. Using default L2 loss") + print("Chordal loss for rotations is the same as L2 loss. Using default L2 loss") loss = TrainingLoss.L2 if representation in [Representation.QUAT, Representation.QUAT_AUG, Representation.QUAT_HM]: @@ -174,9 +175,6 @@ def test_parsing(): print(get_loss_and_fout(TrainingLoss.GEODESIC, Representation.EULER) == (geodesic_loss, euler_to_rotmat)) -from torch.utils.data import Dataset, DataLoader - - class PointCloudDataset(Dataset): def __init__(self, pcd_path, rotated_pcd_path, out_rot_path, representation: Representation): self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") @@ -220,6 +218,7 @@ def test_dataset(): out_rots_path = f"{data_path}/train_rotations.npy" dataset = PointCloudDataset(pcd_path, rotate_path, out_rots_path, args.representation) + print(dataset[0]) if __name__ == "__main__": diff --git a/scripts/run_all.py b/scripts/run_all.py new file mode 100644 index 0000000..db0899a --- /dev/null +++ b/scripts/run_all.py @@ -0,0 +1,15 @@ +from hitchhiking_rotations import HITCHHIKING_ROOT_DIR +import os + +p = os.path.join(HITCHHIKING_ROOT_DIR, "scripts", "train.py") + +for seed in range(10): + os.system(f"python3 {p} --experiment cube_image_to_pose --seed {seed}") + +for seed in range(10): + os.system(f"python3 {p} --experiment pose_to_cube_image --seed {seed}") + + +os.system("python3 " + str(os.path.join(HITCHHIKING_ROOT_DIR, "visu", "figure_19.py"))) +os.system("python3 " + str(os.path.join(HITCHHIKING_ROOT_DIR, "visu", "figure_12a.py"))) +os.system("python3 " + str(os.path.join(HITCHHIKING_ROOT_DIR, "visu", "figure_12b.py"))) diff --git a/scripts/train.py b/scripts/train.py index 81c0ab4..9c5cf02 100644 --- a/scripts/train.py +++ b/scripts/train.py @@ -25,7 +25,7 @@ "--experiment", type=str, choices=["cube_image_to_pose", "pose_to_cube_image", "pcd_to_pose"] + fourier_choices, - default="pose_to_cube_image", + default="cube_image_to_pose", help="Experiment Configuration", ) parser.add_argument( @@ -93,9 +93,12 @@ trainer.train_batch(x.clone(), target.clone(), epoch) - if cfg_exp.verbose: - scores = [t.logger.get_score("train", "loss") for t in trainers.values()] - bar.set_postfix({"running_train_loss": np.array(scores).mean()}) + try: + if cfg_exp.verbose: + scores = [t.logger.get_score("train", "loss") for t in trainers.values()] + bar.set_postfix({"running_train_loss": np.array(scores).mean()}) + except Exception: + pass if validate_every_n > 0 and epoch % validate_every_n == 0: # Perform validation diff --git a/visu/figure_12a.py b/visu/figure_12a.py index 679af88..623d40b 100644 --- a/visu/figure_12a.py +++ b/visu/figure_12a.py @@ -37,8 +37,8 @@ df = pd.DataFrame.from_dict(df_res) mapping = { - "r9": RotRep.SVD, - "r6": RotRep.GSO, + "r9_svd": RotRep.SVD, + "r6_gso": RotRep.GSO, "quat_c": RotRep.QUAT_C, "rotvec": RotRep.EXP, "euler": RotRep.EULER, @@ -56,8 +56,18 @@ plt.subplot(1, 1, 1) -sns.boxplot(data=df, x="score", y="method", palette="Blues", orient="h", width=0.5, linewidth=1.5, fliersize=2.5) -plt.xlabel(f"Error - {selected_metric}") +sns.boxplot( + data=df, + x="score", + y="method", + palette="Blues", + orient="h", + width=0.5, + linewidth=1.5, + fliersize=2.5, + showfliers=True, +) +plt.xlabel("Error - Geodesic Distance") plt.ylabel("") plt.tight_layout() diff --git a/visu/figure_12b.py b/visu/figure_12b.py index 9fa81f0..10e1577 100644 --- a/visu/figure_12b.py +++ b/visu/figure_12b.py
@@ -45,8 +45,8 @@ if rename_and_filter: mapping = { - "r9": RotRep.SVD, - "r6": RotRep.GSO, + "r9": RotRep.ROTMAT, + "r6": RotRep.RSIX, "quat_c": RotRep.QUAT_C, "quat_rf": str(RotRep.QUAT) + "_rf", "rotvec": RotRep.EXP, @@ -66,8 +66,8 @@ plt.subplot(1, 1, 1) -sns.boxplot(data=df, x="score", y="method", palette="Blues", orient="h", width=0.5, linewidth=1.5, fliersize=2.5) -plt.xlabel(f"Error - {selected_metric}") +sns.boxplot(data=df, x="score", y="method", palette="Greens", orient="h", width=0.5, linewidth=1.5, fliersize=2.5) +plt.xlabel("Error - MSE") plt.ylabel("") plt.tight_layout() diff --git a/visu/figure_19.py b/visu/figure_19.py new file mode 100644 index 0000000..45f9a75 --- /dev/null +++ b/visu/figure_19.py @@ -0,0 +1,120 @@ +import os +import numpy as np +import seaborn as sns +import matplotlib.pyplot as plt +from hitchhiking_rotations import HITCHHIKING_ROOT_DIR +from pathlib import Path +import pandas as pd +from hitchhiking_rotations.utils import RotRep + +plt.figure(figsize=(14, 14)) +plt.style.use(os.path.join(HITCHHIKING_ROOT_DIR, "assets", "prettyplots.mplstyle")) +sns.set_style("whitegrid") +plt.rcParams.update({"font.size": 11}) + +for j, selected_metric in enumerate(["geodesic_distance", "chordal_distance"]): + files = [ + str(s) for s in Path(os.path.join(HITCHHIKING_ROOT_DIR, "results", "cube_image_to_pose")).rglob("*result.npy") + ] + results = [np.load(file, allow_pickle=True) for file in files] + + df_res = {} + df_res["method"] = [] + df_res["score"] = [] + + for run in results: + for trainer_name, logging_dict in run.items(): + if trainer_name.find("test") == -1: + continue + + # only use metrics generated for testing + metrics_test = logging_dict["test"] + k = trainer_name[:-5] + + v = metrics_test[selected_metric]["sum"] / metrics_test[selected_metric]["count"] + + df_res["method"].append(k) + df_res["score"].append(v) + + df = pd.DataFrame.from_dict(df_res) + + mapping = { + "r9_svd_geodesic_distance": str(RotRep.SVD) + "-Geo", + "r9_svd_chordal_distance": str(RotRep.SVD) + "-Chordal", + "r9_svd_l2": str(RotRep.SVD) + "-MSE", + "r9_svd_l1": str(RotRep.SVD) + "-MAE", + " ": " ", + "r9_geodesic_distance": str(RotRep.ROTMAT) + "-Geo", + "r9_chordal_distance": str(RotRep.ROTMAT) + "-Chordal", + "r9_l2": str(RotRep.ROTMAT) + "-MSE", + "r9_l1": str(RotRep.ROTMAT) + "-MAE", + " ": " ", + "r6_gso_geodesic_distance": str(RotRep.GSO) + "-Geo", + "r6_gso_chordal_distance": str(RotRep.GSO) + "-Chordal", + "r6_gso_l2": str(RotRep.GSO) + "-MSE", + "r6_gso_l1": str(RotRep.GSO) + "-MAE", + "": "", + "r6_l2": str(RotRep.RSIX) + "-MSE", + "r6_l1": str(RotRep.RSIX) + "-MAE", + " ": " ", + "quat_c_geodesic_distance": str(RotRep.QUAT_C) + "-Geo", + "quat_c_chordal_distance": str(RotRep.QUAT_C) + "-Chordal", + "quat_c_cosine_distance": str(RotRep.QUAT_C) + "-CD", + "quat_c_l2_dp": str(RotRep.QUAT_C) + "-MSE-DP", + "quat_c_l2": str(RotRep.QUAT_C) + "-MSE", + "quat_c_l1": str(RotRep.QUAT_C) + "-MAE", + " ": " ", + # "quat_rf_cosine_distance": str(RotRep.QUAT_RF) + "-CD", + "quat_rf_l2_dp": str(RotRep.QUAT_RF) + "-MSE-DP", + # "quat_rf_l2": str(RotRep.QUAT_RF) + "-MSE", + # "quat_rf_l1": str(RotRep.QUAT_RF) + "-MAE", + " ": " ", + "rotvec_geodesic_distance": str(RotRep.EXP) + "-Geo", + "rotvec_chordal_distance": str(RotRep.EXP) + "-Chordal", + "rotvec_l2": str(RotRep.EXP) + "-MSE", + "rotvec_l1": str(RotRep.EXP) + "-MAE", + " ": " ", + "euler_geodesic_distance": str(RotRep.EULER) + "-Geo", + "euler_chordal_distance": str(RotRep.EULER) + "-Chordal", + "euler_l2": str(RotRep.EULER) + 
"-MSE", + "euler_l1": str(RotRep.EULER) + "-MAE", + } + + for k, v in mapping.items(): + df.loc[df["method"] == k, "method"] = v + + df["method"] = pd.Categorical(df["method"], categories=[v for v in mapping.values()], ordered=True) + + plt.subplot(1, 2, j + 1) + + sns.boxplot( + data=df, + x="score", + y="method", + palette="Blues", + orient="h", + width=0.5, + linewidth=1.5, + fliersize=2.5, + showfliers=False, + ) + + # plt.xlabel(f"Error - {selected_metric}") + + if j == 0: + plt.xlabel(f"Error - Geodesic distance") + print("Warning: Hardcoded label for the first plot. Please check if it is correct.") + print(f"Geodesic distance VS {selected_metric}") + elif j == 1: + plt.xlabel(f"Error - Chordal distance") + print("Warning: Hardcoded label for the first plot. Please check if it is correct.") + print(f"Chordal distance VS {selected_metric}") + plt.ylabel("") + # plt.xscale("log") + plt.tight_layout() + +out_p = os.path.join(HITCHHIKING_ROOT_DIR, "results", "cube_image_to_pose", f"figure_19_combined.pdf") + +plt.savefig(out_p) + +plt.show()