Fix failing fp16 evaluation (#124)
* fix: Fix wrong dtype used for evaluation

* build: Upgrade package version

* docs: Update changelog

* refactor: Improve code
lorenzomammana authored Jul 8, 2024
1 parent 4e07340 commit 8a2ab23
Showing 6 changed files with 13 additions and 9 deletions.
6 changes: 6 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,12 @@
 # Changelog
 All notable changes to this project will be documented in this file.
 
+### [2.1.12]
+
+#### Fixed
+
+- Fix wrong dtype used when evaluating finetuning or anomaly models trained in fp16 precision
+
 ### [2.1.11]
 
 #### Fixed
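The common thread across the diffs below is replacing an ad-hoc device cast with a single call that also matches the deployment model's dtype. A minimal sketch of the pattern, assuming a deployment wrapper that exposes a `model_dtype` attribute as quadra's does here; the loop and names are otherwise illustrative:

```python
import torch

def evaluate(deployment_model, dataloader, device):
    """Illustrative eval loop: inputs must match the model's precision."""
    outputs = []
    with torch.no_grad():
        for images, _labels in dataloader:
            # One .to() call handles device placement and dtype together,
            # so an fp16-trained model no longer receives fp32 tensors.
            images = images.to(device=device, dtype=deployment_model.model_dtype)
            outputs.append(deployment_model(images).cpu())
    return torch.cat(outputs)
```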
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "quadra"
-version = "2.1.11"
+version = "2.1.12"
 description = "Deep Learning experiment orchestration library"
 authors = [
     "Federico Belotti <[email protected]>",
2 changes: 1 addition & 1 deletion quadra/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.1.11"
+__version__ = "2.1.12"
 
 
 def get_version():
5 changes: 3 additions & 2 deletions quadra/tasks/anomaly.py
@@ -386,10 +386,11 @@ def test(self) -> None:
     batch_labels = batch_item["label"]
     image_labels.extend(batch_labels.tolist())
     image_paths.extend(batch_item["image_path"])
+    batch_images = batch_images.to(device=self.device, dtype=self.deployment_model.model_dtype)
     if self.model_data.get("anomaly_method") == "efficientad":
-        model_output = self.deployment_model(batch_images.to(self.device), None)
+        model_output = self.deployment_model(batch_images, None)
     else:
-        model_output = self.deployment_model(batch_images.to(self.device))
+        model_output = self.deployment_model(batch_images)
     anomaly_map, anomaly_score = model_output[0], model_output[1]
     anomaly_map = anomaly_map.cpu()
     anomaly_score = anomaly_score.cpu()
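Hoisting the cast above the `efficientad` branch lets both call signatures share it. For reference, a hypothetical helper (not part of quadra's API) showing one way a `model_dtype` attribute could be derived from a loaded model:

```python
import torch

def infer_model_dtype(model: torch.nn.Module) -> torch.dtype:
    # Hypothetical helper: use the first floating-point parameter's dtype
    # as the model's working precision (float32, float16, or bfloat16).
    for param in model.parameters():
        if param.is_floating_point():
            return param.dtype
    return torch.float32  # no float parameters: fall back to fp32
```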
2 changes: 1 addition & 1 deletion quadra/tasks/classification.py
@@ -1169,7 +1169,7 @@ def test(self) -> None:
     with torch.set_grad_enabled(self.gradcam):
         for batch_item in tqdm(test_dataloader):
             im, target = batch_item
-            im = im.to(self.device).detach()
+            im = im.to(device=self.device, dtype=self.deployment_model.model_dtype).detach()
 
             if self.gradcam:
                 # When gradcam is used we need to remove gradients
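Note that the input keeps its `.detach()` after the dtype change: `torch.set_grad_enabled(self.gradcam)` can leave autograd on, and `.to()` is itself tracked by autograd. A standalone check of that behavior:

```python
import torch

x = torch.randn(1, 3, 8, 8, requires_grad=True)
with torch.set_grad_enabled(True):  # as when Grad-CAM is active
    y = x.to(device="cpu", dtype=torch.float16)
    print(y.requires_grad)           # True: .to() stays in the graph
    print(y.detach().requires_grad)  # False: detach cuts the history
```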
5 changes: 1 addition & 4 deletions quadra/tasks/segmentation.py
@@ -346,10 +346,7 @@ def test(self) -> None:
     image_list, mask_list, mask_pred_list, label_list = [], [], [], []
     for batch in dataloader:
         images, masks, labels = batch
-        images = images.to(self.device)
-        # TODO: This can be problematic for the future considering bfloat16 or float16-true.
-        if "16" in str(self.deployment_model.model_dtype):
-            images = images.half()
+        images = images.to(device=self.device, dtype=self.deployment_model.model_dtype)
         if len(masks.shape) == 3:  # BxHxW -> Bx1xHxW
             masks = masks.unsqueeze(1)
         with torch.no_grad():
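The deleted TODO had already flagged the weakness of the string check: `"16" in str(torch.bfloat16)` is also true, so `.half()` would send float16 inputs to a bfloat16 model. A small demonstration of why the direct cast is safer:

```python
import torch

x = torch.randn(2, 3)
for dtype in (torch.float16, torch.bfloat16):
    if "16" in str(dtype):       # old heuristic: matches bfloat16 too
        old = x.half()           # always float16, wrong for bf16 models
    new = x.to(dtype=dtype)      # new approach: cast to the exact dtype
    print(dtype, old.dtype, new.dtype)
# torch.float16 torch.float16 torch.float16
# torch.bfloat16 torch.float16 torch.bfloat16
```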
