
Commit

Merge pull request #578 from Pale-Blue-Dot-97/dependabot/pip/requirements/numpy-2.2.0 (actually 2.0.2)

Bump numpy from 1.26.4 to 2.0.2 in /requirements
Pale-Blue-Dot-97 authored Dec 20, 2024
2 parents bff9033 + ab13a3a commit 502c81f
Showing 14 changed files with 91 additions and 93 deletions.
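The thread running through the whole diff: annotations move from the third-party nptyping package (NDArray[Shape, DType]) to numpy.typing.NDArray, which is parameterised by dtype only, and the uppercase aliases np.NAN and np.Inf, removed in NumPy 2.0, become np.nan and np.inf. Below is a minimal sketch of the annotation style the diff adopts; the scale() helper is hypothetical, not taken from minerva:

import numpy as np
from numpy.typing import NDArray

# Old (nptyping):   def scale(x: NDArray[Any, Int]) -> NDArray[Any, Float]: ...
def scale(x: NDArray[np.int_]) -> NDArray[np.float64]:
    """Return the array as float64, scaled into [0, 1] by its largest value."""
    return x.astype(np.float64) / max(int(x.max()), 1)

print(scale(np.array([1, 2, 4])))  # [0.25 0.5  1.  ]
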
4 changes: 2 additions & 2 deletions minerva/datasets/utils.py
@@ -50,7 +50,7 @@
from typing import Any, Callable, Iterable, Literal, Optional, Sequence, Union

import numpy as np
-from nptyping import NDArray
+from numpy.typing import NDArray
from torch.utils.data import ConcatDataset, DataLoader
from torchgeo.datasets import (
GeoDataset,
@@ -221,7 +221,7 @@ def make_bounding_box(roi: Sequence[float] | bool = False) -> Optional[BoundingB
def load_all_samples(
dataloader: DataLoader[Iterable[Any]],
target_key: Literal["mask", "label"] = "mask",
-) -> NDArray[Any, Any]:
+) -> NDArray[Any]:
"""Loads all sample masks from parsed :class:`~torch.utils.data.DataLoader` and computes the modes of their classes.
Args:
2 changes: 1 addition & 1 deletion minerva/logger/tasklog.py
@@ -270,7 +270,7 @@ def log_null(self) -> None:
the length of the logs remains the same as the training logs.
"""
for metric in self.metrics.keys():
-self.metrics[metric]["y"].append(np.NAN)
+self.metrics[metric]["y"].append(np.nan)

def get_sub_metrics(
self, pattern: tuple[str, ...] = ("train", "val")
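For context, log_null pads every metric series with a NaN so that validation logs keep the same length as the training logs, and NumPy 2.0 drops the uppercase alias np.NAN in favour of np.nan. A standalone sketch of the pattern; the metrics dictionary layout below is assumed for illustration:

import numpy as np

# Hypothetical metrics store mirroring the log_null pattern (illustrative only).
metrics = {
    "train_loss": {"x": [0, 1], "y": [0.91, 0.74]},
    "val_loss": {"x": [0], "y": [0.88]},
}

def log_null(store: dict) -> None:
    # Append NaN to every metric so all series stay the same length.
    for name in store:
        store[name]["y"].append(np.nan)  # np.NAN no longer exists in NumPy 2.0

log_null(metrics)
print(metrics["val_loss"]["y"])  # [0.88, nan]
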
4 changes: 2 additions & 2 deletions minerva/models/core.py
@@ -63,7 +63,7 @@

import numpy as np
import torch
-from nptyping import NDArray
+from numpy.typing import NDArray
from packaging.version import Version
from torch import Tensor
from torch._dynamo.eval_frame import OptimizedModule
@@ -573,7 +573,7 @@ def bilinear_init(in_channels: int, out_channels: int, kernel_size: int) -> Tens

og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
-weight: NDArray[Any, Any] = np.zeros(
+weight: NDArray[Any] = np.zeros(
(in_channels, out_channels, kernel_size, kernel_size), dtype="float32"
)
weight[range(in_channels), range(out_channels), :, :] = filt
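For context, bilinear_init fills a transposed-convolution weight tensor with a 2-D bilinear interpolation kernel. The factor and center values are computed just above the excerpt, so the sketch below substitutes the standard formulas for them (an assumption) and uses the NDArray annotation the diff adopts:

from typing import Any

import numpy as np
from numpy.typing import NDArray

def bilinear_kernel(in_channels: int, out_channels: int, kernel_size: int) -> NDArray[Any]:
    """Bilinear upsampling weights; standard factor/center formulas assumed."""
    factor = (kernel_size + 1) // 2
    center = factor - 1 if kernel_size % 2 == 1 else factor - 0.5
    og = np.ogrid[:kernel_size, :kernel_size]
    filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
    weight: NDArray[Any] = np.zeros(
        (in_channels, out_channels, kernel_size, kernel_size), dtype="float32"
    )
    # Every matched (i, i) in/out channel pair receives the same 2-D bilinear filter.
    weight[range(in_channels), range(out_channels), :, :] = filt
    return weight

print(bilinear_kernel(1, 1, 4)[0, 0])  # 4x4 kernel peaking at the centre
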
2 changes: 1 addition & 1 deletion minerva/pytorchtools.py
@@ -91,7 +91,7 @@ def __init__(
self.counter: int = 0
self.best_score: Optional[float] = None
self.early_stop: bool = False
-self.val_loss_min: float = np.Inf
+self.val_loss_min: float = np.inf
self.delta: float = delta
self.path: str | Path = path
self.trace_func: Callable[..., None] = trace_func
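For context, the early-stopping helper seeds its best validation loss with positive infinity so that the first epoch always counts as an improvement; NumPy 2.0 removes np.Inf, leaving np.inf. A simplified sketch of the mechanism, not the repository's EarlyStopping class:

import numpy as np

class EarlyStopping:
    """Simplified early stopping: halt after `patience` epochs without improvement."""

    def __init__(self, patience: int = 3, delta: float = 0.0) -> None:
        self.patience = patience
        self.delta = delta
        self.counter = 0
        self.early_stop = False
        self.val_loss_min = np.inf  # np.Inf was removed in NumPy 2.0

    def step(self, val_loss: float) -> None:
        if val_loss < self.val_loss_min - self.delta:
            self.val_loss_min = val_loss   # improvement: reset the patience counter
            self.counter = 0
        else:
            self.counter += 1
            self.early_stop = self.counter >= self.patience

stopper = EarlyStopping(patience=2)
for loss in (0.9, 0.8, 0.85, 0.83):
    stopper.step(loss)
print(stopper.early_stop)  # True: two epochs without beating 0.8
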
7 changes: 4 additions & 3 deletions minerva/tasks/core.py
@@ -50,10 +50,11 @@
SummaryWriter = None

import hydra
+import numpy as np
import pandas as pd
import torch
import torch.distributed as dist
-from nptyping import Int, NDArray
+from numpy.typing import NDArray
from torch import Tensor
from torch._dynamo.eval_frame import OptimizedModule
from wandb.sdk.wandb_run import Run
@@ -524,8 +525,8 @@ def compute_classification_report(
labels (~typing.Sequence[int]): List of corresponding ground truth label masks.
"""
# Ensures predictions and labels are flattened.
-preds: NDArray[Any, Int] = utils.batch_flatten(predictions)
-targets: NDArray[Any, Int] = utils.batch_flatten(labels)
+preds: NDArray[np.int_] = utils.batch_flatten(predictions)
+targets: NDArray[np.int_] = utils.batch_flatten(labels)

# Uses utils to create a classification report in a DataFrame.
cr_df = utils.make_classification_report(preds, targets, self.params["classes"])
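For context, compute_classification_report flattens the batched masks and labels into 1-D integer arrays before tabulating per-class metrics; only the annotations change here. A rough sketch of that flow using scikit-learn's classification_report, which is an assumption about the backend rather than the repository's utils.make_classification_report:

import numpy as np
import pandas as pd
from numpy.typing import NDArray
from sklearn.metrics import classification_report

def report_from_batches(predictions, labels) -> pd.DataFrame:
    # Flatten per-batch masks into single 1-D integer arrays, then tabulate
    # per-class precision, recall and F1 as a DataFrame.
    preds: NDArray[np.int_] = np.concatenate([np.ravel(p) for p in predictions])
    targets: NDArray[np.int_] = np.concatenate([np.ravel(t) for t in labels])
    return pd.DataFrame(classification_report(targets, preds, output_dict=True)).transpose()

print(report_from_batches([[0, 1], [1, 1]], [[0, 1], [0, 1]]))
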
53 changes: 27 additions & 26 deletions minerva/utils/utils.py
@@ -136,8 +136,7 @@
import torch
from geopy.exc import GeocoderUnavailable
from geopy.geocoders import Photon
-from nptyping import Float, Int, NDArray, Shape
-from numpy.typing import ArrayLike
+from numpy.typing import ArrayLike, NDArray
from omegaconf import DictConfig, OmegaConf
from pandas import DataFrame
from rasterio.crs import CRS
@@ -713,7 +712,7 @@ def deg_to_dms(deg: float, axis: str = "lat") -> str:


def dec2deg(
-dec_co: Sequence[float] | NDArray[Shape["*"], Float], # noqa: F722
+dec_co: Sequence[float] | NDArray[np.float64], # noqa: F722
axis: str = "lat",
) -> list[str]:
"""Wrapper for :func:`deg_to_dms`.
@@ -846,7 +845,7 @@ def find_tensor_mode(mask: LongTensor) -> LongTensor:
return mode


-def labels_to_ohe(labels: Sequence[int], n_classes: int) -> NDArray[Any, Any]:
+def labels_to_ohe(labels: Sequence[int], n_classes: int) -> NDArray[Any]:
"""Convert an iterable of indices to one-hot encoded (:term:`OHE`) labels.
Args:
@@ -856,7 +855,7 @@ def labels_to_ohe(labels: Sequence[int], n_classes: int) -> NDArray[Any, Any]:
Returns:
~numpy.ndarray[~typing.Any]: Labels in OHE form.
"""
-targets: NDArray[Any, Any] = np.array(labels).reshape(-1)
+targets: NDArray[Any] = np.array(labels).reshape(-1)
ohe_labels = np.eye(n_classes)[targets]
assert isinstance(ohe_labels, np.ndarray)
return ohe_labels
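Aside on the idiom in labels_to_ohe: indexing the identity matrix np.eye(n_classes) with an integer label array yields one one-hot row per label, for example:

import numpy as np

labels = np.array([0, 2, 1]).reshape(-1)
ohe = np.eye(3)[labels]
print(ohe)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]
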
@@ -947,7 +946,7 @@ def find_empty_classes(


def eliminate_classes(
-empty_classes: list[int] | tuple[int, ...] | NDArray[Any, Int],
+empty_classes: list[int] | tuple[int, ...] | NDArray[np.int_],
old_classes: dict[int, str],
old_cmap: Optional[dict[int, str]] = None,
) -> tuple[dict[int, str], dict[int, int], Optional[dict[int, str]]]:
@@ -1034,8 +1033,8 @@ def class_transform(label: int, matrix: dict[int, int]) -> int:

@overload
def mask_transform( # type: ignore[overload-overlap]
-array: NDArray[Any, Int], matrix: dict[int, int]
-) -> NDArray[Any, Int]: ... # pragma: no cover
+array: NDArray[np.int_], matrix: dict[int, int]
+) -> NDArray[np.int_]: ... # pragma: no cover


@overload
Expand All @@ -1045,17 +1044,17 @@ def mask_transform(


def mask_transform(
-array: NDArray[Any, Int] | LongTensor,
+array: NDArray[np.int_] | LongTensor,
matrix: dict[int, int],
-) -> NDArray[Any, Int] | LongTensor:
+) -> NDArray[np.int_] | LongTensor:
"""Transforms all labels of an N-dimensional array from one schema to another mapped by a supplied dictionary.
Args:
-array (~numpy.ndarray[int]): N-dimensional array containing labels to be transformed.
+array (~numpy.ndarray[int] | ~torch.LongTensor): N-dimensional array containing labels to be transformed.
matrix (dict[int, int]): Dictionary mapping old labels to new.
Returns:
-~numpy.ndarray[int]: Array of transformed labels.
+~numpy.ndarray[int] | ~torch.LongTensor: Array of transformed labels.
"""
for key in matrix.keys():
array[array == key] = matrix[key]
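Aside on mask_transform: each old label id is rewritten in place through a boolean mask. A standalone illustration with a hypothetical mapping; note the loop implicitly assumes old and new ids do not overlap, otherwise a value written by an earlier key could be remapped again by a later one:

import numpy as np

mask = np.array([[0, 3], [3, 5]])
matrix = {3: 1, 5: 2}
for key in matrix:
    mask[mask == key] = matrix[key]
print(mask)  # [[0 1] [1 2]]
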
@@ -1064,11 +1063,11 @@


def check_test_empty(
-pred: Sequence[int] | NDArray[Any, Int],
-labels: Sequence[int] | NDArray[Any, Int],
+pred: Sequence[int] | NDArray[np.int_],
+labels: Sequence[int] | NDArray[np.int_],
class_labels: Optional[dict[int, str]] = None,
p_dist: bool = True,
-) -> tuple[NDArray[Any, Int], NDArray[Any, Int], dict[int, str]]:
+) -> tuple[NDArray[np.int_], NDArray[np.int_], dict[int, str]]:
"""Checks if any of the classes in the dataset were not present in both the predictions and ground truth labels.
Returns corrected and re-ordered predictions, labels and class labels.
@@ -1160,7 +1159,7 @@ def class_frac(patch: pd.Series) -> dict[Any, Any]:
return new_columns


-def cloud_cover(scene: NDArray[Any, Any]) -> Any:
+def cloud_cover(scene: NDArray[Any]) -> float:
"""Calculates percentage cloud cover for a given scene based on its scene CLD.
Args:
Expand All @@ -1169,7 +1168,9 @@ def cloud_cover(scene: NDArray[Any, Any]) -> Any:
Returns:
float: Percentage cloud cover of scene.
"""
-return np.sum(scene) / scene.size
+cloud_cover = np.sum(scene) / scene.size
+assert isinstance(cloud_cover, float)
+return cloud_cover
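Aside on the added assert: NumPy float64 scalars subclass Python's float, so np.sum(scene) / scene.size satisfies isinstance(..., float) while narrowing the declared return type. For example:

import numpy as np

cld = np.array([[0, 1], [1, 1]])   # toy binary cloud mask
cover = np.sum(cld) / cld.size     # 0.75 as a NumPy float64 scalar
print(isinstance(cover, float))    # True: np.float64 subclasses Python float
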


def threshold_scene_select(df: DataFrame, thres: float = 0.3) -> list[str]:
@@ -1435,11 +1436,11 @@ def calc_frac(count: float, total: float) -> str:
print(tabulate(df, headers="keys", tablefmt="psql")) # type: ignore


-def batch_flatten(x: NDArray[Any, Any] | ArrayLike) -> NDArray[Shape["*"], Any]: # noqa: F722
-"""Flattens the supplied array with :func:`numpy`.
+def batch_flatten(x: ArrayLike) -> NDArray[Any]: # noqa: F722
+"""Flattens the supplied array with :func:`numpy.flatten`.
Args:
-x (~numpy.ndarray[~typing.Any] | ~nptyping.ArrayLike]): Array to be flattened.
+x (~numpy.typing.ArrayLike]): Array to be flattened.
Returns:
~numpy.ndarray[~typing.Any]: Flattened :class:`~numpy.ndarray`.
@@ -1454,8 +1455,8 @@ def batch_flatten(x: NDArray[Any, Any] | ArrayLike) -> NDArray[Shape["*"], Any]:


def make_classification_report(
-pred: Sequence[int] | NDArray[Any, Int],
-labels: Sequence[int] | NDArray[Any, Int],
+pred: Sequence[int] | NDArray[np.int_],
+labels: Sequence[int] | NDArray[np.int_],
class_labels: Optional[dict[int, str]] = None,
print_cr: bool = True,
p_dist: bool = False,
@@ -1625,8 +1626,8 @@ def run_tensorboard(


def compute_roc_curves(
-probs: NDArray[Any, Float],
-labels: Sequence[int] | NDArray[Any, Int],
+probs: NDArray[np.float64],
+labels: Sequence[int] | NDArray[np.int_],
class_labels: list[int],
micro: bool = True,
macro: bool = True,
@@ -1721,7 +1722,7 @@ def compute_roc_curves(

if macro:
# Aggregate all false positive rates.
-all_fpr: NDArray[Any, Any] = np.unique(
+all_fpr: NDArray[Any] = np.unique(
np.concatenate([fpr[key] for key in populated_classes])
)
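Aside on the macro-average hunk: the union of per-class false-positive rates becomes a common grid onto which each class's true-positive rates are interpolated and then averaged. The interpolation and averaging sit outside the shown excerpt, so the sketch below follows the standard recipe with toy curves rather than the repository's code:

import numpy as np

fpr = {0: np.array([0.0, 0.2, 1.0]), 1: np.array([0.0, 0.5, 1.0])}
tpr = {0: np.array([0.0, 0.8, 1.0]), 1: np.array([0.0, 0.6, 1.0])}

all_fpr = np.unique(np.concatenate([fpr[k] for k in fpr]))  # union of thresholds
mean_tpr = np.zeros_like(all_fpr)
for k in fpr:
    mean_tpr += np.interp(all_fpr, fpr[k], tpr[k])          # resample onto the grid
mean_tpr /= len(fpr)
print(all_fpr)   # [0.  0.2 0.5 1. ]
print(mean_tpr)  # macro-averaged TPR at each shared threshold
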

@@ -1787,7 +1788,7 @@ def print_config(conf: DictConfig) -> None:


def tsne_cluster(
-embeddings: NDArray[Any, Any],
+embeddings: NDArray[Any],
n_dim: int = 2,
lr: str = "auto",
n_iter: int = 1000,
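Aside on tsne_cluster, whose annotation is the only change here: it projects high-dimensional embeddings down to n_dim dimensions, typically for visualisation. A minimal sketch assuming scikit-learn's TSNE as the backend, which the excerpt does not confirm:

import numpy as np
from sklearn.manifold import TSNE

rng = np.random.default_rng(0)
embeddings = rng.normal(size=(50, 128))  # 50 samples of 128-D embeddings
tsne = TSNE(n_components=2, learning_rate="auto", init="random", perplexity=10.0)
points = tsne.fit_transform(embeddings)
print(points.shape)  # (50, 2)
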