chore: update tests and dependencies
JesperDramsch committed May 27, 2024
1 parent ec9f88c commit 68d7926
Showing 19 changed files with 362 additions and 313 deletions.
15 changes: 9 additions & 6 deletions pyproject.toml
@@ -40,10 +40,12 @@ classifiers = [
 ]
 
 dependencies = [
-  "torch>=2.2",
-  "torch-geometric>=2.3.1,<2.5",
-  "einops>=0.6.1",
-  "anemoi-utils@git+https://github.com/ecmwf/anemoi-utils.git@feature/add-utils-for-training-and-models",
+  "torch==2.3",
+  "torch-geometric==2.4",
+  "einops==0.6.1",
+  "hydra-core==1.3",
+  "anemoi-datasets==0.2.1",
+  "anemoi-utils==0.1.9",
 ]
 
 [project.optional-dependencies]
@@ -53,8 +55,9 @@ docs = [
   # For building the documentation
 ]
 
-all = [
-]
+all = []
 
+tests = ["pytest", "hypothesis", "omegaconf"]
+
 dev = [
   "sphinx",
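Note that the dependency specifiers move from lower-bound ranges to exact pins, and the git-branch dependency on anemoi-utils is replaced by the released 0.1.9. A minimal sketch of verifying that an installed environment matches these pins (this helper is hypothetical, not part of the commit; the prefix check is deliberately loose):

```python
# Hypothetical sanity check, not part of this commit: confirm the current
# environment matches the exact pins introduced in pyproject.toml.
from importlib.metadata import version

PINS = {
    "torch": "2.3",
    "torch-geometric": "2.4",
    "einops": "0.6.1",
    "hydra-core": "1.3",
    "anemoi-datasets": "0.2.1",
    "anemoi-utils": "0.1.9",
}

for package, pin in PINS.items():
    installed = version(package)  # raises PackageNotFoundError if missing
    # Loose prefix check; a stricter check would parse PEP 440 versions.
    assert installed.startswith(pin), f"{package}: {installed} != {pin}"
```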
Empty file.
254 changes: 0 additions & 254 deletions src/anemoi/models/data/dataset.py

This file was deleted.

16 changes: 0 additions & 16 deletions src/anemoi/models/data/scaling.py

This file was deleted.

File renamed without changes.
@@ -12,12 +12,12 @@
 import yaml
 from omegaconf import OmegaConf
 
-from anemoi.models.data.data_indices.index import BaseIndex
-from anemoi.models.data.data_indices.index import DataIndex
-from anemoi.models.data.data_indices.index import ModelIndex
-from anemoi.models.data.data_indices.tensor import BaseTensorIndex
-from anemoi.models.data.data_indices.tensor import InputTensorIndex
-from anemoi.models.data.data_indices.tensor import OutputTensorIndex
+from anemoi.models.data_indices.index import BaseIndex
+from anemoi.models.data_indices.index import DataIndex
+from anemoi.models.data_indices.index import ModelIndex
+from anemoi.models.data_indices.tensor import BaseTensorIndex
+from anemoi.models.data_indices.tensor import InputTensorIndex
+from anemoi.models.data_indices.tensor import OutputTensorIndex
 
 
 class IndexCollection:
@@ -7,8 +7,8 @@
 # nor does it submit to any jurisdiction.
 #
 
-from anemoi.models.data.data_indices.tensor import InputTensorIndex
-from anemoi.models.data.data_indices.tensor import OutputTensorIndex
+from anemoi.models.data_indices.tensor import InputTensorIndex
+from anemoi.models.data_indices.tensor import OutputTensorIndex
 
 
 class BaseIndex:
File renamed without changes.
4 changes: 2 additions & 2 deletions src/anemoi/models/layers/attention.py
@@ -24,8 +24,8 @@
 else:
     _FLASH_ATTENTION_AVAILABLE = True
 
-from src.anemoi.models.distributed.graph import shard_heads
-from src.anemoi.models.distributed.graph import shard_sequence
+from src.anemoi.models.distributed.transformer import shard_heads
+from src.anemoi.models.distributed.transformer import shard_sequence
 
 LOGGER = logging.getLogger(__name__)
 
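The head- and sequence-sharding helpers now come from a transformer-specific distributed module rather than the graph one. A rough sketch of the idea behind the two primitives, for orientation only (the names, signatures, and shapes below are assumptions for illustration, not the anemoi API, which would communicate slices across process-group ranks rather than slice locally):

```python
# Illustrative only: contrast sharding attention state across ranks by
# attention head versus by sequence position.
import torch

def shard_heads_sketch(x: torch.Tensor, world_size: int, rank: int) -> torch.Tensor:
    # x: (batch, heads, sequence, channels); keep this rank's slice of heads.
    return x.chunk(world_size, dim=1)[rank]

def shard_sequence_sketch(x: torch.Tensor, world_size: int, rank: int) -> torch.Tensor:
    # Keep this rank's slice of the sequence dimension instead.
    return x.chunk(world_size, dim=2)[rank]

x = torch.randn(2, 8, 16, 64)
assert shard_heads_sketch(x, world_size=4, rank=0).shape == (2, 2, 16, 64)
assert shard_sequence_sketch(x, world_size=4, rank=3).shape == (2, 8, 4, 64)
```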
4 changes: 2 additions & 2 deletions src/anemoi/models/layers/block.py
@@ -25,10 +25,10 @@
 from anemoi.models.layers.conv import GraphConv
 from anemoi.models.layers.conv import GraphTransformerConv
 from anemoi.models.layers.mlp import MLP
-from src.anemoi.models.distributed.graph import shard_heads
-from src.anemoi.models.distributed.graph import shard_sequence
 from src.anemoi.models.distributed.graph import shard_tensor
 from src.anemoi.models.distributed.graph import sync_tensor
+from src.anemoi.models.distributed.transformer import shard_heads
+from src.anemoi.models.distributed.transformer import shard_sequence
 
 LOGGER = logging.getLogger(__name__)
 
8 changes: 5 additions & 3 deletions src/anemoi/models/preprocessing/__init__.py
@@ -160,6 +160,8 @@ def forward(self, x, in_place: bool = True) -> Tensor:
 
     def _run_checks(self, x):
         """Run checks on the processed tensor."""
-        assert not torch.isnan(
-            x
-        ).any(), f"NaNs ({torch.isnan(x).sum()}) found in processed tensor after {self.__class__.__name__}."
+        if not self.inverse:
+            # Forward transformation checks:
+            assert not torch.isnan(
+                x
+            ).any(), f"NaNs ({torch.isnan(x).sum()}) found in processed tensor after {self.__class__.__name__}."
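The NaN assertion is now gated on an `inverse` flag, so only the forward transformation is validated, presumably because the inverse transform may legitimately reintroduce NaNs (for example, for values an imputer had filled in). A minimal sketch of the resulting behaviour, assuming `self.inverse` marks inverse-mode processors (the class below is illustrative, not the actual anemoi class):

```python
# Minimal illustrative sketch: NaN validation runs only in the forward
# direction; inverse-mode processors skip the check entirely.
import torch

class ProcessorSketch:
    def __init__(self) -> None:
        self.inverse = False  # assumed flag; True when applying the inverse transform

    def _run_checks(self, x: torch.Tensor) -> None:
        """Run checks on the processed tensor."""
        if not self.inverse:
            # Forward transformation checks:
            assert not torch.isnan(x).any(), (
                f"NaNs ({torch.isnan(x).sum()}) found after {self.__class__.__name__}."
            )

proc = ProcessorSketch()
proc._run_checks(torch.ones(3))                  # forward: passes
proc.inverse = True
proc._run_checks(torch.tensor([float("nan")]))   # inverse: check skipped
```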
Empty file.
@@ -9,7 +9,7 @@
 import torch
 from omegaconf import DictConfig
 
-from anemoi.models.data.data_indices.collection import IndexCollection
+from anemoi.models.data_indices.collection import IndexCollection
 
 
 @pytest.fixture()
@@ -8,9 +8,9 @@
 import pytest
 import torch
 
-from anemoi.models.data.data_indices.index import DataIndex
-from anemoi.models.data.data_indices.tensor import InputTensorIndex
-from anemoi.models.data.data_indices.tensor import OutputTensorIndex
+from anemoi.models.data_indices.index import DataIndex
+from anemoi.models.data_indices.tensor import InputTensorIndex
+from anemoi.models.data_indices.tensor import OutputTensorIndex
 
 
 @pytest.fixture()
2 changes: 1 addition & 1 deletion tests/layers/processor/test_graphconv_processor.py
@@ -88,7 +88,7 @@ def test_graphconv_processor_init(graphconv_processor, graphconv_init):
         _dst_grid_size,
         _trainable_size,
     ) = graphconv_init
-    assert graphconv_processor.num_layers == num_layers
     assert graphconv_processor.num_chunks == num_chunks
    assert graphconv_processor.num_channels == num_channels
+    assert graphconv_processor.chunk_size == num_layers // num_chunks
     assert isinstance(graphconv_processor.trainable, TrainableTensor)
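The test now checks the derived `chunk_size` instead of asserting `num_layers` directly, i.e. the processor runs its layers in `num_chunks` groups of `num_layers // num_chunks`. A small worked example of that arithmetic (the numbers are hypothetical, not the fixture's values):

```python
# Hypothetical numbers illustrating the relationship asserted above:
# the processor splits its layers into num_chunks equally sized groups.
num_layers, num_chunks = 16, 2
chunk_size = num_layers // num_chunks
assert chunk_size == 8  # 2 chunks of 8 layers each cover all 16 layers
```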