From e86b39e8f09f8946917a35a0622479d99791e007 Mon Sep 17 00:00:00 2001
From: Jesper Dramsch
Date: Tue, 28 May 2024 13:25:12 +0000
Subject: [PATCH] fix: rm src

---
 pyproject.toml                                        | 4 +---
 src/anemoi/models/layers/attention.py                 | 4 ++--
 src/anemoi/models/layers/block.py                     | 8 ++++----
 src/anemoi/models/layers/mapper.py                    | 8 ++++----
 src/anemoi/models/layers/processor.py                 | 6 +++---
 src/anemoi/models/models/encoder_processor_decoder.py | 2 +-
 6 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 7cb7f0c..96fe4b8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,7 +57,7 @@ docs = [
 
 all = []
 
-tests = ["pytest", "hypothesis", "omegaconf"]
+tests = ["pytest", "hypothesis"]
 
 dev = [
     "sphinx",
@@ -66,8 +66,6 @@ dev = [
     "pandoc",
     "pytest",
     "hypothesis",
-    "hydra-core>=1.3",
-    "pytest",
 ]
 
 [project.urls]
diff --git a/src/anemoi/models/layers/attention.py b/src/anemoi/models/layers/attention.py
index 6ad5f75..2063ad0 100644
--- a/src/anemoi/models/layers/attention.py
+++ b/src/anemoi/models/layers/attention.py
@@ -24,8 +24,8 @@
 else:
     _FLASH_ATTENTION_AVAILABLE = True
 
-from src.anemoi.models.distributed.transformer import shard_heads
-from src.anemoi.models.distributed.transformer import shard_sequence
+from anemoi.models.distributed.transformer import shard_heads
+from anemoi.models.distributed.transformer import shard_sequence
 
 LOGGER = logging.getLogger(__name__)
 
diff --git a/src/anemoi/models/layers/block.py b/src/anemoi/models/layers/block.py
index d920fa5..16283fe 100644
--- a/src/anemoi/models/layers/block.py
+++ b/src/anemoi/models/layers/block.py
@@ -21,14 +21,14 @@
 from torch_geometric.typing import OptPairTensor
 from torch_geometric.typing import Size
 
+from anemoi.models.distributed.graph import shard_tensor
+from anemoi.models.distributed.graph import sync_tensor
+from anemoi.models.distributed.transformer import shard_heads
+from anemoi.models.distributed.transformer import shard_sequence
 from anemoi.models.layers.attention import MultiHeadSelfAttention
 from anemoi.models.layers.conv import GraphConv
 from anemoi.models.layers.conv import GraphTransformerConv
 from anemoi.models.layers.mlp import MLP
-from src.anemoi.models.distributed.graph import shard_tensor
-from src.anemoi.models.distributed.graph import sync_tensor
-from src.anemoi.models.distributed.transformer import shard_heads
-from src.anemoi.models.distributed.transformer import shard_sequence
 
 LOGGER = logging.getLogger(__name__)
 
diff --git a/src/anemoi/models/layers/mapper.py b/src/anemoi/models/layers/mapper.py
index 018dc80..9f5f90b 100644
--- a/src/anemoi/models/layers/mapper.py
+++ b/src/anemoi/models/layers/mapper.py
@@ -20,15 +20,15 @@
 from torch_geometric.typing import Adj
 from torch_geometric.typing import PairTensor
 
+from anemoi.models.distributed.graph import gather_tensor
+from anemoi.models.distributed.graph import shard_tensor
 from anemoi.models.distributed.khop_edges import sort_edges_1hop
+from anemoi.models.distributed.shapes import change_channels_in_shape
+from anemoi.models.distributed.shapes import get_shape_shards
 from anemoi.models.layers.block import GraphConvMapperBlock
 from anemoi.models.layers.block import GraphTransformerMapperBlock
 from anemoi.models.layers.graph import TrainableTensor
 from anemoi.models.layers.mlp import MLP
-from src.anemoi.models.distributed.graph import gather_tensor
-from src.anemoi.models.distributed.graph import shard_tensor
-from src.anemoi.models.distributed.shapes import change_channels_in_shape
-from src.anemoi.models.distributed.shapes import get_shape_shards
 
 
 class BaseMapper(nn.Module, ABC):
diff --git a/src/anemoi/models/layers/processor.py b/src/anemoi/models/layers/processor.py
index 4c02df9..39a6f24 100644
--- a/src/anemoi/models/layers/processor.py
+++ b/src/anemoi/models/layers/processor.py
@@ -17,15 +17,15 @@
 from torch.utils.checkpoint import checkpoint
 from torch_geometric.data import HeteroData
 
+from anemoi.models.distributed.graph import shard_tensor
 from anemoi.models.distributed.khop_edges import sort_edges_1hop
+from anemoi.models.distributed.shapes import change_channels_in_shape
+from anemoi.models.distributed.shapes import get_shape_shards
 from anemoi.models.layers.chunk import GNNProcessorChunk
 from anemoi.models.layers.chunk import GraphTransformerProcessorChunk
 from anemoi.models.layers.chunk import TransformerProcessorChunk
 from anemoi.models.layers.graph import TrainableTensor
 from anemoi.models.layers.mapper import GraphEdgeMixin
-from src.anemoi.models.distributed.graph import shard_tensor
-from src.anemoi.models.distributed.shapes import change_channels_in_shape
-from src.anemoi.models.distributed.shapes import get_shape_shards
 
 
 class BaseProcessor(nn.Module, ABC):
diff --git a/src/anemoi/models/models/encoder_processor_decoder.py b/src/anemoi/models/models/encoder_processor_decoder.py
index 55ce831..c2616bf 100644
--- a/src/anemoi/models/models/encoder_processor_decoder.py
+++ b/src/anemoi/models/models/encoder_processor_decoder.py
@@ -19,9 +19,9 @@
 from torch.utils.checkpoint import checkpoint
 from torch_geometric.data import HeteroData
 
+from anemoi.models.distributed.shapes import get_shape_shards
 from anemoi.models.layers.graph import TrainableTensor
 from anemoi.models.utils.config import DotConfig
-from src.anemoi.models.distributed.shapes import get_shape_shards
 
 LOGGER = logging.getLogger(__name__)
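
Note, not part of the patch: with a src layout, `src/` is only a packaging
directory. Once anemoi-models is installed (e.g. via `pip install -e .`), its
modules resolve under `anemoi.models`, so the old `from src.anemoi...` imports
only ever worked when Python happened to run from the repository root and
raised ImportError for installed users. A minimal sanity check, assuming
anemoi-models is installed:

    # Sketch: confirm the corrected import paths resolve after installation.
    import importlib

    for name in (
        "anemoi.models.distributed.transformer",
        "anemoi.models.distributed.graph",
        "anemoi.models.distributed.shapes",
    ):
        # import_module raises ImportError if the dotted path does not resolve
        module = importlib.import_module(name)
        print(f"OK: {module.__name__}")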