diff --git a/erddapy/core/griddap.py b/erddapy/core/griddap.py
index 535249c..f5e430a 100644
--- a/erddapy/core/griddap.py
+++ b/erddapy/core/griddap.py
@@ -1,21 +1,19 @@
 """Griddap handling."""

 import functools
-from typing import Dict, List, Optional, Tuple, Union

 import pandas as pd

 from erddapy.core.url import urlopen

-ListLike = Union[List[str], Tuple[str]]
-OptionalStr = Optional[str]
+ListLike = list[str] | tuple[str]


 @functools.lru_cache(maxsize=128)
 def _griddap_get_constraints(
     dataset_url: str,
     step: int,
-) -> Tuple[Dict, List, List]:
+) -> tuple[dict, list, list]:
     """
     Fetch metadata of griddap dataset and set initial constraints.

@@ -64,7 +62,7 @@ def _griddap_get_constraints(
     return constraints_dict, dim_names, variable_names


-def _griddap_check_constraints(user_constraints: Dict, original_constraints: Dict):
+def _griddap_check_constraints(user_constraints: dict, original_constraints: dict):
     """Check that constraints changed by user match those expected by dataset."""
     if user_constraints.keys() != original_constraints.keys():
         raise ValueError(
diff --git a/erddapy/core/interfaces.py b/erddapy/core/interfaces.py
index 74ce9f9..2a8b7c4 100644
--- a/erddapy/core/interfaces.py
+++ b/erddapy/core/interfaces.py
@@ -4,8 +4,7 @@ This module takes an URL or the bytes response of a request and converts it to
 Pandas, XArray, Iris, etc. objects.
 """
-
-from typing import TYPE_CHECKING, Dict, Optional
+from typing import TYPE_CHECKING

 import pandas as pd

@@ -19,8 +18,8 @@

 def to_pandas(
     url: str,
-    requests_kwargs: Optional[Dict] = None,
-    pandas_kwargs: Optional[Dict] = None,
+    requests_kwargs: dict | None = None,
+    pandas_kwargs: dict | None = None,
 ) -> "pd.DataFrame":
     """
     Convert a URL to Pandas DataFrame.
@@ -39,7 +38,7 @@
 def to_ncCF(
     url: str,
     protocol: str = None,
-    requests_kwargs: Optional[Dict] = None,
+    requests_kwargs: dict | None = None,
 ) -> "Dataset":
     """
     Convert a URL to a netCDF4 Dataset.
@@ -58,8 +57,8 @@
 def to_xarray(
     url: str,
     response="opendap",
-    requests_kwargs: Optional[Dict] = None,
-    xarray_kwargs: Optional[Dict] = None,
+    requests_kwargs: dict | None = None,
+    xarray_kwargs: dict | None = None,
 ) -> "xr.Dataset":
     """
     Convert a URL to an xarray dataset.
@@ -83,8 +82,8 @@

 def to_iris(
     url: str,
-    requests_kwargs: Optional[Dict] = None,
-    iris_kwargs: Optional[Dict] = None,
+    requests_kwargs: dict | None = None,
+    iris_kwargs: dict | None = None,
 ):
     """
     Convert a URL to an iris CubeList.
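Note on the new spellings above: builtin generics such as `dict` and `list[str]` follow PEP 585 (Python >= 3.9), while `X | Y` unions follow PEP 604 (Python >= 3.10). Module-level aliases like `ListLike = list[str] | tuple[str]` are evaluated at import time, so `from __future__ import annotations` cannot defer them; at runtime this change effectively requires Python 3.10 or newer. A minimal sketch of the pattern (the function name is hypothetical):

    # Requires Python >= 3.10: the alias below is a real runtime expression,
    # not a deferred annotation string.
    ListLike = list[str] | tuple[str]


    def example(variables: ListLike | None = None) -> dict:
        """Hypothetical helper using the modernized type spellings."""
        return {"variables": variables}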
diff --git a/erddapy/core/netcdf.py b/erddapy/core/netcdf.py
index a3500df..97d20ca 100644
--- a/erddapy/core/netcdf.py
+++ b/erddapy/core/netcdf.py
@@ -1,15 +1,16 @@
 """Handles netCDF responses."""

 import platform
+from collections.abc import Generator
 from contextlib import contextmanager
 from pathlib import Path
-from typing import BinaryIO, Dict, Generator, Optional
+from typing import BinaryIO
 from urllib.parse import urlparse

 from erddapy.core.url import urlopen


-def _nc_dataset(url, requests_kwargs: Optional[Dict] = None):
+def _nc_dataset(url, requests_kwargs: dict | None = None):
     """Return a netCDF4-python Dataset from memory and fallbacks to disk if that fails."""
     from netCDF4 import Dataset

diff --git a/erddapy/core/url.py b/erddapy/core/url.py
index e5b86f6..913a555 100644
--- a/erddapy/core/url.py
+++ b/erddapy/core/url.py
@@ -4,19 +4,19 @@
 import functools
 import io
 from datetime import datetime
-from typing import BinaryIO, Dict, List, Optional, Tuple, Union
+from typing import BinaryIO
 from urllib.parse import quote_plus

 import httpx
 import pytz
 from pandas import to_datetime

-ListLike = Union[List[str], Tuple[str]]
-OptionalStr = Optional[str]
+ListLike = list[str] | tuple[str]
+OptionalStr = str | None


 @functools.lru_cache(maxsize=128)
-def _urlopen(url: str, auth: Optional[tuple] = None, **kwargs: Dict) -> BinaryIO:
+def _urlopen(url: str, auth: tuple | None = None, **kwargs: dict) -> BinaryIO:
     if "timeout" not in kwargs.keys():
         kwargs["timeout"] = 60
     response = httpx.get(url, follow_redirects=True, auth=auth, **kwargs)
@@ -29,7 +29,7 @@

 def urlopen(
     url: str,
-    requests_kwargs: Optional[Dict] = None,
+    requests_kwargs: dict | None = None,
 ) -> BinaryIO:
     """Thin wrapper around httpx get content.

@@ -45,7 +45,7 @@


 @functools.lru_cache(maxsize=128)
-def check_url_response(url: str, **kwargs: Dict) -> str:
+def check_url_response(url: str, **kwargs: dict) -> str:
     """
     Shortcut to `raise_for_status` instead of fetching the whole content.

@@ -58,7 +58,7 @@
     return url


-def _distinct(url: str, distinct: Optional[bool] = False) -> str:
+def _distinct(url: str, distinct: bool | None = False) -> str:
     """
     Sort all of the rows in the results table.

@@ -91,7 +91,7 @@ def _multi_urlopen(url: str) -> BinaryIO:
     return data


-def _quote_string_constraints(kwargs: Dict) -> Dict:
+def _quote_string_constraints(kwargs: dict) -> dict:
     """
     Quote constraints of String variables.
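For reviewers, a sketch of the cached-fetch pattern that `_urlopen` keeps after this change. `functools.lru_cache` requires hashable arguments, which is presumably why the public `urlopen` wrapper expands `requests_kwargs` into keywords rather than passing the dict itself. The `raise_for_status`/`BytesIO` tail is inferred from the `BinaryIO` return type and is not shown in the hunk:

    import functools
    import io
    from typing import BinaryIO

    import httpx


    @functools.lru_cache(maxsize=128)
    def _urlopen(url: str, auth: tuple | None = None, **kwargs) -> BinaryIO:
        # Default to a 60-second timeout when the caller does not set one.
        if "timeout" not in kwargs:
            kwargs["timeout"] = 60
        response = httpx.get(url, follow_redirects=True, auth=auth, **kwargs)
        response.raise_for_status()
        return io.BytesIO(response.content)

One nit while touching these lines: `**kwargs: dict` annotates each keyword value as a dict; plain `**kwargs` (or `**kwargs: Any`) matches how the values are actually used.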
@@ -103,7 +103,7 @@ def _quote_string_constraints(kwargs: Dict) -> Dict:
     }


-def _format_constraints_url(kwargs: Dict) -> str:
+def _format_constraints_url(kwargs: dict) -> str:
     """Join the constraint variables with separator '&' to add to the download link."""
     return "".join([f"&{k}{v}" for k, v in kwargs.items()])

@@ -115,7 +115,7 @@ def _check_substrings(constraint):


 def parse_dates(
-    date_time: Union[datetime, str],
+    date_time: datetime | str,
     dayfirst=False,
     yearfirst=False,
 ) -> float:
@@ -147,7 +147,7 @@ def parse_dates(
 def get_search_url(
     server: str,
     response: str = "html",
-    search_for: Optional[str] = None,
+    search_for: str | None = None,
     protocol: str = "tabledap",
     items_per_page: int = 1000,
     page: int = 1,
@@ -339,8 +339,8 @@ def get_download_url(
     server: str,
     dataset_id: OptionalStr = None,
     protocol: OptionalStr = None,
-    variables: Optional[ListLike] = None,
-    dim_names: Optional[ListLike] = None,
+    variables: ListLike | None = None,
+    dim_names: ListLike | None = None,
     response=None,
     constraints=None,
     distinct=False,
diff --git a/erddapy/erddapy.py b/erddapy/erddapy.py
index 0a9fe6a..78221b2 100644
--- a/erddapy/erddapy.py
+++ b/erddapy/erddapy.py
@@ -3,7 +3,6 @@
 import functools
 import hashlib
 from pathlib import Path
-from typing import Dict, List, Optional, Tuple, Union
 from urllib.request import urlretrieve

 import pandas as pd
@@ -40,8 +39,8 @@
     "ERDDAP",
 ]

-ListLike = Union[List[str], Tuple[str]]
-OptionalStr = Optional[str]
+ListLike = list[str] | tuple[str]
+OptionalStr = str | None


 class ERDDAP:
@@ -126,13 +125,13 @@ def __init__(
         self.response = response

         # Initialized only via properties.
-        self.constraints: Optional[Dict] = None
-        self.server_functions: Optional[Dict] = None
+        self.constraints: dict | None = None
+        self.server_functions: dict | None = None
         self.dataset_id: OptionalStr = None
-        self.requests_kwargs: Dict = {}
-        self.auth: Optional[tuple] = None
-        self.variables: Optional[ListLike] = None
-        self.dim_names: Optional[ListLike] = None
+        self.requests_kwargs: dict = {}
+        self.auth: tuple | None = None
+        self.variables: ListLike | None = None
+        self.dim_names: ListLike | None = None

         self._get_variables = functools.lru_cache(maxsize=128)(
             self._get_variables_uncached,
@@ -140,7 +139,7 @@ def __init__(
         # Caching the last `dataset_id` and `variables` list request for quicker multiple accesses,
         # will be overridden when requesting a new `dataset_id`.
         self._dataset_id: OptionalStr = None
-        self._variables: Dict = {}
+        self._variables: dict = {}

     def griddap_initialize(
         self,
@@ -277,8 +276,8 @@ def get_download_url(
         self,
         dataset_id: OptionalStr = None,
         protocol: OptionalStr = None,
-        variables: Optional[ListLike] = None,
-        dim_names: Optional[ListLike] = None,
+        variables: ListLike | None = None,
+        dim_names: ListLike | None = None,
         response=None,
         constraints=None,
         distinct=False,
@@ -344,7 +343,7 @@ def get_download_url(

     def to_pandas(
         self,
-        requests_kwargs: Optional[Dict] = None,
+        requests_kwargs: dict | None = None,
         **kw,
     ) -> "pd.DataFrame":
         """Save a data request to a pandas.DataFrame.
@@ -373,7 +372,7 @@ def to_ncCF(self, protocol: str = None, **kw):

     def to_xarray(
         self,
-        requests_kwargs: Optional[Dict] = None,
+        requests_kwargs: dict | None = None,
         **kw,
     ):
         """Load the data request into a xarray.Dataset.
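The retyped `ERDDAP` attributes above are set the same way as before; a usage sketch with illustrative server, dataset, and constraint values (any live ERDDAP endpoint would do):

    from erddapy import ERDDAP

    e = ERDDAP(server="https://gliders.ioos.us/erddap", protocol="tabledap")
    e.response = "csv"
    e.dataset_id = "whoi_406-20160902T1700"  # OptionalStr
    e.constraints = {"time>=": "2016-07-10T00:00:00Z"}  # dict | None
    e.variables = ["depth", "temperature", "salinity"]  # ListLike | None
    df = e.to_pandas()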
@@ -404,7 +403,7 @@ def to_iris(self, **kw):
         url = self.get_download_url(response=response, distinct=distinct)
         return to_iris(url, iris_kwargs=dict(**kw))

-    def _get_variables_uncached(self, dataset_id: OptionalStr = None) -> Dict:
+    def _get_variables_uncached(self, dataset_id: OptionalStr = None) -> dict:
         if not dataset_id:
             dataset_id = self.dataset_id

@@ -426,7 +425,7 @@ def _get_variables_uncached(self, dataset_id: OptionalStr = None) -> Dict:
             variables.update({variable: attributes})
         return variables

-    def get_var_by_attr(self, dataset_id: OptionalStr = None, **kwargs) -> List[str]:
+    def get_var_by_attr(self, dataset_id: OptionalStr = None, **kwargs) -> list[str]:
         """
         Return a variable based on its attributes.

diff --git a/erddapy/multiple_server_search.py b/erddapy/multiple_server_search.py
index 7b0cf11..d779b67 100644
--- a/erddapy/multiple_server_search.py
+++ b/erddapy/multiple_server_search.py
@@ -1,7 +1,6 @@
 """Multiple Server Search."""

 import multiprocessing
-from typing import Dict

 import pandas as pd

@@ -19,7 +18,7 @@
 from erddapy.servers.servers import servers


-def _format_results(dfs: Dict[str, pd.DataFrame]) -> pd.DataFrame:
+def _format_results(dfs: dict[str, pd.DataFrame]) -> pd.DataFrame:
     """Format dictionary of results into a Pandas dataframe."""
     # we return None for bad server, so we need to filter them here
     df_all = pd.concat([list(df.values())[0] for df in dfs if df is not None])
@@ -30,7 +29,7 @@
 def fetch_results(
     url: str,
     key: str,
     protocol,
-) -> Dict[str, pd.DataFrame]:
+) -> dict[str, pd.DataFrame]:
     """
     Fetch search results from multiple servers.
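One last review note on `_format_results`: despite the new `dict[str, pd.DataFrame]` annotation, the body iterates `dfs` and calls `.values()` on each element, so what it actually consumes is an iterable of single-entry `{server_key: DataFrame}` dicts (one per `fetch_results` call), with `None` entries filtered out for failed servers. A minimal reproduction of that contract, with hypothetical data:

    import pandas as pd


    def _format_results(dfs) -> pd.DataFrame:
        # dfs: iterable of {server_key: DataFrame} dicts, or None for a bad server.
        return pd.concat([list(df.values())[0] for df in dfs if df is not None])


    results = [
        {"server_a": pd.DataFrame({"Dataset ID": ["ds_one"]})},
        None,  # a server that errored out
        {"server_b": pd.DataFrame({"Dataset ID": ["ds_two"]})},
    ]
    df_all = _format_results(results)

Something like `Iterable[dict[str, pd.DataFrame] | None]` would describe the parameter more precisely than `dict[str, pd.DataFrame]`.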