Commit: PEP 604

ocefpaf committed Feb 22, 2024
1 parent dced110 · commit 1299bfb
Showing 6 changed files with 44 additions and 48 deletions.
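This commit replaces the legacy `typing` spellings with the syntax from PEP 585 (builtin generics such as `list[str]` and `dict`, available since Python 3.9) and PEP 604 (the `X | Y` union operator, available since Python 3.10). A minimal before/after sketch of the pattern applied throughout (illustrative names only, not erddapy code):

    # Before: everything imported from typing.
    from typing import Dict, List, Optional, Tuple, Union

    Names = Union[List[str], Tuple[str]]

    def get(url: str, kwargs: Optional[Dict] = None) -> Tuple[Dict, List]:
        ...

    # After: builtin generics and | unions, no typing import needed.
    Names = list[str] | tuple[str]

    def get(url: str, kwargs: dict | None = None) -> tuple[dict, list]:
        ...

Note that module-level aliases such as `ListLike = list[str] | tuple[str]` are evaluated eagerly at import time, where `from __future__ import annotations` offers no escape hatch, so the new syntax effectively sets a Python >= 3.10 floor for the package.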
erddapy/core/griddap.py (3 additions, 5 deletions)

@@ -1,21 +1,19 @@
 """Griddap handling."""
 
 import functools
-from typing import Dict, List, Optional, Tuple, Union
 
 import pandas as pd
 
 from erddapy.core.url import urlopen
 
-ListLike = Union[List[str], Tuple[str]]
-OptionalStr = Optional[str]
+ListLike = list[str] | tuple[str]
 
 
 @functools.lru_cache(maxsize=128)
 def _griddap_get_constraints(
     dataset_url: str,
     step: int,
-) -> Tuple[Dict, List, List]:
+) -> tuple[dict, list, list]:
     """
     Fetch metadata of griddap dataset and set initial constraints.

@@ -64,7 +62,7 @@ def _griddap_get_constraints(
     return constraints_dict, dim_names, variable_names
 
 
-def _griddap_check_constraints(user_constraints: Dict, original_constraints: Dict):
+def _griddap_check_constraints(user_constraints: dict, original_constraints: dict):
     """Check that constraints changed by user match those expected by dataset."""
     if user_constraints.keys() != original_constraints.keys():
         raise ValueError(
erddapy/core/interfaces.py (8 additions, 9 deletions)

@@ -4,8 +4,7 @@
 This module takes an URL or the bytes response of a request and converts it to Pandas,
 XArray, Iris, etc. objects.
 """
-
-from typing import TYPE_CHECKING, Dict, Optional
+from typing import TYPE_CHECKING
 
 import pandas as pd

@@ -19,8 +18,8 @@
 def to_pandas(
     url: str,
-    requests_kwargs: Optional[Dict] = None,
-    pandas_kwargs: Optional[Dict] = None,
+    requests_kwargs: dict | None = None,
+    pandas_kwargs: dict | None = None,
 ) -> "pd.DataFrame":
     """
     Convert a URL to Pandas DataFrame.

@@ -39,7 +38,7 @@ def to_pandas(
 def to_ncCF(
     url: str,
     protocol: str = None,
-    requests_kwargs: Optional[Dict] = None,
+    requests_kwargs: dict | None = None,
 ) -> "Dataset":
     """
     Convert a URL to a netCDF4 Dataset.

@@ -58,8 +57,8 @@ def to_ncCF(
 def to_xarray(
     url: str,
     response="opendap",
-    requests_kwargs: Optional[Dict] = None,
-    xarray_kwargs: Optional[Dict] = None,
+    requests_kwargs: dict | None = None,
+    xarray_kwargs: dict | None = None,
 ) -> "xr.Dataset":
     """
     Convert a URL to an xarray dataset.

@@ -83,8 +82,8 @@
 
 def to_iris(
     url: str,
-    requests_kwargs: Optional[Dict] = None,
-    iris_kwargs: Optional[Dict] = None,
+    requests_kwargs: dict | None = None,
+    iris_kwargs: dict | None = None,
 ):
     """
     Convert a URL to an iris CubeList.
erddapy/core/netcdf.py (3 additions, 2 deletions)

@@ -1,15 +1,16 @@
 """Handles netCDF responses."""
 
 import platform
+from collections.abc import Generator
 from contextlib import contextmanager
 from pathlib import Path
-from typing import BinaryIO, Dict, Generator, Optional
+from typing import BinaryIO
 from urllib.parse import urlparse
 
 from erddapy.core.url import urlopen
 
 
-def _nc_dataset(url, requests_kwargs: Optional[Dict] = None):
+def _nc_dataset(url, requests_kwargs: dict | None = None):
     """Return a netCDF4-python Dataset from memory and fallbacks to disk if that fails."""
     from netCDF4 import Dataset
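Beyond the `Dict`/`Optional` swap, `Generator` now comes from `collections.abc`: since Python 3.9 the ABCs there are directly subscriptable and the `typing` aliases are deprecated. A sketch of the annotated context-manager pattern this import supports (an assumption about the helper's shape, not necessarily the erddapy implementation):

    from collections.abc import Generator
    from contextlib import contextmanager
    from pathlib import Path
    from tempfile import NamedTemporaryFile


    @contextmanager
    def _tempnc(data: bytes) -> Generator[str, None, None]:
        """Write bytes to a temporary .nc file, yield its path, then clean up."""
        with NamedTemporaryFile(suffix=".nc", delete=False) as tmp:
            tmp.write(data)
            name = tmp.name
        try:
            yield name
        finally:
            Path(name).unlink(missing_ok=True)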
erddapy/core/url.py (13 additions, 13 deletions)

@@ -4,19 +4,19 @@
 import functools
 import io
 from datetime import datetime
-from typing import BinaryIO, Dict, List, Optional, Tuple, Union
+from typing import BinaryIO
 from urllib.parse import quote_plus
 
 import httpx
 import pytz
 from pandas import to_datetime
 
-ListLike = Union[List[str], Tuple[str]]
-OptionalStr = Optional[str]
+ListLike = list[str] | tuple[str]
+OptionalStr = str | None
 
 
 @functools.lru_cache(maxsize=128)
-def _urlopen(url: str, auth: Optional[tuple] = None, **kwargs: Dict) -> BinaryIO:
+def _urlopen(url: str, auth: tuple | None = None, **kwargs: dict) -> BinaryIO:
     if "timeout" not in kwargs.keys():
         kwargs["timeout"] = 60
     response = httpx.get(url, follow_redirects=True, auth=auth, **kwargs)

@@ -29,7 +29,7 @@ def _urlopen(url: str, auth: Optional[tuple] = None, **kwargs: Dict) -> BinaryIO:
 
 def urlopen(
     url: str,
-    requests_kwargs: Optional[Dict] = None,
+    requests_kwargs: dict | None = None,
 ) -> BinaryIO:
     """Thin wrapper around httpx get content.

@@ -45,7 +45,7 @@
 
 
 @functools.lru_cache(maxsize=128)
-def check_url_response(url: str, **kwargs: Dict) -> str:
+def check_url_response(url: str, **kwargs: dict) -> str:
     """
     Shortcut to `raise_for_status` instead of fetching the whole content.

@@ -58,7 +58,7 @@ def check_url_response(url: str, **kwargs: Dict) -> str:
     return url
 
 
-def _distinct(url: str, distinct: Optional[bool] = False) -> str:
+def _distinct(url: str, distinct: bool | None = False) -> str:
     """
     Sort all of the rows in the results table.

@@ -91,7 +91,7 @@ def _multi_urlopen(url: str) -> BinaryIO:
     return data
 
 
-def _quote_string_constraints(kwargs: Dict) -> Dict:
+def _quote_string_constraints(kwargs: dict) -> dict:
     """
     Quote constraints of String variables.

@@ -103,7 +103,7 @@
     }
 
 
-def _format_constraints_url(kwargs: Dict) -> str:
+def _format_constraints_url(kwargs: dict) -> str:
     """Join the constraint variables with separator '&' to add to the download link."""
     return "".join([f"&{k}{v}" for k, v in kwargs.items()])

@@ -115,7 +115,7 @@ def _check_substrings(constraint):
 
 
 def parse_dates(
-    date_time: Union[datetime, str],
+    date_time: datetime | str,
     dayfirst=False,
     yearfirst=False,
 ) -> float:

@@ -147,7 +147,7 @@
 def get_search_url(
     server: str,
     response: str = "html",
-    search_for: Optional[str] = None,
+    search_for: str | None = None,
     protocol: str = "tabledap",
     items_per_page: int = 1000,
     page: int = 1,

@@ -339,8 +339,8 @@ def get_download_url(
     server: str,
     dataset_id: OptionalStr = None,
     protocol: OptionalStr = None,
-    variables: Optional[ListLike] = None,
-    dim_names: Optional[ListLike] = None,
+    variables: ListLike | None = None,
+    dim_names: ListLike | None = None,
     response=None,
     constraints=None,
     distinct=False,
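The rewritten aliases are plain `types.UnionType` objects at runtime, so they compose with further unions, which is how `get_download_url` can take `ListLike | None`. A quick demonstration (Python >= 3.10, illustrative names):

    ListLike = list[str] | tuple[str]
    OptionalStr = str | None


    def pick(variables: ListLike | None = None, label: OptionalStr = None) -> list[str]:
        # isinstance() accepts PEP 604 unions directly.
        if variables is not None and not isinstance(variables, list | tuple):
            raise TypeError("variables must be a list or tuple of strings")
        return list(variables or []) + ([label] if label else [])


    print(pick(("time", "temperature"), label="surface"))
    # ['time', 'temperature', 'surface']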
erddapy/erddapy.py (15 additions, 16 deletions)

@@ -3,7 +3,6 @@
 import functools
 import hashlib
 from pathlib import Path
-from typing import Dict, List, Optional, Tuple, Union
 from urllib.request import urlretrieve
 
 import pandas as pd

@@ -40,8 +39,8 @@
     "ERDDAP",
 ]
 
-ListLike = Union[List[str], Tuple[str]]
-OptionalStr = Optional[str]
+ListLike = list[str] | tuple[str]
+OptionalStr = str | None
 
 
 class ERDDAP:

@@ -126,21 +125,21 @@ def __init__(
         self.response = response
 
         # Initialized only via properties.
-        self.constraints: Optional[Dict] = None
-        self.server_functions: Optional[Dict] = None
+        self.constraints: dict | None = None
+        self.server_functions: dict | None = None
         self.dataset_id: OptionalStr = None
-        self.requests_kwargs: Dict = {}
-        self.auth: Optional[tuple] = None
-        self.variables: Optional[ListLike] = None
-        self.dim_names: Optional[ListLike] = None
+        self.requests_kwargs: dict = {}
+        self.auth: tuple | None = None
+        self.variables: ListLike | None = None
+        self.dim_names: ListLike | None = None
 
         self._get_variables = functools.lru_cache(maxsize=128)(
             self._get_variables_uncached,
         )
         # Caching the last `dataset_id` and `variables` list request for quicker multiple accesses,
         # will be overridden when requesting a new `dataset_id`.
         self._dataset_id: OptionalStr = None
-        self._variables: Dict = {}
+        self._variables: dict = {}
 
     def griddap_initialize(
         self,

@@ -277,8 +276,8 @@ def get_download_url(
         self,
         dataset_id: OptionalStr = None,
         protocol: OptionalStr = None,
-        variables: Optional[ListLike] = None,
-        dim_names: Optional[ListLike] = None,
+        variables: ListLike | None = None,
+        dim_names: ListLike | None = None,
        response=None,
         constraints=None,
         distinct=False,

@@ -344,7 +343,7 @@ def get_download_url(
 
     def to_pandas(
         self,
-        requests_kwargs: Optional[Dict] = None,
+        requests_kwargs: dict | None = None,
         **kw,
     ) -> "pd.DataFrame":
         """Save a data request to a pandas.DataFrame.

@@ -373,7 +372,7 @@ def to_ncCF(self, protocol: str = None, **kw):
 
     def to_xarray(
         self,
-        requests_kwargs: Optional[Dict] = None,
+        requests_kwargs: dict | None = None,
         **kw,
     ):
         """Load the data request into a xarray.Dataset.

@@ -404,7 +403,7 @@ def to_iris(self, **kw):
         url = self.get_download_url(response=response, distinct=distinct)
         return to_iris(url, iris_kwargs=dict(**kw))
 
-    def _get_variables_uncached(self, dataset_id: OptionalStr = None) -> Dict:
+    def _get_variables_uncached(self, dataset_id: OptionalStr = None) -> dict:
         if not dataset_id:
             dataset_id = self.dataset_id

@@ -426,7 +425,7 @@ def _get_variables_uncached(self, dataset_id: OptionalStr = None) -> Dict:
         variables.update({variable: attributes})
         return variables
 
-    def get_var_by_attr(self, dataset_id: OptionalStr = None, **kwargs) -> List[str]:
+    def get_var_by_attr(self, dataset_id: OptionalStr = None, **kwargs) -> list[str]:
         """
         Return a variable based on its attributes.
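These signatures are the surface most users touch; a short usage sketch against the new annotations (server URL, dataset id, and constraint values are placeholders, not a real endpoint):

    from erddapy import ERDDAP

    e = ERDDAP(server="https://your-server.example/erddap", protocol="tabledap")
    e.dataset_id = "some_dataset_id"                    # OptionalStr
    e.variables = ["time", "temperature"]               # ListLike | None
    e.constraints = {"time>=": "2024-01-01T00:00:00Z"}  # dict | None
    df = e.to_pandas(requests_kwargs={"timeout": 120})  # dict | None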
erddapy/multiple_server_search.py (2 additions, 3 deletions)

@@ -1,7 +1,6 @@
 """Multiple Server Search."""
 
 import multiprocessing
-from typing import Dict
 
 import pandas as pd

@@ -19,7 +18,7 @@
 from erddapy.servers.servers import servers
 
 
-def _format_results(dfs: Dict[str, pd.DataFrame]) -> pd.DataFrame:
+def _format_results(dfs: dict[str, pd.DataFrame]) -> pd.DataFrame:
     """Format dictionary of results into a Pandas dataframe."""
     # we return None for bad server, so we need to filter them here
     df_all = pd.concat([list(df.values())[0] for df in dfs if df is not None])

@@ -30,7 +29,7 @@ def fetch_results(
     url: str,
     key: str,
     protocol,
-) -> Dict[str, pd.DataFrame]:
+) -> dict[str, pd.DataFrame]:
     """
     Fetch search results from multiple servers.
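For reference, the filter-and-concatenate pattern `_format_results` applies to the per-server results, reproduced with toy data (illustrative only):

    import pandas as pd

    # Each worker returns {server_key: DataFrame}, or None for a bad server.
    results = [
        {"server-a": pd.DataFrame({"Dataset ID": ["a1"]})},
        None,
        {"server-b": pd.DataFrame({"Dataset ID": ["b1"]})},
    ]
    df_all = pd.concat([list(df.values())[0] for df in results if df is not None])
    print(df_all)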
