diff --git a/autoPyTorch/datasets/base_dataset.py b/autoPyTorch/datasets/base_dataset.py
index 9955e706f..0c46522f5 100644
--- a/autoPyTorch/datasets/base_dataset.py
+++ b/autoPyTorch/datasets/base_dataset.py
@@ -1,7 +1,7 @@
 import os
 import uuid
 from abc import ABCMeta
-from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
+from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
 
 import numpy as np
 
@@ -14,15 +14,7 @@
 import torchvision
 
 from autoPyTorch.constants import CLASSIFICATION_OUTPUTS, STRING_TO_OUTPUT_TYPES
-from autoPyTorch.datasets.resampling_strategy import (
-    CrossValFunc,
-    CrossValFuncs,
-    CrossValTypes,
-    DEFAULT_RESAMPLING_PARAMETERS,
-    HoldOutFunc,
-    HoldOutFuncs,
-    HoldoutValTypes
-)
+from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutValTypes
 from autoPyTorch.utils.common import FitRequirement
 
 BaseDatasetInputType = Union[Tuple[np.ndarray, np.ndarray], Dataset]
@@ -97,10 +89,9 @@ def __init__(
             resampling_strategy (Union[CrossValTypes, HoldoutValTypes]),
                 (default=HoldoutValTypes.holdout_validation):
                 strategy to split the training data.
-            resampling_strategy_args (Optional[Dict[str, Any]]): arguments
-                required for the chosen resampling strategy. If None, uses
-                the default values provided in DEFAULT_RESAMPLING_PARAMETERS
-                in ```datasets/resampling_strategy.py```.
+            resampling_strategy_args (Optional[Dict[str, Any]]):
+                arguments required for the chosen resampling strategy.
+                The details are provided in autoPyTorch/datasets/resampling_strategy.py
             shuffle: Whether to shuffle the data before performing splits
             seed (int), (default=1): seed to be used for reproducibility.
             train_transforms (Optional[torchvision.transforms.Compose]):
@@ -116,12 +107,17 @@ def __init__(
         if not hasattr(train_tensors[0], 'shape'):
             type_check(train_tensors, val_tensors)
         self.train_tensors, self.val_tensors, self.test_tensors = train_tensors, val_tensors, test_tensors
-        self.cross_validators: Dict[str, CrossValFunc] = {}
-        self.holdout_validators: Dict[str, HoldOutFunc] = {}
         self.random_state = np.random.RandomState(seed=seed)
         self.shuffle = shuffle
+
         self.resampling_strategy = resampling_strategy
-        self.resampling_strategy_args = resampling_strategy_args
+        self.resampling_strategy_args: Dict[str, Any] = {}
+        if resampling_strategy_args is not None:
+            self.resampling_strategy_args = resampling_strategy_args
+
+        self.shuffle_split = self.resampling_strategy_args.get('shuffle', False)
+        self.is_stratify = self.resampling_strategy_args.get('stratify', False)
+
         self.task_type: Optional[str] = None
         self.issparse: bool = issparse(self.train_tensors[0])
         self.input_shape: Tuple[int] = self.train_tensors[0].shape[1:]
@@ -137,9 +133,6 @@
             # TODO: Look for a criteria to define small enough to preprocess
             self.is_small_preprocess = True
 
-        # Make sure cross validation splits are created once
-        self.cross_validators = CrossValFuncs.get_cross_validators(*CrossValTypes)
-        self.holdout_validators = HoldOutFuncs.get_holdout_validators(*HoldoutValTypes)
         self.splits = self.get_splits_from_resampling_strategy()
 
         # We also need to be able to transform the data, be it for pre-processing
@@ -207,6 +200,29 @@ def __len__(self) -> int:
 
     def _get_indices(self) -> np.ndarray:
         return self.random_state.permutation(len(self)) if self.shuffle else np.arange(len(self))
 
+    def _check_resampling_strategy_args(self) -> None:
+        if not any(isinstance(self.resampling_strategy, val_type)
+                   for val_type in [HoldoutValTypes, CrossValTypes]):
+            raise ValueError(f"resampling_strategy {self.resampling_strategy} is not supported.")
+
+        if self.resampling_strategy_args is not None and \
+                not isinstance(self.resampling_strategy_args, dict):
+            raise TypeError("resampling_strategy_args must be dict or None,"
+                            f" but got {type(self.resampling_strategy_args)}")
+
+        val_share = self.resampling_strategy_args.get('val_share', None)
+        num_splits = self.resampling_strategy_args.get('num_splits', None)
+
+        if val_share is not None and (val_share < 0 or val_share > 1):
+            raise ValueError(f"`val_share` must be between 0 and 1, got {val_share}.")
+
+        if num_splits is not None:
+            # check the type first; a non-int would otherwise hit the range check
+            if not isinstance(num_splits, int):
+                raise ValueError(f"`num_splits` must be an integer, got {num_splits}.")
+            elif num_splits <= 0:
+                raise ValueError(f"`num_splits` must be a positive integer, got {num_splits}.")
+
     def get_splits_from_resampling_strategy(self) -> List[Tuple[List[int], List[int]]]:
         """
         Creates a set of splits based on a resampling strategy provided
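With this change, resampling_strategy_args becomes the single place to configure splitting. For reference, a sketch of the four keys the new code reads (values illustrative, not defaults):

    # Illustrative only: the keys consumed by the new BaseDataset code.
    resampling_strategy_args = {
        'val_share': 0.33,   # holdout only; validated to lie in [0, 1]
        'num_splits': 5,     # cross validation only; validated to be a positive int
        'shuffle': True,     # stored as self.shuffle_split and forwarded to the splitter
        'stratify': True,    # if True, train_tensors[-1] is used as stratification labels
    }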
ValueError(f"resampling_strategy {self.resampling_strategy} is not supported.") + + if self.resampling_strategy_args is not None and \ + not isinstance(self.resampling_strategy_args, dict): + + raise TypeError("resampling_strategy_args must be dict or None," + f" but got {type(self.resampling_strategy_args)}") + + val_share = self.resampling_strategy_args.get('val_share', None) + num_splits = self.resampling_strategy_args.get('num_splits', None) + + if val_share is not None and (val_share < 0 or val_share > 1): + raise ValueError(f"`val_share` must be between 0 and 1, got {val_share}.") + + if num_splits is not None: + if num_splits <= 0: + raise ValueError(f"`num_splits` must be a positive integer, got {num_splits}.") + elif not isinstance(num_splits, int): + raise ValueError(f"`num_splits` must be an integer, got {num_splits}.") + def get_splits_from_resampling_strategy(self) -> List[Tuple[List[int], List[int]]]: """ Creates a set of splits based on a resampling strategy provided @@ -214,100 +230,28 @@ def get_splits_from_resampling_strategy(self) -> List[Tuple[List[int], List[int] Returns (List[Tuple[List[int], List[int]]]): splits in the [train_indices, val_indices] format """ - splits = [] + # check if the requirements are met and if we can get splits + self._check_resampling_strategy_args() + + labels_to_stratify = self.train_tensors[-1] if self.is_stratify else None + kwargs: Dict[str, Any] = {} + kwargs.update( + random_state=self.random_state, + shuffle=self.shuffle_split, + indices=self._get_indices(), + labels_to_stratify=labels_to_stratify + ) + if isinstance(self.resampling_strategy, HoldoutValTypes): - val_share = DEFAULT_RESAMPLING_PARAMETERS[self.resampling_strategy].get( - 'val_share', None) - if self.resampling_strategy_args is not None: - val_share = self.resampling_strategy_args.get('val_share', val_share) - splits.append( - self.create_holdout_val_split( - holdout_val_type=self.resampling_strategy, - val_share=val_share, - ) - ) + val_share = self.resampling_strategy_args.get('val_share', None) + return self.resampling_strategy(val_share=val_share, **kwargs) + elif isinstance(self.resampling_strategy, CrossValTypes): - num_splits = DEFAULT_RESAMPLING_PARAMETERS[self.resampling_strategy].get( - 'num_splits', None) - if self.resampling_strategy_args is not None: - num_splits = self.resampling_strategy_args.get('num_splits', num_splits) - # Create the split if it was not created before - splits.extend( - self.create_cross_val_splits( - cross_val_type=self.resampling_strategy, - num_splits=cast(int, num_splits), - ) - ) + num_splits = self.resampling_strategy_args.get('num_splits', None) + return self.resampling_strategy(num_splits=num_splits, **kwargs) + else: raise ValueError(f"Unsupported resampling strategy={self.resampling_strategy}") - return splits - - def create_cross_val_splits( - self, - cross_val_type: CrossValTypes, - num_splits: int - ) -> List[Tuple[Union[List[int], np.ndarray], Union[List[int], np.ndarray]]]: - """ - This function creates the cross validation split for the given task. - - It is done once per dataset to have comparable results among pipelines - Args: - cross_val_type (CrossValTypes): - num_splits (int): number of splits to be created - - Returns: - (List[Tuple[Union[List[int], np.ndarray], Union[List[int], np.ndarray]]]): - list containing 'num_splits' splits. - """ - # Create just the split once - # This is gonna be called multiple times, because the current dataset - # is being used for multiple pipelines. 
diff --git a/autoPyTorch/datasets/image_dataset.py b/autoPyTorch/datasets/image_dataset.py
index 4664dbaf5..6d915f513 100644
--- a/autoPyTorch/datasets/image_dataset.py
+++ b/autoPyTorch/datasets/image_dataset.py
@@ -42,10 +42,9 @@ class ImageDataset(BaseDataset):
         resampling_strategy (Union[CrossValTypes, HoldoutValTypes]),
             (default=HoldoutValTypes.holdout_validation):
             strategy to split the training data.
-        resampling_strategy_args (Optional[Dict[str, Any]]): arguments
-            required for the chosen resampling strategy. If None, uses
-            the default values provided in DEFAULT_RESAMPLING_PARAMETERS
-            in ```datasets/resampling_strategy.py```.
+        resampling_strategy_args (Optional[Dict[str, Any]]):
+            arguments required for the chosen resampling strategy.
+            The details are provided in autoPyTorch/datasets/resampling_strategy.py
         shuffle: Whether to shuffle the data before performing splits
         seed (int), (default=1): seed to be used for reproducibility.
         train_transforms (Optional[torchvision.transforms.Compose]):
diff --git a/autoPyTorch/datasets/resampling_strategy.py b/autoPyTorch/datasets/resampling_strategy.py
index a1e599dd6..f031cd443 100644
--- a/autoPyTorch/datasets/resampling_strategy.py
+++ b/autoPyTorch/datasets/resampling_strategy.py
@@ -1,5 +1,5 @@
 from enum import IntEnum
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import List, NamedTuple, Optional, Tuple, Union
 
 import numpy as np
 
@@ -12,24 +12,14 @@
     train_test_split
 )
 
-from typing_extensions import Protocol
+from torch.utils.data import Dataset
 
 
-# Use callback protocol as workaround, since callable with function fields count 'self' as argument
-class CrossValFunc(Protocol):
-    def __call__(self,
-                 random_state: np.random.RandomState,
-                 num_splits: int,
-                 indices: np.ndarray,
-                 stratify: Optional[Any]) -> List[Tuple[np.ndarray, np.ndarray]]:
-        ...
-
-
-class HoldOutFunc(Protocol):
-    def __call__(self, random_state: np.random.RandomState, val_share: float,
-                 indices: np.ndarray, stratify: Optional[Any]
-                 ) -> Tuple[np.ndarray, np.ndarray]:
-        ...
+class _ResamplingStrategyArgs(NamedTuple):
+    val_share: float = 0.33
+    num_splits: int = 5
+    shuffle: bool = False
+    stratify: bool = False
 
 
 class CrossValTypes(IntEnum):
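The _ResamplingStrategyArgs NamedTuple replaces the removed DEFAULT_RESAMPLING_PARAMETERS dict; defaults are now read by instantiating it, which is the pattern the __call__ bodies below use:

    # default lookup pattern used by CrossValTypes/HoldoutValTypes.__call__
    defaults = _ResamplingStrategyArgs()
    assert defaults.val_share == 0.33
    assert defaults.num_splits == 5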
@@ -47,162 +37,172 @@ class CrossValTypes(IntEnum):
         >>> for cross_val_type in CrossValTypes:
                 print(cross_val_type.name, cross_val_type.value)
 
-        stratified_k_fold_cross_validation 1
-        k_fold_cross_validation 2
-        stratified_shuffle_split_cross_validation 3
-        shuffle_split_cross_validation 4
-        time_series_cross_validation 5
+        k_fold_cross_validation 100
+        time_series 101
     """
-    stratified_k_fold_cross_validation = 1
-    k_fold_cross_validation = 2
-    stratified_shuffle_split_cross_validation = 3
-    shuffle_split_cross_validation = 4
-    time_series_cross_validation = 5
-
-    def is_stratified(self) -> bool:
-        stratified = [self.stratified_k_fold_cross_validation,
-                      self.stratified_shuffle_split_cross_validation]
-        return getattr(self, self.name) in stratified
+    k_fold_cross_validation = 100
+    time_series = 101
+
+    def __call__(
+        self,
+        indices: np.ndarray,
+        random_state: Optional[np.random.RandomState] = None,
+        num_splits: Optional[int] = None,
+        shuffle: bool = False,
+        labels_to_stratify: Optional[Union[Tuple[np.ndarray, np.ndarray], Dataset]] = None
+    ) -> List[Tuple[np.ndarray, np.ndarray]]:
+        """
+        Calls and type-checks the split function specified by this member.
+
+        Args:
+            random_state (np.random.RandomState): random number generator for reproducibility
+            num_splits (int): The number of splits in cross validation
+            indices (np.ndarray): The indices of data points in a dataset
+            shuffle (bool): Whether to shuffle the indices
+            labels_to_stratify (Optional[Union[Tuple[np.ndarray, np.ndarray], Dataset]]):
+                The labels of the corresponding data points, used for stratification.
+
+        Returns:
+            splits (List[Tuple[np.ndarray, np.ndarray]]):
+                splits[a split identifier][0: train, 1: val][a data point identifier]
+        """
+
+        default_num_splits = _ResamplingStrategyArgs().num_splits
+        num_splits = num_splits if num_splits is not None else default_num_splits
+        split_fn = getattr(CrossValFuncs, self.name)
+
+        return split_fn(
+            random_state=random_state if shuffle else None,
+            num_splits=num_splits,
+            indices=indices,
+            shuffle=shuffle,
+            labels_to_stratify=labels_to_stratify
+        )
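Because __call__ forwards random_state only when shuffle=True, unshuffled folds stay deterministic without a seed, matching sklearn's contract that KFold only accepts a random_state together with shuffling. For example (illustrative indices):

    import numpy as np

    from autoPyTorch.datasets.resampling_strategy import CrossValTypes

    # shuffle=False -> KFold, random_state dropped internally
    folds = CrossValTypes.k_fold_cross_validation(indices=np.arange(6), num_splits=3)
    # shuffle=True -> ShuffleSplit, seeded by the given random_state
    folds = CrossValTypes.k_fold_cross_validation(
        indices=np.arange(6), num_splits=3, shuffle=True,
        random_state=np.random.RandomState(42),
    )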
 
 
 class HoldoutValTypes(IntEnum):
-    """TODO: change to enum using functools.partial"""
-    """The type of hold out validation (refer to CrossValTypes' doc-string)"""
-    holdout_validation = 6
-    stratified_holdout_validation = 7
-
-    def is_stratified(self) -> bool:
-        stratified = [self.stratified_holdout_validation]
-        return getattr(self, self.name) in stratified
-
-
-# TODO: replace it with another way
-RESAMPLING_STRATEGIES = [CrossValTypes, HoldoutValTypes]
-
-DEFAULT_RESAMPLING_PARAMETERS = {
-    HoldoutValTypes.holdout_validation: {
-        'val_share': 0.33,
-    },
-    HoldoutValTypes.stratified_holdout_validation: {
-        'val_share': 0.33,
-    },
-    CrossValTypes.k_fold_cross_validation: {
-        'num_splits': 5,
-    },
-    CrossValTypes.stratified_k_fold_cross_validation: {
-        'num_splits': 5,
-    },
-    CrossValTypes.shuffle_split_cross_validation: {
-        'num_splits': 5,
-    },
-    CrossValTypes.time_series_cross_validation: {
-        'num_splits': 5,
-    },
-}  # type: Dict[Union[HoldoutValTypes, CrossValTypes], Dict[str, Any]]
-
-
-class HoldOutFuncs():
-    @staticmethod
-    def holdout_validation(random_state: np.random.RandomState,
-                           val_share: float,
-                           indices: np.ndarray,
-                           **kwargs: Any
-                           ) -> Tuple[np.ndarray, np.ndarray]:
-        shuffle = kwargs.get('shuffle', True)
-        train, val = train_test_split(indices, test_size=val_share,
-                                      shuffle=shuffle,
-                                      random_state=random_state if shuffle else None,
-                                      )
-        return train, val
-
-    @staticmethod
-    def stratified_holdout_validation(random_state: np.random.RandomState,
-                                      val_share: float,
-                                      indices: np.ndarray,
-                                      **kwargs: Any
-                                      ) -> Tuple[np.ndarray, np.ndarray]:
-        train, val = train_test_split(indices, test_size=val_share, shuffle=True, stratify=kwargs["stratify"],
-                                      random_state=random_state)
-        return train, val
-
-    @classmethod
-    def get_holdout_validators(cls, *holdout_val_types: HoldoutValTypes) -> Dict[str, HoldOutFunc]:
-
-        holdout_validators = {
-            holdout_val_type.name: getattr(cls, holdout_val_type.name)
-            for holdout_val_type in holdout_val_types
-        }
-        return holdout_validators
+    """The type of holdout validation
+
+    This class is used to specify the holdout validation function
+    and is not supposed to be instantiated.
+
+    Examples: This class is supposed to be used as follows
+    >>> holdout_type = HoldoutValTypes.holdout_validation
+    >>> print(holdout_type.name)
+
+    holdout_validation
+
+    >>> print(holdout_type.value)
+
+    0
+
+    >>> for holdout_type in HoldoutValTypes:
+            print(holdout_type.name)
+
+    holdout_validation
+
+    Additionally, each HoldoutValTypes member can be called directly.
+    """
+
+    holdout_validation = 0
+
+    def __call__(
+        self,
+        indices: np.ndarray,
+        random_state: Optional[np.random.RandomState] = None,
+        val_share: Optional[float] = None,
+        shuffle: bool = False,
+        labels_to_stratify: Optional[Union[Tuple[np.ndarray, np.ndarray], Dataset]] = None
+    ) -> List[Tuple[np.ndarray, np.ndarray]]:
+        """
+        Calls and type-checks the split function specified by this member.
+
+        Args:
+            random_state (np.random.RandomState): random number generator for reproducibility
+            val_share (float): The ratio of the validation data to the given dataset
+            indices (np.ndarray): The indices of data points in a dataset
+            shuffle (bool): Whether to shuffle the indices
+            labels_to_stratify (Optional[Union[Tuple[np.ndarray, np.ndarray], Dataset]]):
+                The labels of the corresponding data points, used for stratification.
+
+        Returns:
+            splits (List[Tuple[np.ndarray, np.ndarray]]):
+                splits[a split identifier][0: train, 1: val][a data point identifier]
+        """
+
+        default_val_share = _ResamplingStrategyArgs().val_share
+        val_share = val_share if val_share is not None else default_val_share
+        split_fn = getattr(HoldoutFuncs, self.name)
+
+        return split_fn(
+            random_state=random_state if shuffle else None,
+            val_share=val_share,
+            indices=indices,
+            shuffle=shuffle,
+            labels_to_stratify=labels_to_stratify
+        )
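Stratified holdout is now requested by passing the labels rather than by a dedicated enum member; a small sketch with toy labels:

    import numpy as np

    from autoPyTorch.datasets.resampling_strategy import HoldoutValTypes

    labels = np.array([0, 0, 0, 1, 1, 1])
    [(train, val)] = HoldoutValTypes.holdout_validation(
        indices=np.arange(6),
        val_share=0.33,
        labels_to_stratify=labels,  # holdout_validation forces shuffle=True here
    )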
 
 
-class CrossValFuncs():
-    @staticmethod
-    def shuffle_split_cross_validation(random_state: np.random.RandomState,
-                                       num_splits: int,
-                                       indices: np.ndarray,
-                                       **kwargs: Any
-                                       ) -> List[Tuple[np.ndarray, np.ndarray]]:
-        cv = ShuffleSplit(n_splits=num_splits, random_state=random_state)
-        splits = list(cv.split(indices))
-        return splits
-
-    @staticmethod
-    def stratified_shuffle_split_cross_validation(random_state: np.random.RandomState,
-                                                  num_splits: int,
-                                                  indices: np.ndarray,
-                                                  **kwargs: Any
-                                                  ) -> List[Tuple[np.ndarray, np.ndarray]]:
-        cv = StratifiedShuffleSplit(n_splits=num_splits, random_state=random_state)
-        splits = list(cv.split(indices, kwargs["stratify"]))
-        return splits
-
-    @staticmethod
-    def stratified_k_fold_cross_validation(random_state: np.random.RandomState,
-                                           num_splits: int,
-                                           indices: np.ndarray,
-                                           **kwargs: Any
-                                           ) -> List[Tuple[np.ndarray, np.ndarray]]:
-        cv = StratifiedKFold(n_splits=num_splits, random_state=random_state)
-        splits = list(cv.split(indices, kwargs["stratify"]))
-        return splits
+class HoldoutFuncs():
+    @staticmethod
+    def holdout_validation(
+        indices: np.ndarray,
+        random_state: Optional[np.random.RandomState] = None,
+        val_share: Optional[float] = None,
+        shuffle: bool = False,
+        labels_to_stratify: Optional[Union[Tuple[np.ndarray, np.ndarray], Dataset]] = None
+    ) -> List[Tuple[np.ndarray, np.ndarray]]:
+
+        """sklearn requires shuffle=True when stratify is given"""
+        train, val = train_test_split(
+            indices, test_size=val_share,
+            shuffle=shuffle if labels_to_stratify is None else True,
+            random_state=random_state,
+            stratify=labels_to_stratify
+        )
+        return [(train, val)]
+
+
+class CrossValFuncs():
+    # (shuffle, is_stratify) -> split_fn
+    _args2split_fn = {
+        (True, True): StratifiedShuffleSplit,
+        (True, False): ShuffleSplit,
+        (False, True): StratifiedKFold,
+        (False, False): KFold,
+    }
 
     @staticmethod
-    def k_fold_cross_validation(random_state: np.random.RandomState,
-                                num_splits: int,
-                                indices: np.ndarray,
-                                **kwargs: Any
-                                ) -> List[Tuple[np.ndarray, np.ndarray]]:
+    def k_fold_cross_validation(
+        indices: np.ndarray,
+        random_state: Optional[np.random.RandomState] = None,
+        num_splits: Optional[int] = None,
+        shuffle: bool = False,
+        labels_to_stratify: Optional[Union[Tuple[np.ndarray, np.ndarray], Dataset]] = None
+    ) -> List[Tuple[np.ndarray, np.ndarray]]:
         """
-        Standard k fold cross validation.
-
-        Args:
-            indices (np.ndarray): array of indices to be split
-            num_splits (int): number of cross validation splits
-
         Returns:
             splits (List[Tuple[List, List]]): list of tuples of training and validation indices
         """
-        shuffle = kwargs.get('shuffle', True)
-        cv = KFold(n_splits=num_splits, random_state=random_state if shuffle else None, shuffle=shuffle)
-        splits = list(cv.split(indices))
+
+        split_fn = CrossValFuncs._args2split_fn[(shuffle, labels_to_stratify is not None)]
+        cv = split_fn(n_splits=num_splits, random_state=random_state)
+        # stratified splitters require the labels; KFold and ShuffleSplit accept and ignore y
+        splits = list(cv.split(indices, y=labels_to_stratify))
         return splits
 
     @staticmethod
-    def time_series_cross_validation(random_state: np.random.RandomState,
-                                     num_splits: int,
-                                     indices: np.ndarray,
-                                     **kwargs: Any
-                                     ) -> List[Tuple[np.ndarray, np.ndarray]]:
+    def time_series(
+        indices: np.ndarray,
+        random_state: Optional[np.random.RandomState] = None,
+        num_splits: Optional[int] = None,
+        shuffle: bool = False,
+        labels_to_stratify: Optional[Union[Tuple[np.ndarray, np.ndarray], Dataset]] = None
+    ) -> List[Tuple[np.ndarray, np.ndarray]]:
         """
         Returns train and validation indices respecting the temporal ordering of the data.
 
-        Args:
-            indices (np.ndarray): array of indices to be split
-            num_splits (int): number of cross validation splits
-
-        Returns:
-            splits (List[Tuple[List, List]]): list of tuples of training and validation indices
-
         Examples:
             >>> indices = np.array([0, 1, 2, 3])
-            >>> CrossValFuncs.time_series_cross_validation(3, indices)
+            >>> CrossValFuncs.time_series(indices=indices, num_splits=3)
                 [([0], [1]),
                  ([0, 1], [2]),
                  ([0, 1, 2], [3])]
 
         """
-        cv = TimeSeriesSplit(n_splits=num_splits, random_state=random_state)
+        cv = TimeSeriesSplit(n_splits=num_splits)
         splits = list(cv.split(indices))
         return splits
-
-    @classmethod
-    def get_cross_validators(cls, *cross_val_types: CrossValTypes) -> Dict[str, CrossValFunc]:
-        cross_validators = {
-            cross_val_type.name: getattr(cls, cross_val_type.name)
-            for cross_val_type in cross_val_types
-        }
-        return cross_validators
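The (shuffle, stratify) pair now selects the sklearn splitter through the class-level _args2split_fn table instead of four near-duplicate methods. A sketch of the stratified branch (toy data; note the labels are forwarded to the splitter, as in the code above):

    import numpy as np

    from autoPyTorch.datasets.resampling_strategy import CrossValTypes

    labels = np.array([0, 1] * 5)
    # (shuffle=False, stratify=True) -> StratifiedKFold under the hood
    folds = CrossValTypes.k_fold_cross_validation(
        indices=np.arange(10), num_splits=5, labels_to_stratify=labels)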
diff --git a/autoPyTorch/datasets/tabular_dataset.py b/autoPyTorch/datasets/tabular_dataset.py
index 19e483612..3516e585e 100644
--- a/autoPyTorch/datasets/tabular_dataset.py
+++ b/autoPyTorch/datasets/tabular_dataset.py
@@ -47,10 +47,9 @@ class TabularDataset(BaseDataset):
         resampling_strategy (Union[CrossValTypes, HoldoutValTypes]),
             (default=HoldoutValTypes.holdout_validation):
             strategy to split the training data.
-        resampling_strategy_args (Optional[Dict[str, Any]]): arguments
-            required for the chosen resampling strategy. If None, uses
-            the default values provided in DEFAULT_RESAMPLING_PARAMETERS
-            in ```datasets/resampling_strategy.py```.
+        resampling_strategy_args (Optional[Dict[str, Any]]):
+            arguments required for the chosen resampling strategy.
+            The details are provided in autoPyTorch/datasets/resampling_strategy.py
         shuffle: Whether to shuffle the data before performing splits
         seed (int), (default=1): seed to be used for reproducibility.
         train_transforms (Optional[torchvision.transforms.Compose]):
diff --git a/autoPyTorch/datasets/time_series_dataset.py b/autoPyTorch/datasets/time_series_dataset.py
index edd07a80e..2e143bf20 100644
--- a/autoPyTorch/datasets/time_series_dataset.py
+++ b/autoPyTorch/datasets/time_series_dataset.py
@@ -5,18 +5,33 @@
 import torchvision.transforms
 
 from autoPyTorch.datasets.base_dataset import BaseDataset
-from autoPyTorch.datasets.resampling_strategy import (
-    CrossValFuncs,
-    CrossValTypes,
-    HoldOutFuncs,
-    HoldoutValTypes
-)
+from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutValTypes
 
 TIME_SERIES_FORECASTING_INPUT = Tuple[np.ndarray, np.ndarray]  # currently only numpy arrays are supported
 TIME_SERIES_REGRESSION_INPUT = Tuple[np.ndarray, np.ndarray]
 TIME_SERIES_CLASSIFICATION_INPUT = Tuple[np.ndarray, np.ndarray]
 
 
+def _check_prohibited_resampling() -> None:
+    """Check if the resampling strategy is suitable for a given task
+
+    Intended arguments (the function is still a stub and takes none yet):
+        task_name (str): Typically the Dataset class name
+        resampling_strategy (Union[CrossValTypes, HoldoutValTypes]):
+            The splitting function
+        args (Union[CrossValTypes, HoldoutValTypes]):
+            The list of cross validation and holdout validation functions
+            that are suitable for the given task
+
+    Returns:
+        None
+
+    TODO: Especially, reject shuffle splits
+    """
+
+    pass
+
+
 class TimeSeriesForecastingDataset(BaseDataset):
     def __init__(self,
                  target_variables: Tuple[int],
@@ -60,8 +75,6 @@ def __init__(self,
             train_transforms=train_transforms,
             val_transforms=val_transforms,
         )
-        self.cross_validators = CrossValFuncs.get_cross_validators(CrossValTypes.time_series_cross_validation)
-        self.holdout_validators = HoldOutFuncs.get_holdout_validators(HoldoutValTypes.holdout_validation)
 
 
 def _check_time_series_forecasting_inputs(target_variables: Tuple[int],
@@ -95,8 +108,8 @@ def _prepare_time_series_forecasting_tensor(tensor: TIME_SERIES_FORECASTING_INPUT,
     population_size, time_series_length, num_features = tensor[0].shape
     num_targets = len(target_variables)
     num_datapoints = time_series_length - sequence_length - n_steps + 1
-    x_tensor = np.zeros((num_datapoints, population_size, sequence_length, num_features), dtype=np.float)
-    y_tensor = np.zeros((num_datapoints, population_size, num_targets), dtype=np.float)
+    x_tensor = np.zeros((num_datapoints, population_size, sequence_length, num_features), dtype=np.float64)
+    y_tensor = np.zeros((num_datapoints, population_size, num_targets), dtype=np.float64)
 
     for p in range(population_size):
         for i in range(num_datapoints):
@@ -117,16 +130,6 @@ def __init__(self,
                                        val=val,
                                        task_type="time_series_classification")
         super().__init__(train_tensors=train, val_tensors=val, shuffle=True)
-        self.cross_validators = CrossValFuncs.get_cross_validators(
-            CrossValTypes.stratified_k_fold_cross_validation,
-            CrossValTypes.k_fold_cross_validation,
-            CrossValTypes.shuffle_split_cross_validation,
-            CrossValTypes.stratified_shuffle_split_cross_validation
-        )
-        self.holdout_validators = HoldOutFuncs.get_holdout_validators(
-            HoldoutValTypes.holdout_validation,
-            HoldoutValTypes.stratified_holdout_validation
-        )
 
 
 class TimeSeriesRegressionDataset(BaseDataset):
@@ -135,13 +138,6 @@ def __init__(self, train: Tuple[np.ndarray, np.ndarray], val: Optional[Tuple[np.ndarray, np.ndarray]] = None):
                                   val=val,
                                   task_type="time_series_regression")
         super().__init__(train_tensors=train, val_tensors=val, shuffle=True)
-        self.cross_validators = CrossValFuncs.get_cross_validators(
-            CrossValTypes.k_fold_cross_validation,
-            CrossValTypes.shuffle_split_cross_validation
-        )
-        self.holdout_validators = HoldOutFuncs.get_holdout_validators(
-            HoldoutValTypes.holdout_validation
-        )
 
 
 def _check_time_series_inputs(task_type: str,
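_check_prohibited_resampling is left as a stub in this diff; one possible shape of the check it describes (signature and names are assumptions drawn from the docstring, not code from this change) could be:

    from typing import Iterable, Union

    from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutValTypes

    def _check_prohibited_resampling(
        task_name: str,
        resampling_strategy: Union[CrossValTypes, HoldoutValTypes],
        allowed: Iterable[Union[CrossValTypes, HoldoutValTypes]],
    ) -> None:
        # Hypothetical: reject strategies unsuited to the task,
        # e.g. shuffled splits for time series forecasting.
        if resampling_strategy not in tuple(allowed):
            raise ValueError(
                f"{task_name} does not support resampling_strategy={resampling_strategy}."
            )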
val=val, task_type="time_series_regression") super().__init__(train_tensors=train, val_tensors=val, shuffle=True) - self.cross_validators = CrossValFuncs.get_cross_validators( - CrossValTypes.k_fold_cross_validation, - CrossValTypes.shuffle_split_cross_validation - ) - self.holdout_validators = HoldOutFuncs.get_holdout_validators( - HoldoutValTypes.holdout_validation - ) def _check_time_series_inputs(task_type: str, diff --git a/autoPyTorch/optimizer/smbo.py b/autoPyTorch/optimizer/smbo.py index ddd6e95a1..e1e070228 100644 --- a/autoPyTorch/optimizer/smbo.py +++ b/autoPyTorch/optimizer/smbo.py @@ -20,7 +20,6 @@ from autoPyTorch.datasets.base_dataset import BaseDataset from autoPyTorch.datasets.resampling_strategy import ( CrossValTypes, - DEFAULT_RESAMPLING_PARAMETERS, HoldoutValTypes, ) from autoPyTorch.ensemble.ensemble_builder import EnsembleBuilderManager @@ -173,9 +172,7 @@ def __init__(self, # Evaluation self.resampling_strategy = resampling_strategy - if resampling_strategy_args is None: - resampling_strategy_args = DEFAULT_RESAMPLING_PARAMETERS[resampling_strategy] - self.resampling_strategy_args = resampling_strategy_args + self.resampling_strategy_args = resampling_strategy_args if resampling_strategy_args is not None else {} # and a bunch of useful limits self.worst_possible_result = get_cost_of_crash(self.metric) diff --git a/examples/40_advanced/example_resampling_strategy.py b/examples/40_advanced/example_resampling_strategy.py index 270f518c8..e0b1ec77a 100644 --- a/examples/40_advanced/example_resampling_strategy.py +++ b/examples/40_advanced/example_resampling_strategy.py @@ -115,9 +115,9 @@ api = TabularClassificationTask( # For demonstration purposes, we use # Stratified hold out validation. However, - # one can also use CrossValTypes.stratified_k_fold_cross_validation. - resampling_strategy=HoldoutValTypes.stratified_holdout_validation, - resampling_strategy_args={'val_share': 0.33} + # one can also use CrossValTypes.k_fold_cross_validation. + resampling_strategy=HoldoutValTypes.holdout_validation, + resampling_strategy_args={'val_share': 0.33, 'stratify': True} ) ############################################################################ diff --git a/test/test_evaluation/test_train_evaluator.py b/test/test_evaluation/test_train_evaluator.py index ae35c097b..e0b0b74f0 100644 --- a/test/test_evaluation/test_train_evaluator.py +++ b/test/test_evaluation/test_train_evaluator.py @@ -112,7 +112,7 @@ def test_holdout(self, pipeline_mock): self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1) self.assertEqual(evaluator.file_output.call_count, 1) - self.assertEqual(result, 0.5652173913043479) + self.assertEqual(result, 0.4782608695652174) self.assertEqual(pipeline_mock.fit.call_count, 1) # 3 calls because of train, holdout and test set self.assertEqual(pipeline_mock.predict_proba.call_count, 3) @@ -150,7 +150,7 @@ def test_cv(self, pipeline_mock): self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1) self.assertEqual(evaluator.file_output.call_count, 1) - self.assertEqual(result, 0.46235467431119603) + self.assertEqual(result, 0.4651019270584489) self.assertEqual(pipeline_mock.fit.call_count, 5) # 9 calls because of the training, holdout and # test set (3 sets x 5 folds = 15)