[back] Get back to the original name to make the PR review procedure easier
nabenabe0928 committed May 10, 2021
1 parent 1e00413 commit 1e82b21
Showing 13 changed files with 67 additions and 67 deletions.
6 changes: 3 additions & 3 deletions autoPyTorch/api/base_task.py
@@ -34,7 +34,7 @@
STRING_TO_TASK_TYPES,
)
from autoPyTorch.datasets.base_dataset import BaseDataset
- from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutTypes
+ from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutValTypes
from autoPyTorch.ensemble.ensemble_builder import EnsembleBuilderManager
from autoPyTorch.ensemble.ensemble_selection import EnsembleSelection
from autoPyTorch.ensemble.singlebest_ensemble import SingleBest
@@ -138,7 +138,7 @@ def __init__(
include_components: Optional[Dict] = None,
exclude_components: Optional[Dict] = None,
backend: Optional[Backend] = None,
- resampling_strategy: Union[CrossValTypes, HoldoutTypes] = HoldoutTypes.holdout,
+ resampling_strategy: Union[CrossValTypes, HoldoutValTypes] = HoldoutValTypes.holdout_validation,
resampling_strategy_args: Optional[Dict[str, Any]] = None,
search_space_updates: Optional[HyperparameterSearchSpaceUpdates] = None,
task_type: Optional[str] = None
@@ -1171,7 +1171,7 @@ def predict(
assert self.ensemble_ is not None, "Load models should error out if no ensemble"
self.ensemble_ = cast(Union[SingleBest, EnsembleSelection], self.ensemble_)

- if isinstance(self.resampling_strategy, HoldoutTypes):
+ if isinstance(self.resampling_strategy, HoldoutValTypes):
models = self.models_
elif isinstance(self.resampling_strategy, CrossValTypes):
models = self.cv_models_
4 changes: 2 additions & 2 deletions autoPyTorch/api/tabular_classification.py
@@ -15,7 +15,7 @@
from autoPyTorch.datasets.base_dataset import BaseDataset
from autoPyTorch.datasets.resampling_strategy import (
CrossValTypes,
-     HoldoutTypes,
+     HoldoutValTypes,
)
from autoPyTorch.datasets.tabular_dataset import TabularDataset
from autoPyTorch.pipeline.tabular_classification import TabularClassificationPipeline
@@ -72,7 +72,7 @@ def __init__(
delete_output_folder_after_terminate: bool = True,
include_components: Optional[Dict] = None,
exclude_components: Optional[Dict] = None,
- resampling_strategy: Union[CrossValTypes, HoldoutTypes] = HoldoutTypes.holdout,
+ resampling_strategy: Union[CrossValTypes, HoldoutValTypes] = HoldoutValTypes.holdout_validation,
resampling_strategy_args: Optional[Dict[str, Any]] = None,
backend: Optional[Backend] = None,
search_space_updates: Optional[HyperparameterSearchSpaceUpdates] = None
4 changes: 2 additions & 2 deletions autoPyTorch/api/tabular_regression.py
@@ -15,7 +15,7 @@
from autoPyTorch.datasets.base_dataset import BaseDataset
from autoPyTorch.datasets.resampling_strategy import (
CrossValTypes,
-     HoldoutTypes,
+     HoldoutValTypes,
)
from autoPyTorch.datasets.tabular_dataset import TabularDataset
from autoPyTorch.pipeline.tabular_regression import TabularRegressionPipeline
@@ -64,7 +64,7 @@ def __init__(
delete_output_folder_after_terminate: bool = True,
include_components: Optional[Dict] = None,
exclude_components: Optional[Dict] = None,
- resampling_strategy: Union[CrossValTypes, HoldoutTypes] = HoldoutTypes.holdout,
+ resampling_strategy: Union[CrossValTypes, HoldoutValTypes] = HoldoutValTypes.holdout_validation,
resampling_strategy_args: Optional[Dict[str, Any]] = None,
backend: Optional[Backend] = None,
search_space_updates: Optional[HyperparameterSearchSpaceUpdates] = None
12 changes: 6 additions & 6 deletions autoPyTorch/datasets/base_dataset.py
@@ -14,7 +14,7 @@
import torchvision

from autoPyTorch.constants import CLASSIFICATION_OUTPUTS, STRING_TO_OUTPUT_TYPES
- from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutTypes
+ from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutValTypes
from autoPyTorch.utils.common import FitRequirement

BaseDatasetInputType = Union[Tuple[np.ndarray, np.ndarray], Dataset]
@@ -69,7 +69,7 @@ def __init__(
dataset_name: Optional[str] = None,
val_tensors: Optional[BaseDatasetInputType] = None,
test_tensors: Optional[BaseDatasetInputType] = None,
- resampling_strategy: Union[CrossValTypes, HoldoutTypes] = HoldoutTypes.holdout,
+ resampling_strategy: Union[CrossValTypes, HoldoutValTypes] = HoldoutValTypes.holdout_validation,
resampling_strategy_args: Optional[Dict[str, Any]] = None,
seed: Optional[int] = 42,
train_transforms: Optional[torchvision.transforms.Compose] = None,
@@ -85,8 +85,8 @@ def __init__(
validation data
test_tensors (An optional tuple of objects that have a __len__ and a __getitem__ attribute):
test data
- resampling_strategy (Union[CrossValTypes, HoldoutTypes]),
- (default=HoldoutTypes.holdout):
+ resampling_strategy (Union[CrossValTypes, HoldoutValTypes]),
+ (default=HoldoutValTypes.holdout_validation):
strategy to split the training data.
resampling_strategy_args (Optional[Dict[str, Any]]):
arguments required for the chosen resampling strategy.
@@ -196,7 +196,7 @@ def _get_indices(self) -> np.ndarray:

def _process_resampling_strategy_args(self) -> None:
if not any(isinstance(self.resampling_strategy, val_type)
- for val_type in [HoldoutTypes, CrossValTypes]):
+ for val_type in [HoldoutValTypes, CrossValTypes]):
raise ValueError(f"resampling_strategy {self.resampling_strategy} is not supported.")

if self.resampling_strategy_args is not None and \
@@ -229,7 +229,7 @@ def get_splits_from_resampling_strategy(self) -> List[Tuple[List[int], List[int]

labels_to_stratify = self.train_tensors[-1] if self.is_stratify else None

- if isinstance(self.resampling_strategy, HoldoutTypes):
+ if isinstance(self.resampling_strategy, HoldoutValTypes):
val_share = self.resampling_strategy_args['val_share']

return self.resampling_strategy(
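As a quick illustration of the hunks above: BaseDataset now validates resampling_strategy against the renamed HoldoutValTypes/CrossValTypes and reads the split parameters from resampling_strategy_args. A minimal sketch of those dictionaries; the keys shown ('val_share', 'stratify', 'num_splits') are the ones appearing elsewhere in this commit, and any other optional keys are not shown here:

    # holdout path: 'val_share' is read in get_splits_from_resampling_strategy
    holdout_args = {'val_share': 0.33, 'stratify': True}

    # cross-validation path: 'num_splits' matches k_fold_cross_validation
    cv_args = {'num_splits': 3}

    # either dictionary is passed as resampling_strategy_args together with
    # HoldoutValTypes.holdout_validation or CrossValTypes.k_fold_cross_validation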
8 changes: 4 additions & 4 deletions autoPyTorch/datasets/image_dataset.py
@@ -23,7 +23,7 @@
from autoPyTorch.datasets.base_dataset import BaseDataset
from autoPyTorch.datasets.resampling_strategy import (
CrossValTypes,
-     HoldoutTypes,
+     HoldoutValTypes,
)

IMAGE_DATASET_INPUT = Union[Dataset, Tuple[Union[np.ndarray, List[str]], np.ndarray]]
@@ -39,8 +39,8 @@ class ImageDataset(BaseDataset):
validation data
test (Union[Dataset, Tuple[Union[np.ndarray, List[str]], np.ndarray]]):
testing data
- resampling_strategy (Union[CrossValTypes, HoldoutTypes]),
- (default=HoldoutTypes.holdout):
+ resampling_strategy (Union[CrossValTypes, HoldoutValTypes]),
+ (default=HoldoutValTypes.holdout_validation):
strategy to split the training data.
resampling_strategy_args (Optional[Dict[str, Any]]):
arguments required for the chosen resampling strategy.
@@ -56,7 +56,7 @@ def __init__(self,
train: IMAGE_DATASET_INPUT,
val: Optional[IMAGE_DATASET_INPUT] = None,
test: Optional[IMAGE_DATASET_INPUT] = None,
- resampling_strategy: Union[CrossValTypes, HoldoutTypes] = HoldoutTypes.holdout,
+ resampling_strategy: Union[CrossValTypes, HoldoutValTypes] = HoldoutValTypes.holdout_validation,
resampling_strategy_args: Optional[Dict[str, Any]] = None,
seed: Optional[int] = 42,
train_transforms: Optional[torchvision.transforms.Compose] = None,
28 changes: 14 additions & 14 deletions autoPyTorch/datasets/resampling_strategy.py
@@ -25,7 +25,7 @@ class _ResamplingStrategyArgs(NamedTuple):

class HoldoutFuncs():
@staticmethod
-     def holdout(
+     def holdout_validation(
random_state: np.random.RandomState,
val_share: float,
indices: np.ndarray,
@@ -51,7 +51,7 @@ class CrossValFuncs():
}

@staticmethod
-     def k_fold(
+     def k_fold_cross_validation(
random_state: np.random.RandomState,
num_splits: int,
indices: np.ndarray,
@@ -106,18 +106,18 @@ class CrossValTypes(Enum):
and is not supposed to be instantiated.
Examples: This class is supposed to be used as follows
-     >>> cv_type = CrossValTypes.k_fold
+     >>> cv_type = CrossValTypes.k_fold_cross_validation
>>> print(cv_type.name)
-     k_fold
+     k_fold_cross_validation
>>> for cross_val_type in CrossValTypes:
print(cross_val_type.name, cross_val_type.value)
-     k_fold functools.partial(<function CrossValFuncs.k_fold at ...>)
+     k_fold_cross_validation functools.partial(<function CrossValFuncs.k_fold_cross_validation at ...>)
time_series <function CrossValFuncs.time_series>
"""
-     k_fold = partial(CrossValFuncs.k_fold)
+     k_fold_cross_validation = partial(CrossValFuncs.k_fold_cross_validation)
time_series = partial(CrossValFuncs.time_series)

def __call__(
@@ -153,31 +153,31 @@ def __call__(
)


- class HoldoutTypes(Enum):
+ class HoldoutValTypes(Enum):
"""The type of holdout validation
This class is used to specify the holdout validation function
and is not supposed to be instantiated.
Examples: This class is supposed to be used as follows
-     >>> holdout_type = HoldoutTypes.holdout
+     >>> holdout_type = HoldoutValTypes.holdout_validation
>>> print(holdout_type.name)
-     holdout
+     holdout_validation
>>> print(holdout_type.value)
-     functools.partial(<function HoldoutTypes.holdout at ...>)
+     functools.partial(<function HoldoutValTypes.holdout_validation at ...>)
-     >>> for holdout_type in HoldoutTypes:
+     >>> for holdout_type in HoldoutValTypes:
print(holdout_type.name)
-     holdout
+     holdout_validation
-     Additionally, HoldoutTypes.<function> can be called directly.
+     Additionally, HoldoutValTypes.<function> can be called directly.
"""

-     holdout = partial(HoldoutFuncs.holdout)
+     holdout_validation = partial(HoldoutFuncs.holdout_validation)

def __call__(
self,
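After this rename, the enum members read exactly as the updated doctests describe. A minimal sketch restating only what the docstrings above show (the member names and that each value wraps a functools.partial):

    from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutValTypes

    # member names after the rename
    print(HoldoutValTypes.holdout_validation.name)      # holdout_validation
    print(CrossValTypes.k_fold_cross_validation.name)   # k_fold_cross_validation

    # each member's value is a functools.partial around the matching
    # HoldoutFuncs/CrossValFuncs function; per the docstring, members can also
    # be invoked directly through the enum's __call__
    for cross_val_type in CrossValTypes:
        print(cross_val_type.name, cross_val_type.value)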
8 changes: 4 additions & 4 deletions autoPyTorch/datasets/tabular_dataset.py
@@ -20,7 +20,7 @@
from autoPyTorch.datasets.base_dataset import BaseDataset
from autoPyTorch.datasets.resampling_strategy import (
CrossValTypes,
-     HoldoutTypes,
+     HoldoutValTypes,
)


@@ -44,8 +44,8 @@ class TabularDataset(BaseDataset):
Y (Union[np.ndarray, pd.Series]): training data targets.
X_test (Optional[Union[np.ndarray, pd.DataFrame]]): input testing data.
Y_test (Optional[Union[np.ndarray, pd.DataFrame]]): testing data targets
- resampling_strategy (Union[CrossValTypes, HoldoutTypes]),
- (default=HoldoutTypes.holdout):
+ resampling_strategy (Union[CrossValTypes, HoldoutValTypes]),
+ (default=HoldoutValTypes.holdout_validation):
strategy to split the training data.
resampling_strategy_args (Optional[Dict[str, Any]]):
arguments required for the chosen resampling strategy.
@@ -66,7 +66,7 @@ def __init__(self,
Y: Union[np.ndarray, pd.Series],
X_test: Optional[Union[np.ndarray, pd.DataFrame]] = None,
Y_test: Optional[Union[np.ndarray, pd.DataFrame]] = None,
- resampling_strategy: Union[CrossValTypes, HoldoutTypes] = HoldoutTypes.holdout,
+ resampling_strategy: Union[CrossValTypes, HoldoutValTypes] = HoldoutValTypes.holdout_validation,
resampling_strategy_args: Optional[Dict[str, Any]] = None,
seed: Optional[int] = 42,
train_transforms: Optional[torchvision.transforms.Compose] = None,
8 changes: 4 additions & 4 deletions autoPyTorch/datasets/time_series_dataset.py
@@ -5,7 +5,7 @@
import torchvision.transforms

from autoPyTorch.datasets.base_dataset import BaseDataset
- from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutTypes
+ from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutValTypes

TIME_SERIES_FORECASTING_INPUT = Tuple[np.ndarray, np.ndarray] # currently only numpy arrays are supported
TIME_SERIES_REGRESSION_INPUT = Tuple[np.ndarray, np.ndarray]
@@ -17,9 +17,9 @@ def _check_prohibited_resampling() -> None:
Args:
task_name (str): Typically the Dataset class name
- resampling_strategy (Union[CrossValTypes, HoldoutTypes]):
+ resampling_strategy (Union[CrossValTypes, HoldoutValTypes]):
The splitting function
- args (Union[CrossValTypes, HoldoutTypes]):
+ args (Union[CrossValTypes, HoldoutValTypes]):
The list of cross validation functions and
holdout validation functions that are suitable for the given task
@@ -39,7 +39,7 @@ def __init__(self,
n_steps: int,
train: TIME_SERIES_FORECASTING_INPUT,
val: Optional[TIME_SERIES_FORECASTING_INPUT] = None,
- resampling_strategy: Union[CrossValTypes, HoldoutTypes] = HoldoutTypes.holdout,
+ resampling_strategy: Union[CrossValTypes, HoldoutValTypes] = HoldoutValTypes.holdout_validation,
resampling_strategy_args: Optional[Dict[str, Any]] = None,
seed: Optional[int] = 42,
train_transforms: Optional[torchvision.transforms.Compose] = None,
4 changes: 2 additions & 2 deletions autoPyTorch/optimizer/smbo.py
@@ -19,7 +19,7 @@
from autoPyTorch.datasets.base_dataset import BaseDataset
from autoPyTorch.datasets.resampling_strategy import (
CrossValTypes,
-     HoldoutTypes,
+     HoldoutValTypes,
)
from autoPyTorch.ensemble.ensemble_builder import EnsembleBuilderManager
from autoPyTorch.evaluation.tae import ExecuteTaFuncWithQueue, get_cost_of_crash
@@ -92,7 +92,7 @@ def __init__(self,
pipeline_config: typing.Dict[str, typing.Any],
start_num_run: int = 1,
seed: int = 1,
- resampling_strategy: typing.Union[HoldoutTypes, CrossValTypes] = HoldoutTypes.holdout,
+ resampling_strategy: typing.Union[HoldoutValTypes, CrossValTypes] = HoldoutValTypes.holdout_validation,
resampling_strategy_args: typing.Optional[typing.Dict[str, typing.Any]] = None,
include: typing.Optional[typing.Dict[str, typing.Any]] = None,
exclude: typing.Optional[typing.Dict[str, typing.Any]] = None,
12 changes: 6 additions & 6 deletions examples/tabular/40_advanced/example_resampling_strategy.py
@@ -24,7 +24,7 @@
import sklearn.model_selection

from autoPyTorch.api.tabular_classification import TabularClassificationTask
- from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutTypes
+ from autoPyTorch.datasets.resampling_strategy import CrossValTypes, HoldoutValTypes


if __name__ == '__main__':
@@ -48,11 +48,11 @@
# To maintain logs of the run, set the next two as False
delete_tmp_folder_after_terminate=True,
delete_output_folder_after_terminate=True,
- # 'HoldoutTypes.holdout' with 'val_share': 0.33
+ # 'HoldoutValTypes.holdout_validation' with 'val_share': 0.33
# is the default argument setting for TabularClassificationTask.
# It is explicitly specified in this example for demonstrational
# purpose.
- resampling_strategy=HoldoutTypes.holdout,
+ resampling_strategy=HoldoutValTypes.holdout_validation,
resampling_strategy_args={'val_share': 0.33}
)

@@ -90,7 +90,7 @@
# To maintain logs of the run, set the next two as False
delete_tmp_folder_after_terminate=True,
delete_output_folder_after_terminate=True,
- resampling_strategy=CrossValTypes.k_fold,
+ resampling_strategy=CrossValTypes.k_fold_cross_validation,
resampling_strategy_args={'num_splits': 3}
)

@@ -130,8 +130,8 @@
delete_output_folder_after_terminate=True,
# For demonstration purposes, we use
# Stratified hold out validation. However,
- # one can also use CrossValTypes.k_fold.
- resampling_strategy=HoldoutTypes.holdout,
+ # one can also use CrossValTypes.k_fold_cross_validation.
+ resampling_strategy=HoldoutValTypes.holdout_validation,
resampling_strategy_args={'val_share': 0.33, 'stratify': True}
)

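The renamed strategies plug into the regression API changed earlier in this commit in the same way. A minimal sketch, assuming the public class in autoPyTorch.api.tabular_regression is named TabularRegressionTask (the class name is not visible in this diff) and that it accepts the same resampling_strategy/resampling_strategy_args parameters shown in its __init__ hunk:

    from autoPyTorch.api.tabular_regression import TabularRegressionTask  # assumed class name
    from autoPyTorch.datasets.resampling_strategy import CrossValTypes

    # 3-fold cross-validation instead of the default holdout split
    api = TabularRegressionTask(
        resampling_strategy=CrossValTypes.k_fold_cross_validation,
        resampling_strategy_args={'num_splits': 3},
    )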