diff --git a/composer/datasets/streaming/download.py b/composer/datasets/streaming/download.py
index 1652759a72..37241a545a 100644
--- a/composer/datasets/streaming/download.py
+++ b/composer/datasets/streaming/download.py
@@ -37,7 +37,7 @@ def get_object_store(remote: str) -> ObjectStore:
     elif remote.startswith('sftp://'):
         return _get_sftp_object_store(remote)
     else:
-        raise ValueError('unsupported upload scheme')
+        raise ValueError('unsupported download scheme')
 
 
 def _get_s3_object_store(remote: str) -> S3ObjectStore:
@@ -62,9 +62,6 @@ def _get_sftp_object_store(remote: str) -> SFTPObjectStore:
     return object_store
 
 
-__all__ = ['download_or_wait']
-
-
 def download_from_local(remote: str, local: str) -> None:
     """Download a file from remote to local.
diff --git a/docs/Makefile b/docs/Makefile
index afadbdeb1e..3dbfde8033 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -7,19 +7,26 @@ SPHINXOPTS ?= -W --keep-going
 SPHINXBUILD ?= sphinx-build
 SOURCEDIR = source
 BUILDDIR = _build
+PYTHON_BIN ?= python
+HTML_BUILDDIR ?= $(BUILDDIR)/_html
+HTTP_MODULE = http.server
 
 # Put it first so that "make" without argument is like "make help".
-help:
-	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+clean:
+	@$(SPHINXBUILD) -M clean "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+	rm -rf $(SOURCEDIR)/api_reference
 
 doctest:
 	@$(SPHINXBUILD) -M doctest "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) -q $(O)
 
-clean:
-	rm -rf $(BUILDDIR)/*
-	rm -rf $(SOURCEDIR)/api_reference
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+host:
+	@$(PYTHON_BIN) -m $(HTTP_MODULE) --directory $(HTML_BUILDDIR)
 
-.PHONY: help Makefile clean doctest
+.PHONY: clean doctest help host Makefile
 
 # Catch-all target: route all unknown targets to Sphinx using the new
 # "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 77805e62db..a74c8b11cb 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -72,6 +72,7 @@ Composer is part of the broader Machine Learning community, and we welcome any c
    examples/exporting_for_inference.ipynb
    examples/glue/glue_entrypoint.ipynb
    examples/TPU_Training_in_composer.ipynb
+   tutorials/train_resnet50_on_aws.md
 
 .. toctree::
    :hidden:
diff --git a/docs/source/tutorials/train_resnet50_on_aws.md b/docs/source/tutorials/train_resnet50_on_aws.md
new file mode 100644
index 0000000000..0cffcbaa7b
--- /dev/null
+++ b/docs/source/tutorials/train_resnet50_on_aws.md
@@ -0,0 +1,182 @@
+# ☁️ Train ResNet-50 on AWS
+
+Composer is a PyTorch library that accelerates training for deep learning models while improving quality at significantly lower cost. Composer makes it possible to train ResNet-50 on the ImageNet dataset to the standard ***76.6% top-1 accuracy*** in ***27 minutes*** on an AWS EC2 instance for a mere ***$15***. In this tutorial, we’ll teach you how simple it is to do this yourself!
+
+The starting point for this tutorial is the set of training recipes we present in our [Mosaic ResNet blog post](https://www.mosaicml.com/blog/mosaic-resnet). We’ll walk through:
+
+1. Launching an AWS EC2 instance capable of running GPU training
+1. Configuring your AWS EC2 instance to run Composer with our pre-built Docker images
+1. Running Composer training using the ResNet-50 Mild recipe introduced in our [blog post](https://www.mosaicml.com/blog/mosaic-resnet)
+
+## Prerequisites
+
+- AWS account with permissions to:
+  - Create/manage EC2 instances, EBS volumes
+  - Create/manage Security Groups, Key Pairs (alternatively, have your IT admin provide these)
+- AWS quota to create [Accelerated Computing](https://aws.amazon.com/ec2/instance-types/?trk=36c6da98-7b20-48fa-8225-4784bced9843&sc_channel=ps&sc_campaign=acquisition&sc_medium=ACQ-P|PS-GO|Brand|Desktop|SU|Compute|EC2|US|EN|Text&s_kwcid=AL!4422!3!536392622533!e!!g!!aws%20instance%20types&ef_id=CjwKCAjwi8iXBhBeEiwAKbUofUpKM9nHToU9fsBJKApR3ccQzKs3LxSJ97PKiW5SvFRFwW6BnYP5xxoCOTEQAvD_BwE:G:s&s_kwcid=AL!4422!3!536392622533!e!!g!!aws%20instance%20types#Accelerated_Computing) EC2 instances
+
+  ```{eval-rst}
+  .. note::
+
+     We use a `p4d.24xlarge` instance in this tutorial. However, these steps should run on any P-type EC2 instance.
+
+  ```
+
+- Download the latest [ImageNet dataset](http://www.image-net.org/)
+
+  ```{eval-rst}
+  .. note::
+
+     Due to the challenges associated with distributing ImageNet, we assume users will provide their own copy of this dataset for the purposes of this tutorial.
+
+  ```
+
+- [MosaicML's ResNet-50 Recipes Docker Image](https://hub.docker.com/r/mosaicml/pytorch_vision/tags)
+  - Tag: `mosaicml/pytorch_vision:resnet50_recipes`
+  - The image comes pre-configured with the following dependencies:
+    - Mosaic ResNet training recipes
+    - Training entrypoint: `train.py`
+    - Composer version: [0.9.0](https://github.com/mosaicml/composer/tree/v0.9.0)
+    - PyTorch version: 1.11.0
+    - CUDA version: 11.3
+    - Python version: 3.9
+    - Ubuntu version: 20.04
+
+## Launching an AWS EC2 Instance
+
+First, let’s create an EC2 instance that we can run GPU training on.
+
+1. Log in to your AWS account and open the Management Console.
+1. For the purposes of this tutorial, we will configure and launch a new `p4d.24xlarge` instance. On your `EC2 Dashboard`, click the `Launch instance` button.
+
+   ![Launch Instance](https://storage.googleapis.com/docs.mosaicml.com/images/tutorials/launch_instance.png)
+
+1. Name your instance and select an AMI, Instance type, Key pair and Network settings. The following settings were used for this tutorial; customize them as required for your AWS setup and IT requirements:
+   - Name: `composer-r50-demo-a100x8`
+   - Amazon Machine Image (AMI): `Deep Learning AMI GPU PyTorch 1.12.0 (Amazon Linux 2)`
+   - Instance type: `p4d.24xlarge`
+   - Key pair: `Create key pair` (make sure to note where you save the private key)
+     - Key pair name: `composer_demo`
+     - Key pair type: `RSA`
+     - Private key format: `.pem`
+   - Network settings: Use defaults
+   - Storage (volumes):
+
+   ![EBS Configuration](https://storage.googleapis.com/docs.mosaicml.com/images/tutorials/configure_ebs.png)
+
+1. Click `Launch instance`!
+
+## Configuring your AWS EC2 instance
+
+Next, we will connect to our newly launched `p4d.24xlarge` instance, perform some basic system configuration to optimize our runtime environment, and set up our dataset area.
+
+1. Navigate back to the `Instances` page in your AWS console. Click on the running instance you just launched and, in the Details pane, copy the instance’s `Public IPv4 DNS` address. You will need this value to connect to the instance.
+1. Using the private key you downloaded during the launch configuration and the instance’s public DNS address, connect to the system using SSH:
+
+   ```bash
+   ssh -i <path/to/private_key.pem> ec2-user@<Public IPv4 DNS>
+   ```
+
+   For example,
+
+   ```bash
+   ssh -i ~/composer_demo.pem ec2-user@ec2-11-222-333-44.us-west-2.compute.amazonaws.com
+   ```
+
+1. Now let’s create a `datasets` area to hold the ImageNet data:
+
+   ```bash
+   sudo mkdir -p /datasets/ImageNet
+   sudo chmod -R 777 /datasets
+   ```
+
+   (Optional) If the EC2 instance you selected comes with directly attached [Instance Store Volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/add-instance-store-volumes.html), they can be formatted and mounted as follows:
+
+   ```bash
+   sudo mkfs -t xfs /dev/nvme1n1
+   sudo mkdir -p /datasets/ImageNet
+   sudo mount /dev/nvme1n1 /datasets/ImageNet
+   sudo chmod 777 /datasets/ImageNet
+   ```
+
+   Instance Store Volumes generally have better performance than EBS volumes since they are directly attached to the instance, at the expense of persistence: they are ephemeral, and any data stored on them becomes inaccessible after the instance is powered off.
+
+   Regardless of whether you choose an EBS volume or an Instance Store Volume to host your dataset, the ImageNet data should be copied into the `/datasets/ImageNet` folder. In our example, the directory tree under `/datasets` looks as follows:
+
+   ```bash
+   [ec2-user@ip-172-31-0-30 /]$ find ./datasets/ -maxdepth 2
+   ./datasets/
+   ./datasets/imagenet_files.tar
+   ./datasets/ImageNet
+   ./datasets/ImageNet/train
+   ./datasets/ImageNet/val
+   ```
+
+   Once you populate the dataset area, you’ll be ready to start training!
+
+## Train ResNet-50 on ImageNet
+
+Now that we have launched an EC2 instance, configured the runtime, and populated the dataset area, we are ready to kick off training.
+
+1. Pull and run the `mosaicml/pytorch_vision:resnet50_recipes` Docker image. The image contains everything required to train, including pre-installed Composer, package dependencies, the training entrypoint, and the recipe configuration files.
+
+   ```bash
+   docker run -it -v /datasets:/datasets --gpus all --shm-size 1g mosaicml/pytorch_vision:resnet50_recipes
+   ```
+
+   ```{eval-rst}
+   .. note::
+
+      The default shared memory size of a Docker container is typically too small for larger datasets. In this example, increasing the shared memory size to 1GB is usually sufficient.
+
+   ```
+
+2. Run ResNet-50 training using the Mild recipe!
+
+   ```bash
+   composer train.py -f recipes/resnet50_mild.yaml --scale_schedule_ratio 0.36 \
+       --train_dataset.imagenet.ffcv_dir /datasets/ImageNet/ffcv \
+       --val_dataset.imagenet.ffcv_dir /datasets/ImageNet/ffcv
+   ```
+
+   ```{eval-rst}
+   .. note::
+
+      The ResNet-50 Mild and Medium recipes utilize the very efficient, high-performing [FFCV dataloader](https://ffcv.io/), which requires the raw ImageNet data to be processed into FFCV format. Composer can automatically perform this step for you prior to launching the training run; simply append the following command line arguments to the training command above:
+
+      `--train_dataset.imagenet.datadir /datasets/ImageNet/`
+      `--val_dataset.imagenet.datadir /datasets/ImageNet/`
+      `--train_dataset.imagenet.ffcv_write_dataset`
+      `--val_dataset.imagenet.ffcv_write_dataset`
+
+      The first two arguments specify the location of the raw ImageNet training and validation data, respectively. The last two arguments enable dataset conversion if the expected FFCV formatted files do not exist.
+
+      To perform this conversion manually, please follow the instructions detailed in the [README](https://github.com/mosaicml/benchmarks/tree/main/blogs/resnet#running-a-recipe) in our [benchmarks repository](https://github.com/mosaicml/benchmarks/tree/main/blogs/resnet), which contains all the code associated with our original blog post.
+
+   ```
+
+   For a rough sketch of what the `train.py` entrypoint is doing under the hood, see the appendix at the end of this tutorial.
+
+## Expected Results
+
+We’ve performed various sweeps on AWS EC2 instances to understand the efficiency frontier across time, accuracy, and cost, as shown below.
+
+![ResNet-50 on AWS, Explorer Runs](https://storage.googleapis.com/docs.mosaicml.com/images/tutorials/r50_aws_explorer.png)
+
+The recipe explored in this tutorial should result in a model trained to a top-1 accuracy of 76.6% in about 27 minutes, for a total cost of $14.77.
+
+![ResNet-50 on AWS, Explorer Recipe](https://storage.googleapis.com/docs.mosaicml.com/images/tutorials/r50_aws_explorer_recipe.png)
+
+You can explore the results of our other [ResNet-50 runs on AWS](https://explorer.mosaicml.com/imagenet?sortBy=costSameQuality&model=resnet50&cloud=aws&hardware=all&algorithms=all&baseline=r50_optimized_p4d&recipe=mosaicml_baseline&recipe=mosaicml_hot&recipe=mosaicml_medium&recipe=mosaicml_mild) in [Explorer](https://explorer.mosaicml.com/), our tool for exploring the efficiency frontiers of different models and datasets with different speed-up techniques across various clouds.
+
+## Next steps
+
+- [Explore other ResNet-50 recipes on AWS using Explorer!](https://explorer.mosaicml.com/imagenet?sortBy=costSameQuality&model=resnet50&cloud=aws&hardware=all&algorithms=all&baseline=r50_optimized_p4d&recipe=mosaicml_baseline&recipe=mosaicml_hot&recipe=mosaicml_medium&recipe=mosaicml_mild)
+- Check out our [GitHub repository](https://github.com/mosaicml/composer) for the latest information on Composer
+- Check out the [Composer + FFCV: Faster Together](https://www.mosaicml.com/blog/composer-ffcv-faster-together) blog post for more information on how FFCV and Composer work together
+- Reproduce our record-setting [MLPerf ResNet-50 benchmark](https://www.mosaicml.com/blog/mlperf-2022)! Note that you will need access to `p4de.24xlarge` ([in preview](https://aws.amazon.com/ec2/instance-types/?trk=36c6da98-7b20-48fa-8225-4784bced9843&sc_channel=ps&sc_campaign=acquisition&sc_medium=ACQ-P|PS-GO|Brand|Desktop|SU|Compute|EC2|US|EN|Text&s_kwcid=AL!4422!3!536392622533!e!!g!!aws%20instance%20types&ef_id=CjwKCAjwi8iXBhBeEiwAKbUofUpKM9nHToU9fsBJKApR3ccQzKs3LxSJ97PKiW5SvFRFwW6BnYP5xxoCOTEQAvD_BwE:G:s&s_kwcid=AL!4422!3!536392622533!e!!g!!aws%20instance%20types)) EC2 instances, which contain NVIDIA A100 80GB GPUs. Please see the [MLPerf Training Results v2.0 GitHub Repository](https://github.com/mlcommons/training_results_v2.0/tree/main/MosaicML) for additional details.
- Try training your own models and datasets using Composer!
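+
+## Appendix: What a Composer training run looks like
+
+For context on what the `train.py` entrypoint inside the Docker image is doing, here is a minimal, illustrative sketch of a Composer training loop. This is not the actual recipe code: the dataset path, transforms, batch size, duration, and algorithm list below are simplified assumptions for demonstration, while the real recipes configure FFCV dataloaders, optimizers, learning rate schedules, and the full set of speed-up methods through the YAML files.
+
+```python
+# Illustrative sketch only -- assumes raw ImageFolder-style data at
+# /datasets/ImageNet; the actual recipes use FFCV dataloaders and many
+# more speed-up methods, configured via recipes/resnet50_mild.yaml.
+from torch.utils.data import DataLoader
+from torchvision import datasets, models, transforms
+
+from composer import Trainer
+from composer.algorithms import BlurPool, LabelSmoothing, ProgressiveResizing
+from composer.models import ComposerClassifier
+
+train_dataloader = DataLoader(
+    datasets.ImageFolder(
+        '/datasets/ImageNet/train',
+        transform=transforms.Compose([
+            transforms.RandomResizedCrop(224),
+            transforms.ToTensor(),
+        ]),
+    ),
+    batch_size=256,  # illustrative; the recipes set their own batch sizes
+    shuffle=True,
+    num_workers=8,
+)
+
+# Wrap a torchvision ResNet-50 so Composer can train it.
+model = ComposerClassifier(models.resnet50())
+
+trainer = Trainer(
+    model=model,
+    train_dataloader=train_dataloader,
+    max_duration='30ep',  # the recipes scale training length via --scale_schedule_ratio
+    algorithms=[BlurPool(), LabelSmoothing(smoothing=0.1), ProgressiveResizing()],
+    device='gpu',
+)
+trainer.fit()
+```
+
+To reproduce the results in this tutorial, use the recipe YAMLs and the `composer` launcher command shown above rather than this sketch; among other things, the `composer` launcher handles distributed training across all eight GPUs.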
diff --git a/setup.py b/setup.py index 5a22e264e4..595d2ec076 100644 --- a/setup.py +++ b/setup.py @@ -101,7 +101,7 @@ def package_files(prefix: str, directory: str, extension: str): 'junitparser==2.8.0', 'coverage[toml]==6.4.4', 'fasteners==0.17.3', # object store tests require fasteners - 'pytest==7.1.2', + 'pytest==7.1.3', 'toml==0.10.2', 'ipython==7.32.0', 'ipykernel==6.15.2', diff --git a/tests/algorithms/test_gradient_clipping.py b/tests/algorithms/test_gradient_clipping.py index fd9a016bc9..6924199ed5 100644 --- a/tests/algorithms/test_gradient_clipping.py +++ b/tests/algorithms/test_gradient_clipping.py @@ -10,12 +10,8 @@ import composer.algorithms.gradient_clipping.gradient_clipping as gc_module from composer.algorithms.gradient_clipping import GradientClipping, apply_gradient_clipping from composer.algorithms.gradient_clipping.gradient_clipping import _apply_agc, _get_clipped_gradient_coeff -from composer.core import Engine +from composer.core import Engine, State from composer.core.event import Event -from tests.fixtures import dummy_fixtures - -# To satisfy pyright. -dummy_state = dummy_fixtures.dummy_state @pytest.fixture @@ -81,7 +77,7 @@ def test_gradient_clipping_functional(monkeypatch): @pytest.mark.parametrize('clipping_type', [('adaptive',), ('norm',), ('value',)]) -def test_gradient_clipping_algorithm(monkeypatch, clipping_type, simple_model_with_grads, dummy_state): +def test_gradient_clipping_algorithm(monkeypatch, clipping_type, simple_model_with_grads, dummy_state: State): model = simple_model_with_grads apply_gc_fn = Mock() monkeypatch.setattr(gc_module, 'apply_gradient_clipping', apply_gc_fn) @@ -98,8 +94,11 @@ def test_gradient_clipping_algorithm(monkeypatch, clipping_type, simple_model_wi apply_gc_fn.assert_called_once() -def test_gradient_clipping_algorithm_with_deepspeed_enabled(monkeypatch: pytest.MonkeyPatch, simple_model_with_grads, - dummy_state): +def test_gradient_clipping_algorithm_with_deepspeed_enabled( + monkeypatch: pytest.MonkeyPatch, + simple_model_with_grads, + dummy_state: State, +): clipping_threshold = 0.1191 apply_gc_fn = Mock() monkeypatch.setattr(gc_module, 'apply_gradient_clipping', apply_gc_fn) @@ -128,7 +127,11 @@ def test_gradient_clipping_algorithm_with_deepspeed_enabled(monkeypatch: pytest. 
apply_gc_fn.assert_not_called() -def test_algorithm_with_deepspeed_enabled_errors_out_for_non_norm(monkeypatch: pytest.MonkeyPatch, dummy_state): +def test_algorithm_with_deepspeed_enabled_errors_out_for_non_norm( + monkeypatch: pytest.MonkeyPatch, + dummy_state: State, + simple_model_with_grads, +): clipping_threshold = 0.1191 apply_gc_fn = Mock() monkeypatch.setattr(gc_module, 'apply_gradient_clipping', apply_gc_fn) diff --git a/tests/common/__init__.py b/tests/common/__init__.py index c2f2170da6..ce42d3dfcd 100644 --- a/tests/common/__init__.py +++ b/tests/common/__init__.py @@ -5,12 +5,10 @@ from typing import List, Type from tests.common.compare import deep_compare -from tests.common.datasets import (RandomClassificationDataset, RandomClassificationDatasetHparams, RandomImageDataset, - configure_dataset_hparams_for_synthetic) +from tests.common.datasets import RandomClassificationDataset, RandomImageDataset from tests.common.events import EventCounterCallback from tests.common.markers import device, world_size -from tests.common.models import (SimpleConvModel, SimpleConvModelHparams, SimpleModel, SimpleModelHparams, - configure_model_hparams_for_synthetic) +from tests.common.models import SimpleConvModel, SimpleModel from tests.common.state import assert_state_equivalent @@ -22,17 +20,12 @@ def get_module_subclasses(module: types.ModuleType, cls: Type) -> List[Type]: __all__ = [ 'assert_state_equivalent', 'RandomClassificationDataset', - 'RandomClassificationDatasetHparams', 'RandomImageDataset', - 'configure_dataset_hparams_for_synthetic', 'SimpleConvModel', 'SimpleModel', - 'SimpleModelHparams', - 'SimpleConvModelHparams', 'EventCounterCallback', 'deep_compare', 'device', 'world_size', - 'configure_model_hparams_for_synthetic', 'get_module_subclasses', ] diff --git a/tests/common/datasets.py b/tests/common/datasets.py index 67241ca7cc..b7d8bead01 100644 --- a/tests/common/datasets.py +++ b/tests/common/datasets.py @@ -1,24 +1,14 @@ # Copyright 2022 MosaicML Composer authors # SPDX-License-Identifier: Apache-2.0 -import dataclasses -from typing import List, Optional, Sequence +from typing import Sequence -import pytest import torch import torch.utils.data -import yahp as hp from PIL import Image from torch.utils.data import Dataset from torchvision.datasets import VisionDataset -from composer.datasets.dataset_hparams import DataLoaderHparams, DatasetHparams -from composer.datasets.glue_hparams import GLUEHparams -from composer.datasets.lm_dataset_hparams import LMDatasetHparams -from composer.datasets.synthetic_hparams import SyntheticHparamsMixin -from composer.models import ModelHparams -from tests.common.models import model_hparams_to_tokenizer_family - class RandomClassificationDataset(Dataset): """Classification dataset drawn from a normal distribution. 
@@ -41,32 +31,6 @@ def __getitem__(self, index: int): return self.x[index], self.y[index] -@dataclasses.dataclass -class RandomClassificationDatasetHparams(DatasetHparams, SyntheticHparamsMixin): - - data_shape: List[int] = hp.optional('data shape', default_factory=lambda: [1, 1, 1]) - num_classes: int = hp.optional('num_classes', default=2) - - def initialize_object(self, batch_size: int, dataloader_hparams: DataLoaderHparams): - assert self.data_shape is not None - assert self.num_classes is not None - dataset = RandomClassificationDataset( - size=self.synthetic_num_unique_samples, - shape=self.data_shape, - num_classes=self.num_classes, - ) - if self.shuffle: - sampler = torch.utils.data.RandomSampler(dataset) - else: - sampler = torch.utils.data.SequentialSampler(dataset) - return dataloader_hparams.initialize_object( - dataset=dataset, - batch_size=batch_size, - sampler=sampler, - drop_last=self.drop_last, - ) - - class RandomImageDataset(VisionDataset): """ Image Classification dataset with values drawn from a normal distribution Args: @@ -110,21 +74,3 @@ def __getitem__(self, index: int): return self.transform(x), y else: return x, y - - -def configure_dataset_hparams_for_synthetic( - dataset_hparams: DatasetHparams, - model_hparams: Optional[ModelHparams] = None, -) -> None: - if not isinstance(dataset_hparams, SyntheticHparamsMixin): - pytest.xfail(f'{dataset_hparams.__class__.__name__} does not support synthetic data or num_total_batches') - - assert isinstance(dataset_hparams, SyntheticHparamsMixin) - - dataset_hparams.use_synthetic = True - - if model_hparams and type(model_hparams) in model_hparams_to_tokenizer_family: - tokenizer_family = model_hparams_to_tokenizer_family[type(model_hparams)] - assert isinstance(dataset_hparams, (GLUEHparams, LMDatasetHparams)) - dataset_hparams.tokenizer_name = tokenizer_family - dataset_hparams.max_seq_length = 128 diff --git a/tests/common/hparams.py b/tests/common/hparams.py deleted file mode 100644 index 00f4674f0c..0000000000 --- a/tests/common/hparams.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2022 MosaicML Composer authors -# SPDX-License-Identifier: Apache-2.0 - -from typing import Any, Callable, Dict, Optional, TypeVar - -import yahp as hp -import yaml - -T = TypeVar('T') - - -def assert_in_registry(constructor: Callable, registry: Dict[str, Callable]): - """Assert that the ``registry`` contains ``constructor``.""" - registry_entries = set(registry.values()) - assert constructor in registry_entries, f'Constructor {constructor.__name__} is missing from the registry.' - - -def construct_from_yaml( - constructor: Callable[..., T], - yaml_dict: Optional[Dict[str, Any]] = None, -) -> T: - """Build ``constructor`` from ``yaml_dict`` - - Args: - constructor (Callable): The constructor to test (such as an Hparams class) - yaml_dict (Dict[str, Any], optional): The YAML. Defaults to ``None``, which is equivalent - to an empty dictionary. 
- """ - yaml_dict = {} if yaml_dict is None else yaml_dict - # ensure that yaml_dict is actually a dictionary of only json-serializable objects - yaml_dict = yaml.safe_load(yaml.safe_dump(yaml_dict)) - instance = hp.create(constructor, yaml_dict, cli_args=False) - return instance diff --git a/tests/common/models.py b/tests/common/models.py index 2e358ec909..bc46bbfc6e 100644 --- a/tests/common/models.py +++ b/tests/common/models.py @@ -3,24 +3,9 @@ """Contains commonly used models that are shared across the test suite.""" -import dataclasses -from typing import Any, Dict, Type - import torch -import yahp as hp -from composer.datasets.synthetic_lm import generate_synthetic_tokenizer from composer.models import ComposerClassifier -from composer.models.bert.bert_hparams import BERTForClassificationHparams, BERTHparams -from composer.models.deeplabv3.deeplabv3_hparams import DeepLabV3Hparams -from composer.models.gpt2.gpt2_hparams import GPT2Hparams -from composer.models.model_hparams import ModelHparams - -model_hparams_to_tokenizer_family: Dict[Type[ModelHparams], str] = { - GPT2Hparams: 'gpt2', - BERTForClassificationHparams: 'bert', - BERTHparams: 'bert' -} class SimpleModel(ComposerClassifier): @@ -58,18 +43,6 @@ def __init__(self, num_features: int = 1, num_classes: int = 2) -> None: self.fc2 = fc2 -@dataclasses.dataclass -class SimpleModelHparams(ModelHparams): - num_features: int = hp.optional('number of features', default=1) - num_classes: int = hp.optional('number of output classes', default=2) - - def initialize_object(self) -> SimpleModel: - return SimpleModel( - num_features=self.num_features, - num_classes=self.num_classes, - ) - - class SimpleConvModel(ComposerClassifier): """Small convolutional classifer. @@ -105,113 +78,3 @@ def __init__(self, num_channels: int = 3, num_classes: int = 2) -> None: # surgery tests self.conv1 = conv1 self.conv2 = conv2 - - -@dataclasses.dataclass -class SimpleConvModelHparams(ModelHparams): - num_channels: int = hp.optional('number of channels', default=3) - num_classes: int = hp.optional('number of output classes', default=2) - - def initialize_object(self) -> SimpleConvModel: - return SimpleConvModel( - num_channels=self.num_channels, - num_classes=self.num_classes, - ) - - -def configure_model_hparams_for_synthetic(model_hparams: ModelHparams) -> None: - # configure Transformer-based models for synthetic testing - if type(model_hparams) in model_hparams_to_tokenizer_family.keys(): - assert isinstance(model_hparams, (BERTHparams, GPT2Hparams, BERTForClassificationHparams)) - tokenizer_family = model_hparams_to_tokenizer_family[type(model_hparams)] - - # force a non-pretrained model - model_hparams.use_pretrained = False - model_hparams.pretrained_model_name = None - - # generate tokenizers and synthetic models - tokenizer = generate_synthetic_tokenizer(tokenizer_family=tokenizer_family) - model_hparams.model_config = generate_dummy_model_config(type(model_hparams), tokenizer) - - # configure DeepLabV3 models for synthetic testing - if isinstance(model_hparams, DeepLabV3Hparams): - model_hparams.backbone_weights = None # prevent downloading pretrained weights during test - model_hparams.sync_bn = False # sync_bn throws an error when run on CPU - - -def generate_dummy_model_config(cls: Type[hp.Hparams], tokenizer) -> Dict[str, Any]: - model_to_dummy_mapping: Dict[Type[hp.Hparams], Dict[str, Any]] = { - BERTHparams: { - 'architectures': ['BertForMaskedLM'], - 'attention_probs_dropout_prob': 0.1, - 'gradient_checkpointing': False, - 'hidden_act': 
'gelu', - 'hidden_dropout_prob': 0.1, - 'hidden_size': 64, - 'initializer_range': 0.02, - 'intermediate_size': 256, - 'layer_norm_eps': 1e-12, - 'max_position_embeddings': 512, - 'model_type': 'bert', - 'num_attention_heads': 1, - 'num_hidden_layers': 1, - 'pad_token_id': tokenizer.pad_token_id, - 'position_embedding_type': 'absolute', - 'transformers_version': '4.6.0.dev0', - 'type_vocab_size': 2, - 'use_cache': True, - 'vocab_size': tokenizer.vocab_size, - }, - GPT2Hparams: { - 'activation_function': 'gelu_new', - 'architectures': ['GPT2LMHeadModel'], - 'attn_pdrop': 0.1, - 'bos_token_id': tokenizer.cls_token_id, - 'embd_pdrop': 0.1, - 'eos_token_id': tokenizer.cls_token_id, - 'initializer_range': 0.02, - 'layer_norm_epsilon': 0.00001, - 'model_type': 'gpt2', - 'n_ctx': 128, - 'n_embd': 64, - 'n_head': 1, - 'n_layer': 1, - 'n_positions': 128, - 'resid_pdrop': 0.1, - 'summary_activation': None, - 'summary_first_dropout': 0.1, - 'summary_proj_to_labels': True, - 'summary_type': 'cls_index', - 'summary_use_proj': True, - 'task_specific_params': { - 'text-generation': { - 'do_sample': True, - 'max_length': 50 - } - }, - 'vocab_size': tokenizer.vocab_size - }, - BERTForClassificationHparams: { - 'architectures': ['BertForSequenceClassification'], - 'attention_probs_dropout_prob': 0.1, - 'classifier_dropout': None, - 'gradient_checkpointing': False, - 'hidden_act': 'gelu', - 'hidden_dropout_prob': 0.1, - 'hidden_size': 64, - 'initializer_range': 0.02, - 'intermediate_size': 256, - 'layer_norm_eps': 1e-12, - 'max_position_embeddings': 512, - 'model_type': 'bert', - 'num_attention_heads': 1, - 'num_hidden_layers': 1, - 'pad_token_id': tokenizer.pad_token_id, - 'position_embedding_type': 'absolute', - 'transformers_version': '4.16.2', - 'type_vocab_size': 2, - 'use_cache': True, - 'vocab_size': tokenizer.vocab_size - } - } - return model_to_dummy_mapping[cls] diff --git a/tests/conftest.py b/tests/conftest.py index ee5e0ebc20..7281aab3cc 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -21,22 +21,12 @@ # Enforce deterministic mode before any tests start. reproducibility.configure_deterministic_mode() -# during the pytest refactor transition, this flag -# indicates whether to include the deprecated fixtures. -# used for internal development. 
-_include_deprecated_fixtures = True - # Add the path of any pytest fixture files you want to make global pytest_plugins = [ 'tests.fixtures.new_fixtures', 'tests.fixtures.synthetic_hf_state', ] -if _include_deprecated_fixtures: - pytest_plugins += [ - 'tests.fixtures.dummy_fixtures', - ] - def _add_option(parser: pytest.Parser, name: str, help: str, choices: Optional[List[str]] = None): parser.addoption( diff --git a/tests/datasets/test_streaming.py b/tests/datasets/test_streaming.py index b5eba85075..a612f9d4e8 100644 --- a/tests/datasets/test_streaming.py +++ b/tests/datasets/test_streaming.py @@ -19,8 +19,8 @@ @pytest.fixture def remote_local(tmp_path: pathlib.Path) -> Tuple[str, str]: - remote = tmp_path / 'remote' - local = tmp_path / 'local' + remote = tmp_path.joinpath('remote') + local = tmp_path.joinpath('local') remote.mkdir() local.mkdir() return str(remote), str(local) @@ -28,9 +28,9 @@ def remote_local(tmp_path: pathlib.Path) -> Tuple[str, str]: @pytest.fixture def compressed_remote_local(tmp_path: pathlib.Path) -> Tuple[str, str, str]: - compressed = tmp_path / 'compressed' - remote = tmp_path / 'remote' - local = tmp_path / 'local' + compressed = tmp_path.joinpath('compressed') + remote = tmp_path.joinpath('remote') + local = tmp_path.joinpath('local') list(x.mkdir() for x in [compressed, remote, local]) return tuple(str(x) for x in [compressed, remote, local]) diff --git a/tests/fixtures/dummy_fixtures.py b/tests/fixtures/dummy_fixtures.py deleted file mode 100644 index 884af3d075..0000000000 --- a/tests/fixtures/dummy_fixtures.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2022 MosaicML Composer authors -# SPDX-License-Identifier: Apache-2.0 - -from typing import Iterable, Tuple - -import pytest -import torch -import torch.utils.data -from torch.optim import Optimizer - -from composer.core import Precision, State -from composer.core.types import PyTorchScheduler -from composer.datasets.dataset_hparams import DataLoaderHparams, DatasetHparams -from composer.datasets.dataset_hparams_registry import dataset_registry -from composer.models import ModelHparams -from composer.optim import ExponentialScheduler -from composer.optim.optimizer_hparams_registry import AdamHparams -from composer.trainer.trainer_hparams import TrainerHparams, model_registry -from tests.common import RandomClassificationDatasetHparams, SimpleModel, SimpleModelHparams - - -@pytest.fixture -def dummy_in_shape() -> Tuple[int, ...]: - return 1, 5, 5 - - -@pytest.fixture -def dummy_num_classes() -> int: - return 2 - - -@pytest.fixture() -def dummy_train_batch_size() -> int: - return 16 - - -@pytest.fixture() -def dummy_val_batch_size() -> int: - return 32 - - -@pytest.fixture() -def dummy_train_n_samples() -> int: - return 1000 - - -@pytest.fixture -def dummy_model_hparams(dummy_in_shape: Tuple[int, ...], dummy_num_classes: int) -> SimpleModelHparams: - model_registry['simple'] = SimpleModelHparams - return SimpleModelHparams(num_features=dummy_in_shape[0], num_classes=dummy_num_classes) - - -@pytest.fixture -def dummy_model(dummy_in_shape: Tuple[int, ...], dummy_num_classes: int) -> SimpleModel: - return SimpleModel(num_features=dummy_in_shape[0], num_classes=dummy_num_classes) - - -@pytest.fixture -def dummy_train_dataset_hparams(dummy_model: SimpleModel, dummy_in_shape: Tuple[int]) -> DatasetHparams: - dataset_registry['random_classification'] = RandomClassificationDatasetHparams - assert dummy_model.num_classes is not None - return RandomClassificationDatasetHparams( - use_synthetic=True, - 
drop_last=True, - shuffle=False, - num_classes=dummy_model.num_classes, - data_shape=list(dummy_in_shape), - ) - - -@pytest.fixture -def dummy_val_dataset_hparams(dummy_model: SimpleModel, dummy_in_shape: Tuple[int]) -> DatasetHparams: - dataset_registry['random_classification'] = RandomClassificationDatasetHparams - assert dummy_model.num_classes is not None - return RandomClassificationDatasetHparams( - use_synthetic=True, - drop_last=False, - shuffle=False, - num_classes=dummy_model.num_classes, - data_shape=list(dummy_in_shape), - ) - - -@pytest.fixture -def dummy_optimizer(dummy_model: SimpleModel): - return torch.optim.SGD(dummy_model.parameters(), lr=0.001) - - -@pytest.fixture -def dummy_scheduler(dummy_optimizer: Optimizer): - return torch.optim.lr_scheduler.LambdaLR(dummy_optimizer, lambda _: 1.0) - - -@pytest.fixture() -def dummy_state( - dummy_model: SimpleModel, - dummy_train_dataloader: Iterable, - dummy_optimizer: Optimizer, - dummy_scheduler: PyTorchScheduler, - rank_zero_seed: int, - request: pytest.FixtureRequest, -) -> State: - if request.node.get_closest_marker('gpu') is not None: - # If using `dummy_state`, then not using the trainer, so move the model to the correct device - dummy_model = dummy_model.cuda() - state = State( - model=dummy_model, - run_name='dummy_run_name', - precision=Precision.FP32, - grad_accum=1, - rank_zero_seed=rank_zero_seed, - optimizers=dummy_optimizer, - max_duration='10ep', - ) - state.schedulers = dummy_scheduler - state.set_dataloader(dummy_train_dataloader, 'train') - - return state - - -@pytest.fixture -def dummy_dataloader_hparams() -> DataLoaderHparams: - return DataLoaderHparams( - num_workers=0, - prefetch_factor=2, - persistent_workers=False, - pin_memory=False, - timeout=0.0, - ) - - -@pytest.fixture -def dummy_train_dataloader( - dummy_train_dataset_hparams: DatasetHparams, - dummy_train_batch_size: int, - dummy_dataloader_hparams: DataLoaderHparams, -): - return dummy_train_dataset_hparams.initialize_object(dummy_train_batch_size, dummy_dataloader_hparams) - - -@pytest.fixture -def dummy_val_dataloader( - dummy_train_dataset_hparams: DatasetHparams, - dummy_val_batch_size: int, - dummy_dataloader_hparams: DataLoaderHparams, -): - return dummy_train_dataset_hparams.initialize_object(dummy_val_batch_size, dummy_dataloader_hparams) - - -@pytest.fixture -def composer_trainer_hparams( - dummy_model_hparams: ModelHparams, - dummy_train_dataset_hparams: DatasetHparams, - dummy_val_dataset_hparams: DatasetHparams, - dummy_train_batch_size: int, - dummy_val_batch_size: int, - rank_zero_seed: int, -) -> TrainerHparams: - return TrainerHparams( - algorithms=[], - optimizers=AdamHparams(), - schedulers=[ExponentialScheduler(gamma=0.9)], - max_duration='2ep', - precision=Precision.FP32, - train_batch_size=dummy_train_batch_size, - eval_batch_size=dummy_val_batch_size, - seed=rank_zero_seed, - dataloader=DataLoaderHparams( - num_workers=0, - prefetch_factor=2, - persistent_workers=False, - pin_memory=False, - timeout=0.0, - ), - model=dummy_model_hparams, - val_dataset=dummy_val_dataset_hparams, - train_dataset=dummy_train_dataset_hparams, - grad_accum=1, - train_subset_num_batches=3, - eval_subset_num_batches=3, - ) diff --git a/tests/fixtures/new_fixtures.py b/tests/fixtures/new_fixtures.py index 57d83343ac..2a495feb04 100644 --- a/tests/fixtures/new_fixtures.py +++ b/tests/fixtures/new_fixtures.py @@ -9,6 +9,7 @@ import coolname import pytest +import torch from torch.utils.data import DataLoader from composer.core import State @@ -93,3 
+94,32 @@ def test_session_name(configure_dist: None) -> str: # ensure all ranks have the same name dist.broadcast_object_list(name_list) return name_list[0] + + +@pytest.fixture() +def dummy_state( + rank_zero_seed: int, + request: pytest.FixtureRequest, +) -> State: + + model = SimpleModel() + if request.node.get_closest_marker('gpu') is not None: + # If using `dummy_state`, then not using the trainer, so move the model to the correct device + model = model.cuda() + + optimizer = torch.optim.SGD(model.parameters(), lr=0.001) + scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda _: 1.0) + + state = State( + model=model, + run_name='dummy_run_name', + precision='fp32', + grad_accum=1, + rank_zero_seed=rank_zero_seed, + optimizers=optimizer, + max_duration='10ep', + ) + state.schedulers = scheduler + state.set_dataloader(DataLoader(RandomClassificationDataset()), 'train') + + return state diff --git a/tests/fixtures/synthetic_hf_state.py b/tests/fixtures/synthetic_hf_state.py index 33ba10e0f7..7abccdab76 100644 --- a/tests/fixtures/synthetic_hf_state.py +++ b/tests/fixtures/synthetic_hf_state.py @@ -1,7 +1,7 @@ # Copyright 2022 MosaicML Composer authors # SPDX-License-Identifier: Apache-2.0 -from typing import Tuple +from typing import Any, Dict, Tuple import pytest @@ -9,11 +9,88 @@ from composer.datasets.dataset_hparams import DataLoaderHparams from composer.datasets.lm_dataset_hparams import LMDatasetHparams from composer.datasets.synthetic_lm import generate_synthetic_tokenizer, synthetic_hf_dataset_builder -from composer.models import BERTHparams, GPT2Hparams, create_bert_mlm, create_gpt2 -from tests.common.models import generate_dummy_model_config +from composer.models import create_bert_mlm, create_gpt2 from tests.datasets import test_synthetic_lm_data +def generate_dummy_model_config(model: str, tokenizer) -> Dict[str, Any]: + model_to_dummy_mapping: Dict[str, Dict[str, Any]] = { + 'bert': { + 'architectures': ['BertForMaskedLM'], + 'attention_probs_dropout_prob': 0.1, + 'gradient_checkpointing': False, + 'hidden_act': 'gelu', + 'hidden_dropout_prob': 0.1, + 'hidden_size': 64, + 'initializer_range': 0.02, + 'intermediate_size': 256, + 'layer_norm_eps': 1e-12, + 'max_position_embeddings': 512, + 'model_type': 'bert', + 'num_attention_heads': 1, + 'num_hidden_layers': 1, + 'pad_token_id': tokenizer.pad_token_id, + 'position_embedding_type': 'absolute', + 'transformers_version': '4.6.0.dev0', + 'type_vocab_size': 2, + 'use_cache': True, + 'vocab_size': tokenizer.vocab_size, + }, + 'gpt2': { + 'activation_function': 'gelu_new', + 'architectures': ['GPT2LMHeadModel'], + 'attn_pdrop': 0.1, + 'bos_token_id': tokenizer.cls_token_id, + 'embd_pdrop': 0.1, + 'eos_token_id': tokenizer.cls_token_id, + 'initializer_range': 0.02, + 'layer_norm_epsilon': 0.00001, + 'model_type': 'gpt2', + 'n_ctx': 128, + 'n_embd': 64, + 'n_head': 1, + 'n_layer': 1, + 'n_positions': 128, + 'resid_pdrop': 0.1, + 'summary_activation': None, + 'summary_first_dropout': 0.1, + 'summary_proj_to_labels': True, + 'summary_type': 'cls_index', + 'summary_use_proj': True, + 'task_specific_params': { + 'text-generation': { + 'do_sample': True, + 'max_length': 50 + } + }, + 'vocab_size': tokenizer.vocab_size + }, + 'bert_classification': { + 'architectures': ['BertForSequenceClassification'], + 'attention_probs_dropout_prob': 0.1, + 'classifier_dropout': None, + 'gradient_checkpointing': False, + 'hidden_act': 'gelu', + 'hidden_dropout_prob': 0.1, + 'hidden_size': 64, + 'initializer_range': 0.02, + 
'intermediate_size': 256, + 'layer_norm_eps': 1e-12, + 'max_position_embeddings': 512, + 'model_type': 'bert', + 'num_attention_heads': 1, + 'num_hidden_layers': 1, + 'pad_token_id': tokenizer.pad_token_id, + 'position_embedding_type': 'absolute', + 'transformers_version': '4.16.2', + 'type_vocab_size': 2, + 'use_cache': True, + 'vocab_size': tokenizer.vocab_size + } + } + return model_to_dummy_mapping[model] + + def make_dataset_configs(model_family=('bert', 'gpt2')) -> list: model_family = list(model_family) lm_dataset_configs = [ @@ -34,13 +111,11 @@ def make_lm_tokenizer(config: dict): def make_dummy_lm(model_name: str, max_position_embeddings: int, tokenizer): if model_name == 'gpt2': - class_name = GPT2Hparams - model_config = generate_dummy_model_config(class_name, tokenizer) + model_config = generate_dummy_model_config(model_name, tokenizer) model_config['max_position_embeddings'] = max_position_embeddings model = create_gpt2(model_config=model_config) elif model_name == 'bert': - class_name = BERTHparams - model_config = generate_dummy_model_config(class_name, tokenizer) + model_config = generate_dummy_model_config(model_name, tokenizer) model_config['max_position_embeddings'] = max_position_embeddings model = create_bert_mlm(model_config=model_config) else: diff --git a/tests/hparams/__init__.py b/tests/hparams/__init__.py new file mode 100644 index 0000000000..cba5725129 --- /dev/null +++ b/tests/hparams/__init__.py @@ -0,0 +1,2 @@ +# Copyright 2022 MosaicML Composer authors +# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/hparams/common.py b/tests/hparams/common.py new file mode 100644 index 0000000000..cb0cd8b401 --- /dev/null +++ b/tests/hparams/common.py @@ -0,0 +1,221 @@ +# Copyright 2022 MosaicML Composer authors +# SPDX-License-Identifier: Apache-2.0 + +import dataclasses +from typing import Any, Callable, Dict, List, Optional, Type, TypeVar + +import pytest +import yahp as hp +import yaml +from torch.utils.data import RandomSampler, SequentialSampler + +from composer.datasets.dataset_hparams import DataLoaderHparams, DatasetHparams +from composer.datasets.glue_hparams import GLUEHparams +from composer.datasets.lm_dataset_hparams import LMDatasetHparams +from composer.datasets.synthetic_hparams import SyntheticHparamsMixin +from composer.datasets.synthetic_lm import generate_synthetic_tokenizer +from composer.models import ModelHparams +from composer.models.bert.bert_hparams import BERTForClassificationHparams, BERTHparams +from composer.models.deeplabv3.deeplabv3_hparams import DeepLabV3Hparams +from composer.models.gpt2.gpt2_hparams import GPT2Hparams +from composer.models.model_hparams import ModelHparams +from tests.common import RandomClassificationDataset, SimpleConvModel, SimpleModel + +T = TypeVar('T') + + +def assert_in_registry(constructor: Callable, registry: Dict[str, Callable]): + """Assert that the ``registry`` contains ``constructor``.""" + registry_entries = set(registry.values()) + assert constructor in registry_entries, f'Constructor {constructor.__name__} is missing from the registry.' + + +def construct_from_yaml( + constructor: Callable[..., T], + yaml_dict: Optional[Dict[str, Any]] = None, +) -> T: + """Build ``constructor`` from ``yaml_dict`` + + Args: + constructor (Callable): The constructor to test (such as an Hparams class) + yaml_dict (Dict[str, Any], optional): The YAML. Defaults to ``None``, which is equivalent + to an empty dictionary. 
+ """ + yaml_dict = {} if yaml_dict is None else yaml_dict + # ensure that yaml_dict is actually a dictionary of only json-serializable objects + yaml_dict = yaml.safe_load(yaml.safe_dump(yaml_dict)) + instance = hp.create(constructor, yaml_dict, cli_args=False) + return instance + + +model_hparams_to_tokenizer_family: Dict[Type[ModelHparams], str] = { + GPT2Hparams: 'gpt2', + BERTForClassificationHparams: 'bert', + BERTHparams: 'bert' +} + + +@dataclasses.dataclass +class RandomClassificationDatasetHparams(DatasetHparams, SyntheticHparamsMixin): + + data_shape: List[int] = hp.optional('data shape', default_factory=lambda: [1, 1, 1]) + num_classes: int = hp.optional('num_classes', default=2) + + def initialize_object(self, batch_size: int, dataloader_hparams: DataLoaderHparams): + assert self.data_shape is not None + assert self.num_classes is not None + dataset = RandomClassificationDataset( + size=self.synthetic_num_unique_samples, + shape=self.data_shape, + num_classes=self.num_classes, + ) + if self.shuffle: + sampler = RandomSampler(dataset) + else: + sampler = SequentialSampler(dataset) + return dataloader_hparams.initialize_object( + dataset=dataset, + batch_size=batch_size, + sampler=sampler, + drop_last=self.drop_last, + ) + + +def configure_dataset_hparams_for_synthetic( + dataset_hparams: DatasetHparams, + model_hparams: Optional[ModelHparams] = None, +) -> None: + if not isinstance(dataset_hparams, SyntheticHparamsMixin): + pytest.xfail(f'{dataset_hparams.__class__.__name__} does not support synthetic data or num_total_batches') + + assert isinstance(dataset_hparams, SyntheticHparamsMixin) + + dataset_hparams.use_synthetic = True + + if model_hparams and type(model_hparams) in model_hparams_to_tokenizer_family: + tokenizer_family = model_hparams_to_tokenizer_family[type(model_hparams)] + assert isinstance(dataset_hparams, (GLUEHparams, LMDatasetHparams)) + dataset_hparams.tokenizer_name = tokenizer_family + dataset_hparams.max_seq_length = 128 + + +@dataclasses.dataclass +class SimpleModelHparams(ModelHparams): + num_features: int = hp.optional('number of features', default=1) + num_classes: int = hp.optional('number of output classes', default=2) + + def initialize_object(self) -> SimpleModel: + return SimpleModel( + num_features=self.num_features, + num_classes=self.num_classes, + ) + + +@dataclasses.dataclass +class SimpleConvModelHparams(ModelHparams): + num_channels: int = hp.optional('number of channels', default=3) + num_classes: int = hp.optional('number of output classes', default=2) + + def initialize_object(self) -> SimpleConvModel: + return SimpleConvModel( + num_channels=self.num_channels, + num_classes=self.num_classes, + ) + + +def configure_model_hparams_for_synthetic(model_hparams: ModelHparams) -> None: + # configure Transformer-based models for synthetic testing + if type(model_hparams) in model_hparams_to_tokenizer_family.keys(): + assert isinstance(model_hparams, (BERTHparams, GPT2Hparams, BERTForClassificationHparams)) + tokenizer_family = model_hparams_to_tokenizer_family[type(model_hparams)] + + # force a non-pretrained model + model_hparams.use_pretrained = False + model_hparams.pretrained_model_name = None + + # generate tokenizers and synthetic models + tokenizer = generate_synthetic_tokenizer(tokenizer_family=tokenizer_family) + model_hparams.model_config = generate_dummy_model_config(type(model_hparams), tokenizer) + + # configure DeepLabV3 models for synthetic testing + if isinstance(model_hparams, DeepLabV3Hparams): + 
model_hparams.backbone_weights = None # prevent downloading pretrained weights during test + model_hparams.sync_bn = False # sync_bn throws an error when run on CPU + + +def generate_dummy_model_config(cls: Type[hp.Hparams], tokenizer) -> Dict[str, Any]: + model_to_dummy_mapping: Dict[Type[hp.Hparams], Dict[str, Any]] = { + BERTHparams: { + 'architectures': ['BertForMaskedLM'], + 'attention_probs_dropout_prob': 0.1, + 'gradient_checkpointing': False, + 'hidden_act': 'gelu', + 'hidden_dropout_prob': 0.1, + 'hidden_size': 64, + 'initializer_range': 0.02, + 'intermediate_size': 256, + 'layer_norm_eps': 1e-12, + 'max_position_embeddings': 512, + 'model_type': 'bert', + 'num_attention_heads': 1, + 'num_hidden_layers': 1, + 'pad_token_id': tokenizer.pad_token_id, + 'position_embedding_type': 'absolute', + 'transformers_version': '4.6.0.dev0', + 'type_vocab_size': 2, + 'use_cache': True, + 'vocab_size': tokenizer.vocab_size, + }, + GPT2Hparams: { + 'activation_function': 'gelu_new', + 'architectures': ['GPT2LMHeadModel'], + 'attn_pdrop': 0.1, + 'bos_token_id': tokenizer.cls_token_id, + 'embd_pdrop': 0.1, + 'eos_token_id': tokenizer.cls_token_id, + 'initializer_range': 0.02, + 'layer_norm_epsilon': 0.00001, + 'model_type': 'gpt2', + 'n_ctx': 128, + 'n_embd': 64, + 'n_head': 1, + 'n_layer': 1, + 'n_positions': 128, + 'resid_pdrop': 0.1, + 'summary_activation': None, + 'summary_first_dropout': 0.1, + 'summary_proj_to_labels': True, + 'summary_type': 'cls_index', + 'summary_use_proj': True, + 'task_specific_params': { + 'text-generation': { + 'do_sample': True, + 'max_length': 50 + } + }, + 'vocab_size': tokenizer.vocab_size + }, + BERTForClassificationHparams: { + 'architectures': ['BertForSequenceClassification'], + 'attention_probs_dropout_prob': 0.1, + 'classifier_dropout': None, + 'gradient_checkpointing': False, + 'hidden_act': 'gelu', + 'hidden_dropout_prob': 0.1, + 'hidden_size': 64, + 'initializer_range': 0.02, + 'intermediate_size': 256, + 'layer_norm_eps': 1e-12, + 'max_position_embeddings': 512, + 'model_type': 'bert', + 'num_attention_heads': 1, + 'num_hidden_layers': 1, + 'pad_token_id': tokenizer.pad_token_id, + 'position_embedding_type': 'absolute', + 'transformers_version': '4.16.2', + 'type_vocab_size': 2, + 'use_cache': True, + 'vocab_size': tokenizer.vocab_size + } + } + return model_to_dummy_mapping[cls] diff --git a/tests/algorithms/test_algorithm_hparams.py b/tests/hparams/test_algorithm_hparams.py similarity index 94% rename from tests/algorithms/test_algorithm_hparams.py rename to tests/hparams/test_algorithm_hparams.py index 6cd3290c4f..98e109e19c 100644 --- a/tests/algorithms/test_algorithm_hparams.py +++ b/tests/hparams/test_algorithm_hparams.py @@ -10,7 +10,7 @@ from composer.core import Algorithm from tests.algorithms.algorithm_settings import get_alg_kwargs, get_algs_with_marks from tests.common import get_module_subclasses -from tests.common.hparams import assert_in_registry, construct_from_yaml +from tests.hparams.common import assert_in_registry, construct_from_yaml @pytest.mark.parametrize('alg_cls', get_algs_with_marks()) diff --git a/tests/algorithms/test_algorithm_registry.py b/tests/hparams/test_algorithm_registry.py similarity index 100% rename from tests/algorithms/test_algorithm_registry.py rename to tests/hparams/test_algorithm_registry.py diff --git a/tests/callbacks/test_callback_hparams.py b/tests/hparams/test_callback_hparams.py similarity index 92% rename from tests/callbacks/test_callback_hparams.py rename to tests/hparams/test_callback_hparams.py 
index 1ba99036e7..a87272ae88 100644 --- a/tests/callbacks/test_callback_hparams.py +++ b/tests/hparams/test_callback_hparams.py @@ -12,10 +12,11 @@ from composer.loggers.logger_hparams_registry import ObjectStoreLoggerHparams, logger_registry from composer.profiler import JSONTraceHandler, SystemProfiler, TorchProfiler, TraceHandler from tests.callbacks.callback_settings import get_cb_hparams_and_marks, get_cb_kwargs, get_cbs_and_marks -from tests.common.hparams import assert_in_registry, construct_from_yaml +from tests.hparams.common import assert_in_registry, construct_from_yaml @pytest.mark.parametrize('constructor', get_cb_hparams_and_marks()) +@pytest.mark.filterwarnings(r'ignore:Call to deprecated create function:DeprecationWarning') def test_callback_hparams_is_constructable( constructor: Union[Type[Callback], Type[hp.Hparams]], monkeypatch: pytest.MonkeyPatch, diff --git a/tests/datasets/test_dataset_registry.py b/tests/hparams/test_dataset_registry.py similarity index 94% rename from tests/datasets/test_dataset_registry.py rename to tests/hparams/test_dataset_registry.py index 4286226d1c..9555c72928 100644 --- a/tests/datasets/test_dataset_registry.py +++ b/tests/hparams/test_dataset_registry.py @@ -79,6 +79,17 @@ } +@pytest.fixture +def dummy_dataloader_hparams() -> DataLoaderHparams: + return DataLoaderHparams( + num_workers=0, + prefetch_factor=2, + persistent_workers=False, + pin_memory=False, + timeout=0.0, + ) + + @pytest.mark.parametrize('dataset_name', dataset_registry.keys()) def test_dataset(dataset_name: str, dummy_dataloader_hparams: DataLoaderHparams) -> None: hparams_cls = dataset_registry[dataset_name] diff --git a/tests/hparams/test_eval_hparams.py b/tests/hparams/test_eval_hparams.py new file mode 100644 index 0000000000..a2d5de110f --- /dev/null +++ b/tests/hparams/test_eval_hparams.py @@ -0,0 +1,64 @@ +# Copyright 2022 MosaicML Composer authors +# SPDX-License-Identifier: Apache-2.0 + +from composer.core import Event +from composer.trainer.trainer_hparams import EvaluatorHparams, TrainerHparams +from tests.common import EventCounterCallback +from tests.hparams.common import DataLoaderHparams, RandomClassificationDatasetHparams, SimpleModelHparams + + +def test_eval_hparams(): + """Test that `eval_interval` and `eval_subset_num_batches` work when specified via hparams.""" + # Create the trainer from hparams + composer_trainer_hparams = TrainerHparams( + model=SimpleModelHparams(), + train_dataset=RandomClassificationDatasetHparams(), + dataloader=DataLoaderHparams( + num_workers=0, + persistent_workers=False, + pin_memory=False, + ), + max_duration='2ep', + eval_batch_size=1, + train_batch_size=1, + eval_interval='2ep', + eval_subset_num_batches=2, + callbacks=[EventCounterCallback()], + ) + + composer_trainer_hparams.evaluators = [ + EvaluatorHparams( + label='eval1', + eval_interval='3ep', # will run, since eval_at_fit_end = True + subset_num_batches=1, + eval_dataset=RandomClassificationDatasetHparams(), + ), + EvaluatorHparams( + label='eval2', + eval_dataset=RandomClassificationDatasetHparams(), + metric_names=['Accuracy'], + ), + ] + trainer = composer_trainer_hparams.initialize_object() + + # Validate that `subset_num_batches` was set correctly + assert trainer.state.evaluators[0].subset_num_batches == composer_trainer_hparams.evaluators[0].subset_num_batches + assert trainer.state.evaluators[1].subset_num_batches == composer_trainer_hparams.eval_subset_num_batches + + # Train the model + trainer.fit() + + # Validate that `eval_interval` and 
`subset_num_batches` was set correctly for the evaluator that actually + # ran + assert 'eval1' in trainer.state.eval_metrics + assert 'eval2' in trainer.state.eval_metrics + event_counter_callback = None + for callback in trainer.state.callbacks: + if isinstance(callback, EventCounterCallback): + event_counter_callback = callback + break + assert event_counter_callback is not None + assert event_counter_callback.event_to_num_calls[Event.EVAL_START] == 2 + # increment by one for the extra call to `Event.EVAL_BATCH_START` during the evaluation at FIT end. + assert event_counter_callback.event_to_num_calls[ + Event.EVAL_BATCH_START] == composer_trainer_hparams.eval_subset_num_batches + 1 diff --git a/tests/models/test_model_registry.py b/tests/hparams/test_model_registry.py similarity index 100% rename from tests/models/test_model_registry.py rename to tests/hparams/test_model_registry.py diff --git a/tests/models/test_model_yamls.py b/tests/hparams/test_model_yamls.py similarity index 97% rename from tests/models/test_model_yamls.py rename to tests/hparams/test_model_yamls.py index b86ce52fcf..3837d7b96c 100644 --- a/tests/models/test_model_yamls.py +++ b/tests/hparams/test_model_yamls.py @@ -10,7 +10,7 @@ import composer from composer.core.precision import Precision from composer.trainer.trainer_hparams import TrainerHparams -from tests.common import configure_dataset_hparams_for_synthetic, configure_model_hparams_for_synthetic +from tests.hparams.common import configure_dataset_hparams_for_synthetic, configure_model_hparams_for_synthetic def walk_model_yamls(): diff --git a/tests/utils/object_store/test_object_store_hparams.py b/tests/hparams/test_object_store_hparams.py similarity index 97% rename from tests/utils/object_store/test_object_store_hparams.py rename to tests/hparams/test_object_store_hparams.py index d002edab8e..7745689e8e 100644 --- a/tests/utils/object_store/test_object_store_hparams.py +++ b/tests/hparams/test_object_store_hparams.py @@ -9,7 +9,7 @@ from composer.utils.object_store import ObjectStore from composer.utils.object_store.object_store_hparams import (ObjectStoreHparams, SFTPObjectStoreHparams, object_store_registry) -from tests.common.hparams import assert_in_registry, construct_from_yaml +from tests.hparams.common import assert_in_registry, construct_from_yaml from tests.utils.object_store.object_store_settings import (get_object_store_ctx, object_store_hparam_kwargs, object_store_hparams) diff --git a/tests/optim/test_optimizer_registry.py b/tests/hparams/test_optimizer_registry.py similarity index 95% rename from tests/optim/test_optimizer_registry.py rename to tests/hparams/test_optimizer_registry.py index f387890bd0..e7296d7c34 100644 --- a/tests/optim/test_optimizer_registry.py +++ b/tests/hparams/test_optimizer_registry.py @@ -8,8 +8,8 @@ import composer.optim.optimizer_hparams_registry from composer.optim.optimizer_hparams_registry import OptimizerHparams, optimizer_registry from tests.common import get_module_subclasses -from tests.common.hparams import assert_in_registry, construct_from_yaml from tests.common.models import SimpleModel +from tests.hparams.common import assert_in_registry, construct_from_yaml optimizer_hparam_classes = get_module_subclasses(composer.optim.optimizer_hparams_registry, OptimizerHparams) diff --git a/tests/profiler/test_profiler_hparams.py b/tests/hparams/test_profiler_hparams.py similarity index 92% rename from tests/profiler/test_profiler_hparams.py rename to tests/hparams/test_profiler_hparams.py index 
4e674f614c..2ba090a748 100644 --- a/tests/profiler/test_profiler_hparams.py +++ b/tests/hparams/test_profiler_hparams.py @@ -4,7 +4,7 @@ import pathlib from composer.profiler import Profiler -from tests.common.hparams import construct_from_yaml +from tests.hparams.common import construct_from_yaml def test_profiler_is_constructable_from_hparams(tmp_path: pathlib.Path): diff --git a/tests/optim/test_scheduler_registry.py b/tests/hparams/test_scheduler_registry.py similarity index 97% rename from tests/optim/test_scheduler_registry.py rename to tests/hparams/test_scheduler_registry.py index 8226773ca2..a585196752 100644 --- a/tests/optim/test_scheduler_registry.py +++ b/tests/hparams/test_scheduler_registry.py @@ -11,7 +11,7 @@ MultiStepWithWarmupScheduler, PolynomialScheduler, PolynomialWithWarmupScheduler, StepScheduler) from composer.optim.scheduler_hparams_registry import scheduler_registry -from tests.common.hparams import construct_from_yaml +from tests.hparams.common import construct_from_yaml # Cannot query the module and use an isinstance check because schedulers have no base class -- they're just functions # that return functions. Instead, using the registry diff --git a/tests/trainer/test_trainer_hparams.py b/tests/hparams/test_trainer_hparams.py similarity index 94% rename from tests/trainer/test_trainer_hparams.py rename to tests/hparams/test_trainer_hparams.py index f7817d1ed3..54c168a390 100644 --- a/tests/trainer/test_trainer_hparams.py +++ b/tests/hparams/test_trainer_hparams.py @@ -12,8 +12,7 @@ from composer.trainer import Trainer from composer.trainer.trainer_hparams import (EvalHparams, EvalKwargs, ExperimentHparams, FitHparams, FitKwargs, TrainerHparams) -from tests.common import SimpleModelHparams -from tests.common.datasets import RandomClassificationDatasetHparams +from tests.hparams.common import RandomClassificationDatasetHparams, SimpleModelHparams if TYPE_CHECKING: from typing import TypedDict diff --git a/tests/test_yamls.py b/tests/hparams/test_yamls.py similarity index 90% rename from tests/test_yamls.py rename to tests/hparams/test_yamls.py index 49f9e95672..423ad1f8fd 100644 --- a/tests/test_yamls.py +++ b/tests/hparams/test_yamls.py @@ -21,8 +21,8 @@ def recur_generate_yaml_paths(path: str): def generate_yaml_paths(): - yaml_path = os.path.join(os.path.dirname(pathlib.Path(os.path.abspath(os.path.dirname(__file__)))), 'composer', - 'yamls') + yaml_path = os.path.join(os.path.dirname(pathlib.Path(os.path.abspath(os.path.dirname(__file__)))), '..', + 'composer', 'yamls') return recur_generate_yaml_paths(yaml_path) diff --git a/tests/metrics/test_current_metrics.py b/tests/metrics/test_current_metrics.py index ceaff460b3..1887b706a4 100644 --- a/tests/metrics/test_current_metrics.py +++ b/tests/metrics/test_current_metrics.py @@ -1,15 +1,16 @@ # Copyright 2022 MosaicML Composer authors # SPDX-License-Identifier: Apache-2.0 -from typing import Iterable, Tuple from unittest.mock import MagicMock import pytest +from torch.utils.data import DataLoader from composer.core import Callback, State from composer.loggers import Logger from composer.trainer import Trainer from tests.common import SimpleModel +from tests.common.datasets import RandomClassificationDataset class MetricsCallback(Callback): @@ -43,29 +44,32 @@ def eval_end(self, state: State, logger: Logger) -> None: @pytest.mark.parametrize('eval_interval', ['1ba', '1ep', '0ep']) -def test_current_metrics( - dummy_train_dataloader: Iterable, - dummy_val_dataloader: Iterable, - dummy_num_classes: int, - 
dummy_in_shape: Tuple[int, ...], - eval_interval: str, -): +def test_current_metrics(eval_interval: str,): # Configure the trainer - num_channels = dummy_in_shape[0] mock_logger_destination = MagicMock() mock_logger_destination.log_metrics = MagicMock() - model = SimpleModel(num_features=num_channels, num_classes=dummy_num_classes) + model = SimpleModel(num_features=1, num_classes=2) compute_val_metrics = eval_interval != '0ep' train_subset_num_batches = 2 eval_subset_num_batches = 2 num_epochs = 2 metrics_callback = MetricsCallback(compute_val_metrics=compute_val_metrics,) + dataset_kwargs = { + 'num_classes': 2, + 'shape': (1, 5, 5), + } # Create the trainer trainer = Trainer( model=model, - train_dataloader=dummy_train_dataloader, - eval_dataloader=dummy_val_dataloader, + train_dataloader=DataLoader( + RandomClassificationDataset(**dataset_kwargs), + batch_size=16, + ), + eval_dataloader=DataLoader( + RandomClassificationDataset(**dataset_kwargs), + batch_size=8, + ), max_duration=num_epochs, train_subset_num_batches=train_subset_num_batches, eval_subset_num_batches=eval_subset_num_batches, diff --git a/tests/models/test_composer_model.py b/tests/models/test_composer_model.py index c0f11505d7..8403aff7cc 100644 --- a/tests/models/test_composer_model.py +++ b/tests/models/test_composer_model.py @@ -7,8 +7,10 @@ import pytest import torch +from torch.utils.data import DataLoader from composer.trainer import Trainer +from tests.common.datasets import RandomClassificationDataset from tests.common.models import SimpleConvModel, SimpleModel @@ -17,37 +19,42 @@ def test_composermodel_torchscriptable(model): torch.jit.script(model()) -def test_model_access_to_logger(dummy_train_dataloader: Iterable): +@pytest.fixture() +def dataloader(): + return DataLoader(RandomClassificationDataset()) + + +def test_model_access_to_logger(dataloader: Iterable): model = SimpleModel(num_features=1, num_classes=1) assert model.logger is None - trainer = Trainer(model=model, max_duration='1ep', train_dataloader=dummy_train_dataloader) + trainer = Trainer(model=model, max_duration='1ep', train_dataloader=dataloader) assert model.logger is trainer.logger -def test_model_deepcopy(dummy_train_dataloader: Iterable): +def test_model_deepcopy(dataloader: Iterable): model = SimpleModel(num_features=1, num_classes=1) assert model.logger is None - trainer = Trainer(model=model, max_duration='1ep', train_dataloader=dummy_train_dataloader) + trainer = Trainer(model=model, max_duration='1ep', train_dataloader=dataloader) assert model.logger is not None copied_model = copy.deepcopy(trainer.state.model) assert copied_model.logger is model.logger assert model.num_classes == copied_model.num_classes -def test_model_copy(dummy_train_dataloader: Iterable): +def test_model_copy(dataloader: Iterable): model = SimpleModel(num_features=1, num_classes=1) assert model.logger is None - trainer = Trainer(model=model, max_duration='1ep', train_dataloader=dummy_train_dataloader) + trainer = Trainer(model=model, max_duration='1ep', train_dataloader=dataloader) assert model.logger is not None copied_model = copy.copy(trainer.state.model) assert copied_model.logger is model.logger assert model.num_classes == copied_model.num_classes -def test_model_pickle(dummy_train_dataloader: Iterable): +def test_model_pickle(dataloader: Iterable): model = SimpleModel(num_features=1, num_classes=1) assert model.logger is None - trainer = Trainer(model=model, max_duration='1ep', train_dataloader=dummy_train_dataloader) + trainer = Trainer(model=model, 
max_duration='1ep', train_dataloader=dataloader) assert model.logger is not None pickled_model = pickle.dumps(trainer.state.model) restored_model = pickle.loads(pickled_model) diff --git a/tests/optim/test_scheduler.py b/tests/optim/test_scheduler.py index 0dd9a1adf5..dad5a939a1 100644 --- a/tests/optim/test_scheduler.py +++ b/tests/optim/test_scheduler.py @@ -2,30 +2,30 @@ # SPDX-License-Identifier: Apache-2.0 import contextlib -from typing import Iterable, List, Optional, Type +from typing import List, Optional, Type import pytest -import torch -import torch.utils.data +from torch.utils.data import DataLoader from composer.core import State, Time from composer.core.time import TimeUnit -from composer.models.base import ComposerModel from composer.optim.scheduler import (ComposerScheduler, ConstantWithWarmupScheduler, CosineAnnealingScheduler, CosineAnnealingWarmRestartsScheduler, CosineAnnealingWithWarmupScheduler, ExponentialScheduler, LinearScheduler, LinearWithWarmupScheduler, MultiStepScheduler, MultiStepWithWarmupScheduler, PolynomialScheduler, PolynomialWithWarmupScheduler, StepScheduler) from composer.trainer.trainer import Trainer +from tests.common.datasets import RandomClassificationDataset +from tests.common.models import SimpleModel MAX_DURATION = '1000ep' STEPS_PER_EPOCH = 1000 @pytest.fixture -def dummy_schedulers_state(dummy_model: torch.nn.Module, rank_zero_seed: int): +def dummy_schedulers_state(rank_zero_seed: int): state = State( - model=dummy_model, + model=SimpleModel(), run_name='run_name', rank_zero_seed=rank_zero_seed, max_duration=MAX_DURATION, @@ -153,12 +153,16 @@ def test_scheduler_init(scheduler: ComposerScheduler, ssr: float, test_times: Li (lambda state: 0.01, 1.5, ValueError), # this should error since the ssr != 1.0 and the lambda doesn't support ssr ]) -def test_scheduler_trains(scheduler: ComposerScheduler, ssr: float, dummy_model: ComposerModel, rank_zero_seed: int, - dummy_train_dataloader: Iterable, should_raise: Optional[Type[Exception]]): +def test_scheduler_trains( + scheduler: ComposerScheduler, + ssr: float, + rank_zero_seed: int, + should_raise: Optional[Type[Exception]], +): with pytest.raises(should_raise) if should_raise is not None else contextlib.nullcontext(): trainer = Trainer( - model=dummy_model, - train_dataloader=dummy_train_dataloader, + model=SimpleModel(), + train_dataloader=DataLoader(RandomClassificationDataset()), max_duration='2ep', train_subset_num_batches=5, scale_schedule_ratio=ssr, diff --git a/tests/trainer/test_ddp_sync_strategy.py b/tests/trainer/test_ddp_sync_strategy.py index e3eb94a54f..f3f7a5735c 100644 --- a/tests/trainer/test_ddp_sync_strategy.py +++ b/tests/trainer/test_ddp_sync_strategy.py @@ -1,16 +1,18 @@ # Copyright 2022 MosaicML Composer authors # SPDX-License-Identifier: Apache-2.0 -from typing import Iterable, List, Optional +from typing import List, Optional import pytest import torch import torch.nn as nn from torch import Tensor +from torch.utils.data import DataLoader from composer.core import State from composer.trainer.ddp import ddp_sync_context, prepare_ddp_module from composer.utils import dist +from tests.common.datasets import RandomClassificationDataset class MinimalConditionalModel(nn.Module): @@ -45,8 +47,11 @@ def loss(self, output: Tensor, target: Tensor): pytest.param('forced_sync', ([-1, None, None], [-1, -1, None], [-1.5, -1.5, None]), id='forced_sync'), ]) @pytest.mark.world_size(2) -def test_ddp_sync_strategy(ddp_sync_strategy: str, expected_grads: List[List[Optional[float]]], - 
dummy_train_dataloader: Iterable, rank_zero_seed: int): +def test_ddp_sync_strategy( + ddp_sync_strategy: str, + expected_grads: List[List[Optional[float]]], + rank_zero_seed: int, +): original_model = MinimalConditionalModel() # ddp = DDP(backend="gloo", find_unused_parameters=True, sync_strategy=ddp_sync_strategy, timeout=5.) optimizer = torch.optim.SGD(original_model.parameters(), 0.1) @@ -57,7 +62,7 @@ def test_ddp_sync_strategy(ddp_sync_strategy: str, expected_grads: List[List[Opt optimizers=optimizer, grad_accum=2, max_duration='1ep', - dataloader=dummy_train_dataloader, + dataloader=DataLoader(RandomClassificationDataset()), dataloader_label='train', precision='fp32', ) diff --git a/tests/trainer/test_scale_schedule.py b/tests/trainer/test_scale_schedule.py index 95064fd2b3..a7ef29e400 100644 --- a/tests/trainer/test_scale_schedule.py +++ b/tests/trainer/test_scale_schedule.py @@ -8,14 +8,15 @@ import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import ExponentialLR +from torch.utils.data import DataLoader +from composer import Trainer from composer.core import Callback, State, TimeUnit from composer.core.types import PyTorchScheduler from composer.loggers.logger import Logger from composer.optim import MultiStepScheduler -from composer.optim.optimizer_hparams_registry import SGDHparams from composer.trainer._scale_schedule import scale_pytorch_scheduler -from composer.trainer.trainer_hparams import TrainerHparams +from tests.common.datasets import RandomClassificationDataset from tests.common.models import SimpleModel @@ -104,18 +105,19 @@ class TestScaleScheduleTrainer(): def test_epochs_scaled( self, ssr: float, - composer_trainer_hparams: TrainerHparams, ): + model = SimpleModel() + optimizers = torch.optim.SGD(model.parameters(), lr=1.0) + trainer = Trainer( + model=model, + train_dataloader=DataLoader(RandomClassificationDataset()), + optimizers=optimizers, + schedulers=[MultiStepScheduler(milestones=['30ba', '50ba'], gamma=0.1)], + scale_schedule_ratio=ssr, + callbacks=[CheckScaleSchedule(ssr)], + max_duration='10ep', + ) - composer_trainer_hparams.optimizers = SGDHparams(lr=1.0) - composer_trainer_hparams.max_duration = '10ep' - composer_trainer_hparams.schedulers = [MultiStepScheduler(milestones=['30ba', '50ba'], gamma=0.1)] - - composer_trainer_hparams.scale_schedule_ratio = ssr - trainer = composer_trainer_hparams.initialize_object() - - trainer = composer_trainer_hparams.initialize_object() - trainer.state.callbacks.append(CheckScaleSchedule(ssr)) trainer.state.train_metrics = {} # avoid metrics construction assert trainer.state.max_duration is not None diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 495d1fb51d..c025321166 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -459,6 +459,7 @@ def test_precision( max_duration=max_duration, train_dataloader=train_dataloader, precision=precision, + device=device, ) if not should_error: @@ -470,6 +471,7 @@ def test_precision( model=copied_model, max_duration=max_duration, train_dataloader=train_dataloader, + device=device, ) with ctx: fit_trainer.fit(precision=precision) diff --git a/tests/trainer/test_trainer_eval.py b/tests/trainer/test_trainer_eval.py index 10a96b4009..d9c24de57d 100644 --- a/tests/trainer/test_trainer_eval.py +++ b/tests/trainer/test_trainer_eval.py @@ -11,11 +11,8 @@ from composer.core.evaluator import Evaluator, evaluate_periodically from composer.core.state import State from composer.core.time import Time, TimeUnit 
-from composer.datasets.evaluator_hparams import EvaluatorHparams from composer.trainer import Trainer -from composer.trainer.trainer_hparams import TrainerHparams from tests.common import EventCounterCallback, RandomClassificationDataset, SimpleModel -from tests.common.datasets import RandomClassificationDatasetHparams def test_eval(): @@ -218,52 +215,6 @@ def test_eval_params_init( assert event_counter_callback.event_to_num_calls[Event.EVAL_BATCH_START] == 1 -def test_eval_hparams(composer_trainer_hparams: TrainerHparams): - """Test that `eval_interval` and `eval_subset_num_batches` work when specified via hparams.""" - # Create the trainer from hparams - composer_trainer_hparams.eval_interval = '2ep' - composer_trainer_hparams.eval_subset_num_batches = 2 - composer_trainer_hparams.evaluators = [ - EvaluatorHparams( - label='eval1', - eval_interval='3ep', # will run, since eval_at_fit_end = True - subset_num_batches=1, - eval_dataset=RandomClassificationDatasetHparams(), - ), - EvaluatorHparams( - label='eval2', - eval_dataset=RandomClassificationDatasetHparams(), - metric_names=['Accuracy'], - ), - ] - composer_trainer_hparams.val_dataset = None - composer_trainer_hparams.callbacks = [EventCounterCallback()] - composer_trainer_hparams.max_duration = '2ep' - trainer = composer_trainer_hparams.initialize_object() - - # Validate that `subset_num_batches` was set correctly - assert trainer.state.evaluators[0].subset_num_batches == composer_trainer_hparams.evaluators[0].subset_num_batches - assert trainer.state.evaluators[1].subset_num_batches == composer_trainer_hparams.eval_subset_num_batches - - # Train the model - trainer.fit() - - # Validate that `eval_interval` and `subset_num_batches` was set correctly for the evaluator that actually - # ran - assert 'eval1' in trainer.state.eval_metrics - assert 'eval2' in trainer.state.eval_metrics - event_counter_callback = None - for callback in trainer.state.callbacks: - if isinstance(callback, EventCounterCallback): - event_counter_callback = callback - break - assert event_counter_callback is not None - assert event_counter_callback.event_to_num_calls[Event.EVAL_START] == 2 - # increment by one for the extra call to `Event.EVAL_BATCH_START` during the evaluation at FIT end. - assert event_counter_callback.event_to_num_calls[ - Event.EVAL_BATCH_START] == composer_trainer_hparams.eval_subset_num_batches + 1 - - def test_eval_params_evaluator(): """Test the `eval_subset_num_batches` and `eval_interval` work when specified as part of an evaluator.""" # Construct the trainer
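
Taken together, the test hunks above apply one consistent refactor: hparams-driven setup (`TrainerHparams.initialize_object()`, `EvaluatorHparams`, and the shared `dummy_*` fixtures) is replaced with direct construction from `tests.common` helpers, so each test is self-contained. A minimal sketch of the resulting style, assuming only the `tests.common` exports visible in the rewritten imports above; the test name is illustrative, not part of this diff:

```python
from torch.utils.data import DataLoader

from composer.trainer import Trainer
from tests.common import RandomClassificationDataset, SimpleModel


def test_trains_without_hparams():  # hypothetical name, for illustration only
    # Construct the model and dataloader directly instead of via TrainerHparams.
    trainer = Trainer(
        model=SimpleModel(num_features=1, num_classes=2),
        train_dataloader=DataLoader(RandomClassificationDataset(), batch_size=16),
        max_duration='2ep',
        train_subset_num_batches=2,
    )
    trainer.fit()
```

This mirrors the rewritten `test_scale_schedule.py` and `test_current_metrics.py` hunks above, which drop their `conftest`-provided `composer_trainer_hparams` and `dummy_*` arguments in the same way.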