From 82172b89a6bb38ee6b956272eb4183a71ab9ecb3 Mon Sep 17 00:00:00 2001
From: Eitan Turok
Date: Tue, 24 Sep 2024 18:56:42 +0000
Subject: [PATCH] remove icecream

---
 tests/algorithms/test_algorithms_train.py | 4 +---
 tests/checkpoint/test_state_dict.py       | 3 +--
 tests/common/models.py                    | 1 -
 3 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/tests/algorithms/test_algorithms_train.py b/tests/algorithms/test_algorithms_train.py
index a9f4951ac4..a73a649b70 100644
--- a/tests/algorithms/test_algorithms_train.py
+++ b/tests/algorithms/test_algorithms_train.py
@@ -6,8 +6,7 @@
 from composer import Algorithm, Trainer
 from composer.algorithms import GyroDropout, LayerFreezing
 from tests.algorithms.algorithm_settings import get_alg_dataloader, get_alg_kwargs, get_alg_model, get_algs_with_marks
-from icecream import install
-install()
+
 
 @pytest.mark.gpu
 @pytest.mark.parametrize('alg_cls', get_algs_with_marks())
@@ -16,7 +15,6 @@ def test_algorithm_trains(alg_cls: type[Algorithm]):
     alg_kwargs = get_alg_kwargs(alg_cls)
     model = get_alg_model(alg_cls)
     dataloader = get_alg_dataloader(alg_cls)
-    ic(model, dataloader)
     trainer = Trainer(
         model=model,
         train_dataloader=dataloader,
diff --git a/tests/checkpoint/test_state_dict.py b/tests/checkpoint/test_state_dict.py
index feb5b9233a..33e6b2ae27 100644
--- a/tests/checkpoint/test_state_dict.py
+++ b/tests/checkpoint/test_state_dict.py
@@ -21,8 +21,7 @@
 from tests.common.compare import deep_compare
 from tests.common.markers import world_size
 from tests.common.models import EvenSimplerMLP, SimpleComposerMLP, configure_tiny_gpt2_hf_model
-from icecream import install
-install()
+
 
 @pytest.mark.gpu
 @pytest.mark.parametrize('use_composer_model', [True, False])
diff --git a/tests/common/models.py b/tests/common/models.py
index e430353e9e..bdef2b0967 100644
--- a/tests/common/models.py
+++ b/tests/common/models.py
@@ -480,7 +480,6 @@ def loss(self, outputs: torch.Tensor, batch: tuple[Any, torch.Tensor], *args, **
 
     def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:
         _, targets = batch
-        ic(metric, outputs, targets)
         metric.update(outputs.squeeze(dim=0), targets.squeeze(dim=0))
 
     def forward(self, batch: tuple[torch.Tensor, Any]) -> torch.Tensor: