
Commit

fix tests
fxmarty committed Jul 26, 2023
1 parent d1f160a · commit c70a3db
Showing 6 changed files with 5 additions and 91 deletions.

tests/bettertransformer/test_audio.py — 16 changes: 0 additions & 16 deletions
@@ -160,22 +160,6 @@ def test_logits(self, model_type: str):
),
)

-    @parameterized.expand(SUPPORTED_ARCH)
-    def test_raise_autocast(self, model_type: str):
-        model_ids = (
-            MODELS_DICT[model_type] if isinstance(MODELS_DICT[model_type], tuple) else (MODELS_DICT[model_type],)
-        )
-        for model_id in model_ids:
-            self._test_raise_autocast(model_id, model_type=model_type)
-
-    @parameterized.expand(SUPPORTED_ARCH)
-    def test_raise_train(self, model_type: str):
-        model_ids = (
-            MODELS_DICT[model_type] if isinstance(MODELS_DICT[model_type], tuple) else (MODELS_DICT[model_type],)
-        )
-        for model_id in model_ids:
-            self._test_raise_train(model_id, model_type=model_type)

@parameterized.expand(grid_parameters(FULL_GRID))
def test_invert_modules(self, test_name: str, model_type: str, keep_original_model=False):
if model_type in ["hubert", "wav2vec2"] and keep_original_model is True:

tests/bettertransformer/test_decoder.py — 5 changes: 0 additions & 5 deletions
@@ -181,11 +181,6 @@ def test_generation(self, test_name: str, model_type: str, batch_size: int, padd
f" Maxdiff: {(result_vanilla - result_bettertransformer).abs().max()}",
)

-    @parameterized.expand(SUPPORTED_ARCH)
-    def test_raise_autocast(self, model_type: str):
-        model_id = MODELS_DICT[model_type]
-        self._test_raise_autocast(model_id, model_type=model_type)

@parameterized.expand(SUPPORTED_ARCH)
@pytest.mark.training
def test_train(self, model_type: str):

tests/bettertransformer/test_encoder.py — 15 changes: 0 additions & 15 deletions
@@ -209,21 +209,6 @@ def check_accelerate_compatibility_cpu_gpu(self, keep_original_model=True, max_m
self.assertTrue(torch.allclose(output_bt[0][1, 3:], torch.zeros_like(output_bt[0][1, 3:])))
gc.collect()

-    @parameterized.expand(SUPPORTED_ARCH)
-    def test_raise_autocast(self, model_type: str):
-        if model_type == "rocbert":
-            self.skipTest(
-                "unrelated issue with torch.amp.autocast with rocbert (expected scalar type BFloat16 but found Float)"
-            )
-
-        model_id = MODELS_DICT[model_type]
-        self._test_raise_autocast(model_id, model_type)
-
-    @parameterized.expand(SUPPORTED_ARCH)
-    def test_raise_train(self, model_type: str):
-        model_id = MODELS_DICT[model_type]
-        self._test_raise_train(model_id, model_type)

@pytest.mark.gpu_test
@pytest.mark.accelerate_test
def test_accelerate_compatibility_cpu_gpu(self):

tests/bettertransformer/test_encoder_decoder.py — 13 changes: 0 additions & 13 deletions
@@ -89,19 +89,6 @@ def test_logits_backward(self, test_name: str, model_type: str, padding, max_len
model_id = MODELS_DICT[model_type]
self._test_logits_backward(model_id, model_type=model_type, padding=padding, max_length=max_length)

-    @parameterized.expand(SUPPORTED_ARCH)
-    def test_raise_autocast(self, model_type: str):
-        model_id = MODELS_DICT[model_type]
-        self._test_raise_autocast(model_id, model_type=model_type)
-
-    @parameterized.expand(SUPPORTED_ARCH)
-    def test_raise_train(self, model_type: str):
-        model_id = MODELS_DICT[model_type]
-        if model_type not in ["blenderbot", "pegasus", "t5"]:
-            self._test_raise_train(model_id, model_type=model_type)
-        else:
-            self._test_train_decoder(model_id, model_type=model_type)

@parameterized.expand(grid_parameters(FULL_GRID))
def test_invert_modules(self, test_name: str, model_type: str, keep_original_model=False):
model_id = MODELS_DICT[model_type]

tests/bettertransformer/test_vision.py — 12 changes: 0 additions & 12 deletions
@@ -73,18 +73,6 @@ def test_logits(self, model_type: str):
model_id = MODELS_DICT[model_type]
self._test_logits(model_id, model_type=model_type)

-    @parameterized.expand(SUPPORTED_ARCH)
-    def test_raise_autocast(self, model_type: str):
-        model_id = MODELS_DICT[model_type]
-        self._test_raise_autocast(model_id, model_type=model_type)
-
-    @parameterized.expand(SUPPORTED_ARCH)
-    def test_raise_train(self, model_type: str):
-        if model_type in ["blip-2"]:
-            self.skipTest("can be trained")
-        model_id = MODELS_DICT[model_type]
-        self._test_raise_train(model_id, model_type=model_type)

@parameterized.expand(
grid_parameters(
{

tests/bettertransformer/testing_utils.py — 35 changes: 5 additions & 30 deletions
@@ -108,9 +108,6 @@ class BetterTransformersTestMixin(unittest.TestCase):
- `test_logits`: This tests if the converted model produces the same logits
than the original model.
- `test_raise_on_save`: Test if the converion properly raises an error if someone tries to save the model using `save_pretrained`.
-    - `test_raise_autocast`: A tests that checks if the conversion raises an error if the model is run under
-      `torch.cuda.amp.autocast`.
-    - `test_raise_train`: A tests that checks if the conversion raises an error if the model is run in training mode.
"""

def prepare_inputs_for_class(self, model_id=None, model_type=None):
@@ -168,6 +165,7 @@ def _test_logits_backward(self, model_id: str, model_type: str, **preprocessor_k
# `torch.random.set_rng_state`. An alternative could be to make dropout stateful,
# and to replace them with a static pattern for this test. Currently, we use
# functional dropout though.
+        # We need to be in train mode to take the right path.
random_config = set_dropout_to_zero(random_config)

# m2m_100 randomly drops layers, which makes testing flaky (see `skip_the_layer` in transformers, some other models use it as well)
@@ -229,9 +227,13 @@ def _test_logits(self, model_id: str, model_type: str, **preprocessor_kwargs):
hf_random_model = AutoModel.from_pretrained(model_id).eval()
random_config = hf_random_model.config

+        hf_random_model = hf_random_model.eval()
+
torch.manual_seed(0)
converted_model = BetterTransformer.transform(hf_random_model, keep_original_model=True)

+        self.assertFalse(hf_random_model.training)
+        self.assertFalse(converted_model.training)
self.assertFalse(
hasattr(hf_random_model, "use_bettertransformer"),
f"The model {hf_random_model.__class__.__name__} has been converted to a `fast` model by mistake.",
@@ -290,33 +292,6 @@ def assert_equal(self, tensor1, tensor2, atol: float, model_name: str):
f" Maxdiff: {torch.abs(tensor1 - tensor2).max()}",
)

-    def _test_raise_autocast(self, model_id: str, model_type: str, **kwargs):
-        r"""
-        A tests that checks if the conversion raises an error if the model is run under
-        `torch.cuda.amp.autocast`.
-        """
-        inputs = self.prepare_inputs_for_class(model_id=model_id, model_type=model_type, **kwargs)
-        hf_random_model = AutoModel.from_pretrained(model_id).eval()
-
-        # Check for the autocast on CPU
-        with self.assertRaises(ValueError), torch.amp.autocast("cpu"):
-            bt_model = BetterTransformer.transform(hf_random_model, keep_original_model=True)
-            _ = bt_model(**inputs)
-
-    def _test_raise_train(self, model_id: str, model_type: str, **kwargs):
-        r"""
-        A tests that checks if the conversion raises an error if the model is run under
-        `model.train()`.
-        """
-        inputs = self.prepare_inputs_for_class(model_id=model_id, model_type=model_type, **kwargs)
-
-        hf_random_model = AutoModel.from_pretrained(model_id).eval()
-        # Check for training mode
-        with self.assertRaises(ValueError):
-            bt_model = BetterTransformer.transform(hf_random_model, keep_original_model=True)
-            bt_model.train()
-            _ = bt_model(**inputs)

def _test_train_decoder(self, model_id: str, model_type: str, **kwargs):
r"""
A tests that checks if the training works as expected for decoder models.
