Merge pull request #2292 from huggingface/inference_tests
Do full inference test against test vectors for test_* models
rwightman authored Oct 2, 2024
2 parents 44f1a34 + 1a2d8bb commit 14d55a7
Showing 2 changed files with 53 additions and 8 deletions.
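The new check, test_model_inference, is registered under the base pytest marker and parametrized over list_pretrained('test_*'), so it only exercises the small test_* models. A minimal local run might look like the sketch below; the exact invocation is an assumption, not part of this commit, and it needs network access to the Hugging Face Hub for the weights and test vectors.

# Hypothetical local invocation of the new inference test; assumes pytest, timm,
# and the repo's test requirements are installed.
import pytest

# Select only the new test in tests/test_models.py via its marker and name.
exit_code = pytest.main(['tests/test_models.py', '-m', 'base', '-k', 'test_model_inference', '-v'])
print('pytest exit code:', exit_code)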
8 changes: 4 additions & 4 deletions .github/workflows/tests.yml
@@ -16,11 +16,11 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest]
-        python: ['3.10', '3.11']
-        torch: [{base: '1.13.0', vision: '0.14.0'}, {base: '2.1.0', vision: '0.16.0'}]
+        python: ['3.10', '3.12']
+        torch: [{base: '1.13.0', vision: '0.14.0'}, {base: '2.4.1', vision: '0.19.1'}]
testmarker: ['-k "not test_models"', '-m base', '-m cfg', '-m torchscript', '-m features', '-m fxforward', '-m fxbackward']
exclude:
-          - python: '3.11'
+          - python: '3.12'
torch: {base: '1.13.0', vision: '0.14.0'}
runs-on: ${{ matrix.os }}

@@ -46,7 +46,7 @@ jobs:
sudo sed -i 's/azure\.//' /etc/apt/sources.list
sudo apt update
sudo apt install -y google-perftools
-          pip install --no-cache-dir torch==${{ matrix.torch.base }}+cpu torchvision==${{ matrix.torch.vision }}+cpu -f https://download.pytorch.org/whl/torch_stable.html
+          pip install --no-cache-dir torch==${{ matrix.torch.base }}+cpu torchvision==${{ matrix.torch.vision }}+cpu --index-url https://download.pytorch.org/whl/cpu
- name: Install requirements
run: |
pip install -r requirements.txt
53 changes: 49 additions & 4 deletions tests/test_models.py
@@ -26,7 +26,7 @@
has_fx_feature_extraction = False

import timm
-from timm import list_models, create_model, set_scriptable, get_pretrained_cfg_value
+from timm import list_models, list_pretrained, create_model, set_scriptable, get_pretrained_cfg_value
from timm.layers import Format, get_spatial_dim, get_channel_dim
from timm.models import get_notrace_modules, get_notrace_functions

@@ -39,7 +39,8 @@
torch_device = os.environ.get('TORCH_DEVICE', 'cpu')
timeout = os.environ.get('TIMEOUT')
timeout120 = int(timeout) if timeout else 120
-timeout300 = int(timeout) if timeout else 300
+timeout240 = int(timeout) if timeout else 240
+timeout360 = int(timeout) if timeout else 360

if hasattr(torch._C, '_jit_set_profiling_executor'):
# legacy executor is too slow to compile large models for unit tests
@@ -118,6 +119,50 @@ def _get_input_size(model=None, model_name='', target=None):
return input_size


@pytest.mark.base
@pytest.mark.timeout(timeout240)
@pytest.mark.parametrize('model_name', list_pretrained('test_*'))
@pytest.mark.parametrize('batch_size', [1])
def test_model_inference(model_name, batch_size):
"""Run a single forward pass with each model"""
from PIL import Image
from huggingface_hub import snapshot_download
import tempfile
import safetensors

model = create_model(model_name, pretrained=True)
model.eval()
pp = timm.data.create_transform(**timm.data.resolve_data_config(model=model))

with tempfile.TemporaryDirectory() as temp_dir:
snapshot_download(
repo_id='timm/' + model_name, repo_type='model', local_dir=temp_dir, allow_patterns='test/*'
)
rand_tensors = safetensors.torch.load_file(os.path.join(temp_dir, 'test', 'rand_tensors.safetensors'))
owl_tensors = safetensors.torch.load_file(os.path.join(temp_dir, 'test', 'owl_tensors.safetensors'))
test_owl = Image.open(os.path.join(temp_dir, 'test', 'test_owl.jpg'))

with torch.no_grad():
rand_output = model(rand_tensors['input'])
rand_features = model.forward_features(rand_tensors['input'])
rand_pre_logits = model.forward_head(rand_features, pre_logits=True)
assert torch.allclose(rand_output, rand_tensors['output'], rtol=1e-3, atol=1e-4)
assert torch.allclose(rand_features, rand_tensors['features'], rtol=1e-3, atol=1e-4)
assert torch.allclose(rand_pre_logits, rand_tensors['pre_logits'], rtol=1e-3, atol=1e-4)

def _test_owl(owl_input):
owl_output = model(owl_input)
owl_features = model.forward_features(owl_input)
owl_pre_logits = model.forward_head(owl_features.clone(), pre_logits=True)
assert owl_output.softmax(1).argmax(1) == 24 # owl
assert torch.allclose(owl_output, owl_tensors['output'], rtol=1e-3, atol=1e-4)
assert torch.allclose(owl_features, owl_tensors['features'], rtol=1e-3, atol=1e-4)
assert torch.allclose(owl_pre_logits, owl_tensors['pre_logits'], rtol=1e-3, atol=1e-4)

_test_owl(owl_tensors['input']) # test with original pp owl tensor
_test_owl(pp(test_owl).unsqueeze(0)) # re-process from original jpg


@pytest.mark.base
@pytest.mark.timeout(timeout120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS))
@@ -182,7 +227,7 @@ def test_model_backward(model_name, batch_size):
)

@pytest.mark.cfg
-@pytest.mark.timeout(timeout300)
+@pytest.mark.timeout(timeout360)
@pytest.mark.parametrize('model_name', list_models(
exclude_filters=EXCLUDE_FILTERS + NON_STD_FILTERS, include_tags=True))
@pytest.mark.parametrize('batch_size', [1])
@@ -260,7 +305,7 @@ def test_model_default_cfgs(model_name, batch_size):


@pytest.mark.cfg
-@pytest.mark.timeout(timeout300)
+@pytest.mark.timeout(timeout360)
@pytest.mark.parametrize('model_name', list_models(filter=NON_STD_FILTERS, exclude_filters=NON_STD_EXCLUDE_FILTERS, include_tags=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_default_cfgs_non_std(model_name, batch_size):
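The reference files that test_model_inference downloads (test/rand_tensors.safetensors, test/owl_tensors.safetensors, test/test_owl.jpg) live alongside the pretrained weights in each model's Hub repo and are not created by this commit. As a rough illustration of the format the test expects, a generator for the random-input vectors could look like the sketch below; the model name, input size, and output path are assumptions, and only the key names ('input', 'output', 'features', 'pre_logits') are taken from the test code.

# Hypothetical generator for a rand_tensors.safetensors payload; the key names mirror
# what test_model_inference loads, everything else here is an assumption.
import torch
import safetensors.torch
from timm import create_model

model = create_model('test_vit.r160_in1k', pretrained=True).eval()  # example test_* model
x = torch.randn(1, 3, 160, 160)  # random input; size assumed from the model's default cfg

with torch.no_grad():
    output = model(x)                     # same calls the test replays against stored tensors
    features = model.forward_features(x)
    pre_logits = model.forward_head(features, pre_logits=True)

safetensors.torch.save_file(
    {'input': x, 'output': output, 'features': features, 'pre_logits': pre_logits},
    'rand_tensors.safetensors',
)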
