Skip to content

Commit

Permalink
fix repeat_interleave benchmark bug
Browse files Browse the repository at this point in the history
  • Loading branch information
kiddyjinjin committed Nov 4, 2024
1 parent e2c1936 commit 9da92ff
Show file tree
Hide file tree
Showing 2 changed files with 17 additions and 4 deletions.
5 changes: 3 additions & 2 deletions benchmark/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,11 +177,12 @@ def setup_once(request):
print("\nThis is query mode; all benchmark functions will be skipped.")
else:
note_info = (
f"\n\nNote: The 'size' field below is for backward compatibility with previous versions of the benchmark. "
f"\nThis field will be removed in a future release."
f"\n\nNote: The 'size' field below is for backward compatibility with previous versions of the benchmark. "
f"\nThis field will be removed in a future release."
)
print(note_info)


@pytest.fixture()
def extract_and_log_op_attributes(request):
print("")
Expand Down
16 changes: 14 additions & 2 deletions benchmark/test_tensor_concat_perf.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,8 +124,20 @@ def repeat_input_fn(shape, cur_dtype, device):

def repeat_interleave_self_input_fn(shape, dtype, device):
    """Yield benchmark argument tuples for ``torch.repeat_interleave``.

    Always yields the Tensor-repeats overload; under COMPREHENSIVE bench
    level, additionally yields the scalar-repeats overload.
    """
    inp = generate_tensor_input(shape, dtype, device)
    # repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
    # One repeat count per row along dim 0; counts drawn from [0, 0x2F).
    per_row_repeats = torch.randint(
        low=0,
        high=0x2F,
        size=(shape[0],),
        device=device,
    )
    yield inp, per_row_repeats, 0
    if Config.bench_level == BenchLevel.COMPREHENSIVE:
        # repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
        yield inp, 3


@pytest.mark.parametrize(
Expand Down

0 comments on commit 9da92ff

Please sign in to comment.