[mpact][benchmark] Add new benchmarks (#36)
* add benchmarks

* modify comments
yinying-lisa-li authored Jun 17, 2024
1 parent d1a5ea0 commit 845dfbe
Showing 1 changed file with 66 additions and 51 deletions.
benchmark/python/benchmarks/kernels_benchmark.py (117 changes: 66 additions & 51 deletions)
@@ -1,6 +1,7 @@
 import torch
 import argparse
 import numpy as np
+from mpact.models.kernels import *
 from mpact_benchmark.utils.benchmark_utils import benchmark, Backends
 
 
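The new wildcard import supplies the kernel modules that replace the inline Net classes in the hunks below (MMNet, MVNet, AddNet, MulNet, SelfNet, SDDMMNet, plus the new FeatureScale and Normalization). Their definitions are not part of this diff; as a minimal sketch, MMNet presumably mirrors the inline module it replaces:

import torch


class MMNet(torch.nn.Module):
    """Hypothetical reconstruction of the matmul kernel module from mpact.models.kernels."""

    def forward(self, x, y):
        # Same computation as the inline Net removed in the matmul hunk below.
        return torch.matmul(x, y)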
@@ -23,15 +24,7 @@
 )
 def matmul() -> torch.nn.Module:
     """Matrix multiplication."""
-
-    class Net(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-
-        def forward(self, x, y):
-            return torch.matmul(x, y)
-
-    return Net()
+    return MMNet()
 
 
 @benchmark(
@@ -53,15 +46,7 @@ def forward(self, x, y):
 )
 def matvec() -> torch.nn.Module:
     """Matrix-vector multiplication."""
-
-    class Net(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-
-        def forward(self, x, y):
-            return torch.mv(x, y)
-
-    return Net()
+    return MVNet()
 
 
 @benchmark(
@@ -85,15 +70,7 @@ def forward(self, x, y):
 )
 def add() -> torch.nn.Module:
     """Element-wise addition."""
-
-    class Net(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-
-        def forward(self, x, y):
-            return torch.add(x, y)
-
-    return Net()
+    return AddNet()
 
 
 @benchmark(
@@ -115,15 +92,7 @@ def forward(self, x, y):
 )
 def elt_mul() -> torch.nn.Module:
     """Element-wise multiplication."""
-
-    class Net(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-
-        def forward(self, x, y):
-            return torch.mul(x, y)
-
-    return Net()
+    return MulNet()
 
 
 @benchmark(
@@ -144,15 +113,7 @@ def forward(self, x, y):
 )
 def nop() -> torch.nn.Module:
     """Returns matrix unmodified (speed of light)."""
-
-    class Net(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-
-        def forward(self, x):
-            return x
-
-    return Net()
+    return SelfNet()
 
 
 @benchmark(
@@ -175,15 +136,67 @@
 )
 def sddmm() -> torch.nn.Module:
     """SDDMM: C = S ○ (A X B) Sampled dense-dense matrix-matrix multiplication."""
-
-    class Net(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-
-        def forward(self, x, y, z):
-            return torch.mul(x, torch.matmul(y, z))
-
-    return Net()
+    return SDDMMNet()
+
+
+@benchmark(
+    [
+        {
+            "name": f"{fmt}_{shape}_{dtype.__name__}",
+            "shape": shape,
+            "formats": (fmt,),
+            "dtype": dtype,
+            # TODO: add mpact and torch inductor once they work.
+            "backends": [
+                b
+                for b in Backends
+                if b.value
+                in (
+                    Backends.TORCH_SPARSE_EAGER.value,
+                    Backends.TORCH_DENSE_EAGER.value,
+                )
+            ],
+            "drange": (1, 100),
+            "sparsity": [0, 0.5, 0.9, 0.99],
+        }
+        for shape in [([2**i, 2**i],) for i in range(5, 8)]
+        for fmt in ["dense"]
+        for dtype in [np.float64]
+    ]
+)
+def feature_scale() -> torch.nn.Module:
+    """Scales feature matrix in GNN."""
+    return FeatureScale()
+
+
+@benchmark(
+    [
+        {
+            "name": f"{fmt}_{shape}_{dtype.__name__}",
+            "shape": shape,
+            "formats": (fmt,),
+            "dtype": dtype,
+            # TODO: add mpact and torch inductor once they work.
+            "backends": [
+                b
+                for b in Backends
+                if b.value
+                in (
+                    Backends.TORCH_SPARSE_EAGER.value,
+                    Backends.TORCH_DENSE_EAGER.value,
+                )
+            ],
+            "drange": (1, 100),
+            "sparsity": [0, 0.5, 0.9, 0.99],
+        }
+        for shape in [([2**i, 2**i],) for i in range(5, 8)]
+        for fmt in ["dense"]
+        for dtype in [np.float64]
+    ]
+)
+def normalization() -> torch.nn.Module:
+    """Normalizes adjacency matrix in GNN."""
+    return Normalization()
 
 
 if __name__ == "__main__":
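The new feature_scale and normalization benchmarks return FeatureScale() and Normalization(), whose definitions live in mpact.models.kernels and are not shown in this diff. As a rough sketch only, assuming they implement the usual GNN preprocessing steps their docstrings suggest (row-scaling the feature matrix and degree-normalizing the adjacency matrix), they might look something like:

import torch


class FeatureScale(torch.nn.Module):
    """Hypothetical sketch: scale each row of the feature matrix F by its row sum (D^-1 F)."""

    def forward(self, F):
        row_sum = torch.sum(F, dim=1)
        # Guard against empty rows so the scaling stays finite.
        inv = torch.where(row_sum == 0, torch.zeros_like(row_sum), 1.0 / row_sum)
        return torch.diag(inv) @ F


class Normalization(torch.nn.Module):
    """Hypothetical sketch: symmetric normalization of the adjacency matrix A (D^-1/2 A D^-1/2)."""

    def forward(self, A):
        deg = torch.sum(A, dim=1)
        inv_sqrt = torch.where(deg == 0, torch.zeros_like(deg), deg.pow(-0.5))
        d = torch.diag(inv_sqrt)
        return d @ A @ d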
@@ -201,6 +214,8 @@ def forward(self, x, y, z):
         "matvec",
         "elt_mul",
         "sddmm",
+        "feature_scale",
+        "normalization",
     ]
     if arguments.benchmark_filter:
         benchmark_list = arguments.benchmark_filter.split(",")
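Judging from the config dicts above, each of the two new benchmarks expands its list comprehension into one entry per shape, format, and dtype combination: three square sizes (32, 64, and 128) in dense format with float64, each swept over sparsity levels 0, 0.5, 0.9, and 0.99, and for now limited to the PyTorch sparse and dense eager backends (per the TODO). A quick check of the generated name for the smallest configuration, assuming the name template is evaluated as written:

import numpy as np

# Name template used by the decorator, evaluated for the smallest new config.
fmt, shape, dtype = "dense", ([2**5, 2**5],), np.float64
print(f"{fmt}_{shape}_{dtype.__name__}")  # dense_([32, 32],)_float64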
