Always Link libtorch and libtorch_cpu to ensure the functionality for AOT mode (pytorch#127381)

Fix pytorch#126763: The root cause is that when the vec ISA is invalid, the produced library does not link against any torch library; the build falls into a path that never links `libtorch` and `libtorch_cpu`.

https://github.com/pytorch/pytorch/blob/main/torch/_inductor/codecache.py#L1637-L1642

Pull Request resolved: pytorch#127381
Approved by: https://github.com/desertfire
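
A minimal sketch of how the failing path can be exercised, assuming a CPU device and the same torch.ops.aten.add example used in the regression test below (this is not the exact reproducer from pytorch#126763). Setting cpp.simdlen to 0 invalidates the vec ISA, which previously produced a shared library with no torch library linked in:

import torch
import torch._export
import torch._inductor.config

a = torch.randn(128, 2048)
b = torch.randn(128, 2048)

# cpp.simdlen = 0 disables C++ vectorization, so the vec ISA is treated as
# invalid, which is the code path that previously skipped linking
# libtorch and libtorch_cpu.
with torch._inductor.config.patch({"cpp.simdlen": 0}):
    so_path = torch._export.aot_compile(
        torch.ops.aten.add, args=(a, b), kwargs={"alpha": 2.0}, same_signature=False
    )

print(so_path)  # path to the AOT-compiled shared library
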
EikanWang authored and petrex committed Jun 5, 2024
1 parent bafa43c commit 5233c6c
Showing 2 changed files with 19 additions and 8 deletions.
22 changes: 14 additions & 8 deletions test/inductor/test_aot_inductor.py
@@ -12,6 +12,7 @@
 import torch
 import torch._export
 import torch._inductor
+import torch._inductor.config
 import torch.nn as nn
 from torch._dynamo.testing import rand_strided, same
 from torch._dynamo.utils import counters
@@ -1313,14 +1314,19 @@ def fn(a, b, alpha=1.0):
         with self.assertRaises(RuntimeError):
             torch._export.aot_compile(fn, args=(a, b), kwargs={"alpha": 2.0})
 
-        so_path = torch._export.aot_compile(
-            torch.ops.aten.add, args=(a, b), kwargs={"alpha": 2.0}, same_signature=False
-        )
-        kernel_runner = AOTIRunnerUtil.load_runner(self.device, so_path)
-        res = kernel_runner.run([a, b])
-        self.assertTrue(isinstance(res, list))
-        self.assertTrue(len(res) == 1)
-        self.assertEqual(fn(a, b, alpha=2.0), res[0])
+        for simdlen in [0, None]:
+            with torch._inductor.config.patch({"cpp.simdlen": simdlen}):
+                so_path = torch._export.aot_compile(
+                    torch.ops.aten.add,
+                    args=(a, b),
+                    kwargs={"alpha": 2.0},
+                    same_signature=False,
+                )
+                kernel_runner = AOTIRunnerUtil.load_runner(self.device, so_path)
+                res = kernel_runner.run([a, b])
+                self.assertTrue(isinstance(res, list))
+                self.assertTrue(len(res) == 1)
+                self.assertEqual(fn(a, b, alpha=2.0), res[0])
 
     def test_buffer_mutation_2(self):
         class Model(torch.nn.Module):
5 changes: 5 additions & 0 deletions torch/_inductor/codecache.py
@@ -1717,6 +1717,11 @@ def get_include_and_linking_paths(
         else:
             libs = ["omp"] if config.is_fbcode() else ["gomp"]
 
+    # For AOT mode, the produced library relies on torch cpu to set grad mode
+    # like aoti_torch_grad_mode_set_enabled
+    if aot_mode and sys.platform == "linux" and not config.is_fbcode():
+        libs += ["torch", "torch_cpu"]
+
     # Unconditionally import c10 for non-abi-compatible mode to use TORCH_CHECK - See PyTorch #108690
     if not config.abi_compatible:
         libs += ["c10"]