From 2ef0e2014830883f315a5c95a151643760ba8c73 Mon Sep 17 00:00:00 2001 From: Xiaodong Wang Date: Wed, 25 Sep 2024 23:56:48 -0700 Subject: [PATCH] Change init method from fork-server to spawn (#2427) Summary: Pull Request resolved: https://github.com/pytorch/torchrec/pull/2427 It seems AMD GPU runtime doesn't quite work with forkserver for some reason that we still need to debug. Basically hipMalloc will fail in subprocesses. So just change it to spawn - I think it should be fine? Reviewed By: joebos Differential Revision: D63311340 --- torchrec/distributed/test_utils/multi_process.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/torchrec/distributed/test_utils/multi_process.py b/torchrec/distributed/test_utils/multi_process.py index 5cfd3339f..f3233e9b0 100644 --- a/torchrec/distributed/test_utils/multi_process.py +++ b/torchrec/distributed/test_utils/multi_process.py @@ -24,6 +24,11 @@ ) +# AMD's HIP runtime doesn't seem to work with forkserver; hipMalloc will fail +# Therefore we use spawn for HIP runtime until AMD fixes the issue +_MP_INIT_MODE = "forkserver" if torch.version.hip is None else "spawn" + + class MultiProcessContext: def __init__( self, @@ -126,7 +131,7 @@ def _run_multi_process_test( # pyre-ignore **kwargs, ) -> None: - ctx = multiprocessing.get_context("forkserver") + ctx = multiprocessing.get_context(_MP_INIT_MODE) processes = [] for rank in range(world_size): kwargs["rank"] = rank @@ -152,7 +157,7 @@ def _run_multi_process_test_per_rank( world_size: int, kwargs_per_rank: List[Dict[str, Any]], ) -> None: - ctx = multiprocessing.get_context("forkserver") + ctx = multiprocessing.get_context(_MP_INIT_MODE) processes = [] for rank in range(world_size): kwargs = {}