From 509b0d2277f3b6c119a78c61cf78ffb572b0ad41 Mon Sep 17 00:00:00 2001
From: Keyan Pishdadian
Date: Tue, 5 Nov 2024 17:03:16 -0800
Subject: [PATCH] Don't display Cache Load Factor if compute kernel is not uvm
 caching (#2529)

Summary:
Pull Request resolved: https://github.com/pytorch/torchrec/pull/2529

This statistic may add confusion unless the `fused_uvm_caching` kernel is
being used; see also: T187360685

Reviewed By: PaulZhang12

Differential Revision: D65231346

fbshipit-source-id: f5410d1183e5682b82a0256ae2eddb38b9f1c767
---
 torchrec/distributed/planner/stats.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/torchrec/distributed/planner/stats.py b/torchrec/distributed/planner/stats.py
index 9455b7549..bc3f090f9 100644
--- a/torchrec/distributed/planner/stats.py
+++ b/torchrec/distributed/planner/stats.py
@@ -16,6 +16,7 @@

 from torch import nn

+from torchrec.distributed.embedding_types import EmbeddingComputeKernel
 from torchrec.distributed.planner.constants import BIGINT_DTYPE, NUM_POOLINGS
 from torchrec.distributed.planner.shard_estimators import _calculate_shard_io_sizes
 from torchrec.distributed.planner.storage_reservations import (
@@ -421,11 +422,14 @@ def log(
                 if hasattr(sharder, "fused_params") and sharder.fused_params
                 else None
             )
-            cache_load_factor = str(
-                so.cache_load_factor
-                if so.cache_load_factor is not None
-                else sharder_cache_load_factor
-            )
+            cache_load_factor = "None"
+            # Surfacing cache load factor does not make sense if not using uvm caching.
+            if so.compute_kernel == EmbeddingComputeKernel.FUSED_UVM_CACHING.value:
+                cache_load_factor = str(
+                    so.cache_load_factor
+                    if so.cache_load_factor is not None
+                    else sharder_cache_load_factor
+                )
             hash_size = so.tensor.shape[0]
             param_table.append(
                 [
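
The following is a minimal, self-contained sketch (not the torchrec implementation) of the gating logic the patch introduces: the cache load factor is only surfaced when the sharding option's compute kernel is fused_uvm_caching, and otherwise the stats row shows "None". ComputeKernel, ShardingOption, and format_cache_load_factor are hypothetical stand-ins for the real torchrec types, used here purely for illustration.

# Minimal sketch (NOT the torchrec implementation) of the gating logic above.
# ComputeKernel, ShardingOption, and format_cache_load_factor are hypothetical
# stand-ins for the real torchrec types.
from dataclasses import dataclass
from enum import Enum
from typing import Optional


class ComputeKernel(Enum):
    FUSED = "fused"
    FUSED_UVM_CACHING = "fused_uvm_caching"


@dataclass
class ShardingOption:
    compute_kernel: str
    cache_load_factor: Optional[float] = None


def format_cache_load_factor(
    so: ShardingOption, sharder_cache_load_factor: Optional[float]
) -> str:
    # Default to the string "None" so every row in the stats table gets a value.
    cache_load_factor = "None"
    # Only report a cache load factor for the uvm-caching kernel, where it is meaningful.
    if so.compute_kernel == ComputeKernel.FUSED_UVM_CACHING.value:
        # Prefer the per-sharding-option value; fall back to the sharder-level default.
        cache_load_factor = str(
            so.cache_load_factor
            if so.cache_load_factor is not None
            else sharder_cache_load_factor
        )
    return cache_load_factor


# Example: only the uvm-caching row reports a load factor.
print(format_cache_load_factor(ShardingOption("fused", 0.2), 0.5))               # None
print(format_cache_load_factor(ShardingOption("fused_uvm_caching", None), 0.5))  # 0.5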