diff --git a/fairscale/nn/data_parallel/fully_sharded_data_parallel.py b/fairscale/nn/data_parallel/fully_sharded_data_parallel.py
index cdd6e6e8c..5584e6d02 100644
--- a/fairscale/nn/data_parallel/fully_sharded_data_parallel.py
+++ b/fairscale/nn/data_parallel/fully_sharded_data_parallel.py
@@ -1821,7 +1821,10 @@ def _post_reduction_hook(self, param: Parameter, reduced_grad: torch.Tensor) ->
                 param._saved_grad_shard.data += reduced_grad.data
                 reduced_grad = param._saved_grad_shard.data
             elif (param.grad is None) and self.fp32_reduce_scatter:
-                param.main_grad = reduced_grad.data
+                if getattr(param, "main_grad", None) is not None:
+                    param.main_grad.add_(reduced_grad.data)
+                else:
+                    param.main_grad = reduced_grad.data
 
         # Optionally move gradients to CPU, typically used if one is running the optimizer on the CPU. Once the full
         # backwards pass completes, we will set `.grad` to the CPU copy.