From 78b5acba1624ef45cc8e1af76a6e31d9dc76c0b1 Mon Sep 17 00:00:00 2001
From: Danielle Pintz
Date: Mon, 18 Sep 2023 18:31:16 -0700
Subject: [PATCH] Update docstring of handle_sharded_tensor_elasticity

Differential Revision: D49399892
---
 torchsnapshot/manifest_ops.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/torchsnapshot/manifest_ops.py b/torchsnapshot/manifest_ops.py
index 9e45849..138b037 100644
--- a/torchsnapshot/manifest_ops.py
+++ b/torchsnapshot/manifest_ops.py
@@ -126,8 +126,8 @@ def handle_sharded_tensor_elasticity(
     :class:`ShardedTensor` can be elastic in several ways:
 
     - A rank loads a portion of a sharded tensor different from what it saved
-    - A rank loads a sharded tensor that it did not participate in saving
-    - A rank doesn't load a sharded tensor that it participated in saving
+    - A rank loads a sharded tensor that did not participate in saving
+    - A rank doesn't load a sharded tensor that participated in saving
 
     The first scenario is taken care of by :func:`get_manifest_for_rank`,
     which makes all shards available to all instances of :class:`ShardedTensorEntry`.
@@ -143,7 +143,7 @@ def handle_sharded_tensor_elasticity(
 
     NOTE: this function only takes effect if all sharded tensors are at the
     root of the state dict. This means the elastic behavior is supported for
-    most model but not supported for most optimizers.
+    most models but not supported for most optimizers.
 
     Args:
         manifest: The local manifest for the rank.
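
Not part of the patch itself, but for context on the NOTE in the docstring above: a minimal sketch of the two state-dict layouts it contrasts, using plain Python dicts with string placeholders standing in for ShardedTensor objects. The key names are hypothetical illustrations, not identifiers from torchsnapshot.

# Model state dict: sharded tensors sit directly at the root keys, so the
# elastic behavior described in the docstring can apply to them.
model_state_dict = {
    "embedding.weight": "<ShardedTensor placeholder>",
    "linear.weight": "<ShardedTensor placeholder>",
}

# Optimizer state dict (layout of torch.optim optimizers such as Adam):
# tensors live one level down, nested under "state", not at the root,
# so handle_sharded_tensor_elasticity would not take effect for them.
optimizer_state_dict = {
    "state": {
        0: {"exp_avg": "<ShardedTensor placeholder>"},
    },
    "param_groups": [{"lr": 0.01, "params": [0]}],
}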