From 6e3e13cb280b684ebedb5c2aecb36efb545ebfce Mon Sep 17 00:00:00 2001
From: Logan Adams <114770087+loadams@users.noreply.github.com>
Date: Fri, 13 Dec 2024 15:35:12 -0800
Subject: [PATCH] Remove warnings from autodoc and sphinx (#6788)

Co-authored-by: Olatunji Ruwase
---
 deepspeed/runtime/fp16/onebit/zoadam.py |  4 +++-
 deepspeed/runtime/lr_schedules.py       |  4 ++--
 docs/code-docs/source/monitor.rst       | 18 +++++++++---------
 3 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/deepspeed/runtime/fp16/onebit/zoadam.py b/deepspeed/runtime/fp16/onebit/zoadam.py
index 803bd929742d..70282ec41714 100644
--- a/deepspeed/runtime/fp16/onebit/zoadam.py
+++ b/deepspeed/runtime/fp16/onebit/zoadam.py
@@ -12,9 +12,11 @@


 class ZeroOneAdam(torch.optim.Optimizer):
-    """Implements the 0/1 Adam algorithm. Currently GPU-only.
+    """
+    Implements the 0/1 Adam algorithm. Currently GPU-only.
     For usage example please see https://www.deepspeed.ai/tutorials/zero-one-adam/
     For technical details please read https://arxiv.org/abs/2202.06009
+
     Arguments:
         params (iterable): iterable of parameters to optimize or dicts defining
             parameter groups.
diff --git a/deepspeed/runtime/lr_schedules.py b/deepspeed/runtime/lr_schedules.py
index f25a19e8e499..899358e2c5ef 100755
--- a/deepspeed/runtime/lr_schedules.py
+++ b/deepspeed/runtime/lr_schedules.py
@@ -274,7 +274,7 @@ class LRRangeTest(object):
     """Sets the learning rate of each parameter group according to
     learning rate range test (LRRT) policy. The policy increases learning
     rate starting from a base value with a constant frequency, as detailed in
-    the paper `A disciplined approach to neural network hyper-parameters: Part1`_.
+    the paper `A disciplined approach to neural network hyper-parameters: Part 1 <https://arxiv.org/abs/1803.09820>`_

     LRRT policy is used for finding maximum LR that trains a model without divergence, and
     can be used to configure the LR boundaries for Cyclic LR schedules.
@@ -379,7 +379,7 @@ class OneCycle(object):
     1CLR policy changes the learning rate after every batch.
     `step` should be called after a batch has been used for training.

-    This implementation was adapted from the github repo: `pytorch/pytorch`_
+    This implementation was adapted from the github repo: `PyTorch <https://github.com/pytorch/pytorch>`_.

     Args:
         optimizer (Optimizer): Wrapped optimizer.
diff --git a/docs/code-docs/source/monitor.rst b/docs/code-docs/source/monitor.rst
index 694c72b9b870..b185ed433c1c 100644
--- a/docs/code-docs/source/monitor.rst
+++ b/docs/code-docs/source/monitor.rst
@@ -9,15 +9,15 @@ overview of what DeepSpeed will log automatically.
    :header: "Field", "Description", "Condition"
    :widths: 20, 20, 10

-   `Train/Samples/train_loss`,The training loss.,None
-   `Train/Samples/lr`,The learning rate during training.,None
-   `Train/Samples/loss_scale`,The loss scale when training using `fp16`.,`fp16` must be enabled.
-   `Train/Eigenvalues/ModelBlockParam_{i}`,Eigen values per param block.,`eigenvalue` must be enabled.
-   `Train/Samples/elapsed_time_ms_forward`,The global duration of the forward pass.,`flops_profiler.enabled` or `wall_clock_breakdown`.
-   `Train/Samples/elapsed_time_ms_backward`,The global duration of the forward pass.,`flops_profiler.enabled` or `wall_clock_breakdown`.
-   `Train/Samples/elapsed_time_ms_backward_inner`,The backward time that does not include the gradient reduction time. Only in cases where the gradient reduction is not overlapped, if it is overlapped then the inner time should be about the same as the entire backward time.,`flops_profiler.enabled` or `wall_clock_breakdown`.
-   `Train/Samples/elapsed_time_ms_backward_allreduce`,The global duration of the allreduce operation.,`flops_profiler.enabled` or `wall_clock_breakdown`.
-   `Train/Samples/elapsed_time_ms_step`,The optimizer step time,`flops_profiler.enabled` or `wall_clock_breakdown`.
+   `Train/Samples/train_loss`,"The training loss.",None
+   `Train/Samples/lr`,"The learning rate during training.",None
+   `Train/Samples/loss_scale`,"The loss scale when training using `fp16`.",`fp16` must be enabled.
+   `Train/Eigenvalues/ModelBlockParam_{i}`,"Eigen values per param block.",`eigenvalue` must be enabled.
+   `Train/Samples/elapsed_time_ms_forward`,"The global duration of the forward pass.",`flops_profiler.enabled` or `wall_clock_breakdown`.
+   `Train/Samples/elapsed_time_ms_backward`,"The global duration of the backward pass.",`flops_profiler.enabled` or `wall_clock_breakdown`.
+   `Train/Samples/elapsed_time_ms_backward_inner`,"The backward time that does not include the gradient reduction time. This is only meaningful when gradient reduction is not overlapped; if it is overlapped, the inner time should be about the same as the entire backward time.",`flops_profiler.enabled` or `wall_clock_breakdown`.
+   `Train/Samples/elapsed_time_ms_backward_allreduce`,"The global duration of the allreduce operation.",`flops_profiler.enabled` or `wall_clock_breakdown`.
+   `Train/Samples/elapsed_time_ms_step`,"The optimizer step time.",`flops_profiler.enabled` or `wall_clock_breakdown`.

 TensorBoard
 -----------
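
Note on the monitor.rst table touched above: the fields it documents are only emitted once a monitoring backend is enabled in the DeepSpeed config. A minimal sketch of such a config, assuming the TensorBoard backend (the output path and job name are illustrative placeholders; `wall_clock_breakdown` is one of the flags the "Condition" column refers to):

    # Sketch of a DeepSpeed config dict enabling TensorBoard monitoring
    # and the wall-clock timers whose fields appear in the table above.
    ds_config = {
        "train_batch_size": 8,             # placeholder value
        "wall_clock_breakdown": True,      # emits the elapsed_time_ms_* fields
        "tensorboard": {
            "enabled": True,
            "output_path": "output/ds_logs/",  # illustrative path
            "job_name": "my_train_job"         # illustrative job name
        }
    }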