From 3fa29e0dabc50e9457b90158a0741a1aa464dd4e Mon Sep 17 00:00:00 2001
From: Turingg <117662096+Turingg@users.noreply.github.com>
Date: Wed, 6 Dec 2023 16:15:55 +0800
Subject: [PATCH] [Docathon][Fix COPY-FROM No.11-15](#6349)

---
 .../api/paddle/distributed/fleet/Fleet_cn.rst | 151 +++---------------
 docs/api/paddle/optimizer/Adadelta_cn.rst     |  18 +--
 docs/api/paddle/optimizer/AdamW_cn.rst        |  18 +--
 docs/api/paddle/optimizer/Adam_cn.rst         |  17 +-
 docs/api/paddle/optimizer/Adamax_cn.rst       |  17 +-
 5 files changed, 25 insertions(+), 196 deletions(-)

diff --git a/docs/api/paddle/distributed/fleet/Fleet_cn.rst b/docs/api/paddle/distributed/fleet/Fleet_cn.rst
index de60da44cc4..595aabd6e0d 100644
--- a/docs/api/paddle/distributed/fleet/Fleet_cn.rst
+++ b/docs/api/paddle/distributed/fleet/Fleet_cn.rst
@@ -29,34 +29,19 @@ None

 **代码示例 1**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
+COPY-FROM: paddle.distributed.fleet.Fleet.init:code-init-example1

 **代码示例 2**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init(is_collective=True)
+COPY-FROM: paddle.distributed.fleet.Fleet.init:code-init-example2

 **代码示例 3**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    role = fleet.PaddleCloudRoleMaker()
-    fleet.init(role)
+COPY-FROM: paddle.distributed.fleet.Fleet.init:code-init-example3

 **代码示例 4**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    strategy = fleet.DistributedStrategy()
-    fleet.init(strategy=strategy)
-
+COPY-FROM: paddle.distributed.fleet.Fleet.init:code-init-example4

 is_first_worker()
 '''''''''
@@ -69,13 +54,7 @@ True/False

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.is_first_worker()
-
-
+COPY-FROM: paddle.distributed.fleet.Fleet.is_first_worker

 worker_index()
 '''''''''
@@ -88,11 +67,7 @@ int

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.worker_index()
+COPY-FROM: paddle.distributed.fleet.Fleet.worker_index

 worker_num()
 '''''''''
@@ -105,11 +80,7 @@ int

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.worker_num()
+COPY-FROM: paddle.distributed.fleet.Fleet.worker_num

 is_worker()
 '''''''''
@@ -122,11 +93,7 @@ True/False

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.is_worker()
+COPY-FROM: paddle.distributed.fleet.Fleet.is_worker

 worker_endpoints(to_string=False)
 '''''''''
@@ -139,11 +106,7 @@ list/string

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.worker_endpoints()
+COPY-FROM: paddle.distributed.fleet.Fleet.worker_endpoints

 server_num()
 '''''''''
@@ -161,11 +124,7 @@ int

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.server_num()
+COPY-FROM: paddle.distributed.fleet.Fleet.server_num

 server_index()
 '''''''''
@@ -185,11 +144,7 @@ int

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.server_index()
+COPY-FROM: paddle.distributed.fleet.Fleet.server_index

 server_endpoints(to_string=False)
 '''''''''
@@ -208,11 +163,7 @@ list/string

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.server_endpoints()
+COPY-FROM: paddle.distributed.fleet.Fleet.server_endpoints

 is_server()
 '''''''''
@@ -231,11 +182,7 @@ True/False

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.is_server()
+COPY-FROM: paddle.distributed.fleet.Fleet.is_server

 barrier_worker()
 '''''''''
@@ -248,11 +195,7 @@ barrier_worker()

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.barrier_worker()
+COPY-FROM: paddle.distributed.fleet.Fleet.barrier_worker

 init_worker()
 '''''''''
@@ -265,11 +208,7 @@ worker 节点在训练前的初始化,包括通信模块,参数同步等

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.init_worker()
+COPY-FROM: paddle.distributed.fleet.Fleet.init_worker

 init_server(*args, **kwargs)
 '''''''''
@@ -282,11 +221,7 @@ server 节点的初始化,包括 server 端参数初始化,模型加载等

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.init_server()
+COPY-FROM: paddle.distributed.fleet.Fleet.init_server

 run_server()
 '''''''''
@@ -299,12 +234,7 @@ server 节点的运行,此命令会将 ParameterServer 的进程启动并常

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.init_server()
-    fleet.run_server()
+COPY-FROM: paddle.distributed.fleet.Fleet.run_server

 stop_worker()
 '''''''''
@@ -317,13 +247,7 @@ stop_worker()

 **代码示例**

-.. code-block:: python
-
-    import paddle.distributed.fleet as fleet
-    fleet.init()
-    fleet.init_worker()
-    "..."
-    fleet.stop_worker()
+COPY-FROM: paddle.distributed.fleet.Fleet.stop_worker

 save_inference_model(executor, dirname, feeded_var_names, target_vars, main_program=None, export_for_deployment=True)
 '''''''''
@@ -348,20 +272,7 @@ save_inference_model(executor, dirname, feeded_var_names, target_vars, main_prog

 **代码示例**

-.. code-block:: text
-
-    import paddle
-    paddle.enable_static()
-    import paddle.distributed.fleet as fleet
-
-    fleet.init()
-
-    # build net
-    # loss = Net()
-    # fleet.distributed_optimizer(...)
-
-    exe = paddle.static.Executor(paddle.CPUPlace())
-    fleet.save_inference_model(exe, "dirname", ["feed_varname"], [loss], paddle.static.default_main_program())
+COPY-FROM: paddle.distributed.fleet.Fleet.save_inference_model

 save_persistables(executor, dirname, main_program=None)
 '''''''''
@@ -381,19 +292,7 @@ save_persistables(executor, dirname, main_program=None)

 **代码示例**

-.. code-block:: text
-
-    import paddle
-    paddle.enable_static()
-    import paddle.distributed.fleet as fleet
-
-    fleet.init()
-
-    # build net
-    # fleet.distributed_optimizer(...)
-
-    exe = paddle.static.Executor(paddle.CPUPlace())
-    fleet.save_persistables(exe, "dirname", paddle.static.default_main_program())
+COPY-FROM: paddle.distributed.fleet.Fleet.save_persistables

 distributed_optimizer(optimizer, strategy=None)
 '''''''''
@@ -408,15 +307,7 @@ distributed_optimizer(optimizer, strategy=None)

 **代码示例**

-.. code-block:: python
-
-    import paddle
-    paddle.enable_static()
-    import paddle.distributed.fleet as fleet
-    fleet.init(is_collective=True)
-    strategy = fleet.DistributedStrategy()
-    optimizer = paddle.optimizer.SGD(learning_rate=0.001)
-    optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
+COPY-FROM: paddle.distributed.fleet.Fleet.distributed_optimizer


 qat_init(place, scope, test_program=None)
diff --git a/docs/api/paddle/optimizer/Adadelta_cn.rst b/docs/api/paddle/optimizer/Adadelta_cn.rst
index 7e8496129fc..426b59cee13 100644
--- a/docs/api/paddle/optimizer/Adadelta_cn.rst
+++ b/docs/api/paddle/optimizer/Adadelta_cn.rst
@@ -138,23 +138,7 @@ set_lr_scheduler(scheduler)

 **代码示例**

-.. code-block:: python
-    import paddle
-    linear = paddle.nn.Linear(10, 10)
-    adadelta = paddle.optimizer.Adadelta(weight_decay=0.01,
-            learning_rate=0.1, parameters=linear.parameters())
-    # set learning rate manually by class LRScheduler
-    scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2,4,6], gamma=0.8)
-    adadelta.set_lr_scheduler(scheduler)
-    lr = adadelta.get_lr()
-    print("current lr is {}".format(lr))
-    # current lr is 0.5
-    # set learning rate manually by another LRScheduler
-    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.1, step_size=5, gamma=0.6)
-    adadelta.set_lr_scheduler(scheduler)
-    lr = adadelta.get_lr()
-    print("current lr is {}".format(lr))
-    # current lr is 0.1
+COPY-FROM: paddle.optimizer.Adadelta.set_lr_scheduler

 get_lr()
 '''''''''
diff --git a/docs/api/paddle/optimizer/AdamW_cn.rst b/docs/api/paddle/optimizer/AdamW_cn.rst
index 76ceee8443e..3b87099affd 100755
--- a/docs/api/paddle/optimizer/AdamW_cn.rst
+++ b/docs/api/paddle/optimizer/AdamW_cn.rst
@@ -142,23 +142,7 @@ set_lr_scheduler(scheduler)

 **代码示例**

-.. code-block:: python
-    import paddle
-    linear = paddle.nn.Linear(10, 10)
-    adam = paddle.optimizer.AdamW(weight_decay=0.01,
-            learning_rate=0.1, parameters=linear.parameters())
-    # set learning rate manually by class LRScheduler
-    scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2,4,6], gamma=0.8)
-    adam.set_lr_scheduler(scheduler)
-    lr = adam.get_lr()
-    print("current lr is {}".format(lr))
-    # current lr is 0.5
-    # set learning rate manually by another LRScheduler
-    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.1, step_size=5, gamma=0.6)
-    adam.set_lr_scheduler(scheduler)
-    lr = adam.get_lr()
-    print("current lr is {}".format(lr))
-    # current lr is 0.1
+COPY-FROM: paddle.optimizer.AdamW.set_lr_scheduler

 get_lr()
 '''''''''
diff --git a/docs/api/paddle/optimizer/Adam_cn.rst b/docs/api/paddle/optimizer/Adam_cn.rst
index 4060c562311..3ac9a375498 100755
--- a/docs/api/paddle/optimizer/Adam_cn.rst
+++ b/docs/api/paddle/optimizer/Adam_cn.rst
@@ -161,22 +161,7 @@ set_lr_scheduler(scheduler)

 **代码示例**

-.. code-block:: python
-    import paddle
-    linear = paddle.nn.Linear(10, 10)
-    adam = paddle.optimizer.Adam(0.1, parameters=linear.parameters())
-    # set learning rate manually by class LRScheduler
-    scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2,4,6], gamma=0.8)
-    adam.set_lr_scheduler(scheduler)
-    lr = adam.get_lr()
-    print("current lr is {}".format(lr))
-    # current lr is 0.5
-    # set learning rate manually by another LRScheduler
-    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.1, step_size=5, gamma=0.6)
-    adam.set_lr_scheduler(scheduler)
-    lr = adam.get_lr()
-    print("current lr is {}".format(lr))
-    # current lr is 0.1
+COPY-FROM: paddle.optimizer.Adam.set_lr_scheduler

 get_lr()
 '''''''''
diff --git a/docs/api/paddle/optimizer/Adamax_cn.rst b/docs/api/paddle/optimizer/Adamax_cn.rst
index 71841790dfe..e3b1d366aa6 100755
--- a/docs/api/paddle/optimizer/Adamax_cn.rst
+++ b/docs/api/paddle/optimizer/Adamax_cn.rst
@@ -145,22 +145,7 @@ set_lr_scheduler(scheduler)

 **代码示例**

-.. code-block:: python
-    import paddle
-    linear = paddle.nn.Linear(10, 10)
-    adam = paddle.optimizer.Adamax(0.1, parameters=linear.parameters())
-    # set learning rate manually by class LRScheduler
-    scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2,4,6], gamma=0.8)
-    adam.set_lr_scheduler(scheduler)
-    lr = adam.get_lr()
-    print("current lr is {}".format(lr))
-    # current lr is 0.5
-    # set learning rate manually by another LRScheduler
-    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.1, step_size=5, gamma=0.6)
-    adam.set_lr_scheduler(scheduler)
-    lr = adam.get_lr()
-    print("current lr is {}".format(lr))
-    # current lr is 0.1
+COPY-FROM: paddle.optimizer.Adamax.set_lr_scheduler

 get_lr()
 '''''''''