From 260a4f7a7acadd0a8adfa09288805a66f2277ecf Mon Sep 17 00:00:00 2001
From: qiuwenbo
Date: Tue, 6 Jun 2023 18:11:51 +0800
Subject: [PATCH 01/22] [Attempt] Add a property to tensor; the property is a
 fixed value 1
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 paddle/fluid/pybind/eager_properties.cc | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
index 26b43442664e0..1a78e8027374b 100644
--- a/paddle/fluid/pybind/eager_properties.cc
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -285,6 +285,15 @@ PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
+PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) {
+  EAGER_TRY
+  int grad_fn_value = 1;
+  return ToPyObject(grad_fn_value);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+
+
 struct PyGetSetDef variable_properties[] = {
     {"grad",
      (getter)tensor_properties_get_grad,
@@ -320,6 +329,7 @@ struct PyGetSetDef variable_properties[] = {
   {"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr},
   {"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr},
   {"is_leaf", (getter)tensor_properties_is_leaf, nullptr, nullptr, nullptr},
+  {"grad_fn", (getter)tensor_properties_get_grad_fn, nullptr, nullptr, nullptr},
   {nullptr, nullptr, nullptr, nullptr, nullptr}};
 
 // variable_properties for core.eager.StringTensor
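The placeholder can already be smoke-tested from the Python side. A minimal sketch, assuming a locally built Paddle with this patch applied; at this stage grad_fn is only the hard-coded integer, not a real grad node:

import paddle

x = paddle.ones([2, 2])
print(x.grad_fn)  # prints 1, the fixed value wired through ToPyObject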
From 922f79f1e10680623ebfd98d2a976ce0b1b843aa Mon Sep 17 00:00:00 2001
From: qiuwenbo
Date: Tue, 20 Jun 2023 18:10:07 +0800
Subject: [PATCH 02/22] Expose gradnode, and add a new (test-only) method on
 gradnode so that it can be accessed from the Python side
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 paddle/fluid/eager/grad_node_info.h     |  4 ++
 paddle/fluid/pybind/eager_properties.cc | 52 ++++++++++++++++++++++++-
 paddle/fluid/pybind/pybind.cc           |  8 ++++
 3 files changed, 63 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/eager/grad_node_info.h b/paddle/fluid/eager/grad_node_info.h
index 19012ea644540..564a33f981a13 100644
--- a/paddle/fluid/eager/grad_node_info.h
+++ b/paddle/fluid/eager/grad_node_info.h
@@ -251,6 +251,10 @@ class GradNodeBase {
     return true;
   }
 
+  // Add a method that returns the fixed value 1, for testing
+  int64_t GetNextHookId() { return 1; }
+
+
   /**
    * Apply GradientHook
    * **/
diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
index 1a78e8027374b..bd0557fc76ebe 100644
--- a/paddle/fluid/pybind/eager_properties.cc
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -285,7 +285,7 @@ PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) {
+PyObject* tensor_properties_qiutest(TensorObject* self, void* closure) {
   EAGER_TRY
   int grad_fn_value = 1;
   return ToPyObject(grad_fn_value);
@@ -293,6 +293,50 @@ PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
 }
 
+PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) {
+  EAGER_TRY
+  if (!self->tensor.defined()) {
+    // Handle undefined tensors if necessary; otherwise, return nullptr or an appropriate PyObject.
+    // In this case, I will return Py_None.
+    Py_INCREF(Py_None);
+    return Py_None;
+  }
+
+  // Get GradNode from the tensor
+  auto meta = egr::EagerUtils::nullable_autograd_meta(self->tensor);  // If meta exists, get the GradNode
+
+  // Print meta for debugging
+  std::cout << "meta is " << meta << std::endl;
+
+  if (meta) {
+    // Get the GradNode from meta
+    auto grad_node = meta->GradNode();  // Convert GradNode to a Python object
+    // The conversion will depend on the structure of GradNode.
+
+    // Convert the gradnode object into a Python object
+    if (!grad_node) {
+      Py_INCREF(Py_None);
+      return Py_None;
+    }
+
+    // Get the Python converter
+    py::object py_obj = py::cast(grad_node, py::return_value_policy::reference);
+    py::handle py_handle = py::handle(py_obj);
+    PyObject* py_grad_node = py_handle.ptr();
+    Py_INCREF(py_grad_node);  // Increase the reference count so the object is not destroyed on return
+
+
+
+    return py_grad_node;
+  } else {
+    // If meta does not exist, return an appropriate Python object (e.g., None or a special value).
+    Py_INCREF(Py_None);
+    return Py_None;
+  }
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
+
+
 struct PyGetSetDef variable_properties[] = {
     {"grad",
      (getter)tensor_properties_get_grad,
@@ -320,6 +329,7 @@ struct PyGetSetDef variable_properties[] = {
   {"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr},
   {"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr},
   {"is_leaf", (getter)tensor_properties_is_leaf, nullptr, nullptr, nullptr},
   {"grad_fn", (getter)tensor_properties_get_grad_fn, nullptr, nullptr, nullptr},
+  {"qiutest", (getter)tensor_properties_qiutest, nullptr, nullptr, nullptr},
   {nullptr, nullptr, nullptr, nullptr, nullptr}};
 
 // variable_properties for core.eager.StringTensor
@@ -349,5 +394,10 @@ struct PyGetSetDef string_tensor_variable_properties[] = {
      nullptr},
   {nullptr, nullptr, nullptr, nullptr, nullptr}};
 
+
 }  // namespace pybind
 }  // namespace paddle
+
+
+
+
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index f4dfb133c1c36..cec5d1202b357 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -13,6 +13,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 License for the specific language governing permissions and
 limitations under the License. */
 #include <Python.h>
+#include "paddle/fluid/eager/grad_node_info.cc"
+
 // Avoid a problem with copysign defined in pyconfig.h on Windows.
 #ifdef copysign
 #undef copysign
 #endif
@@ -758,6 +760,12 @@ PYBIND11_MODULE(libpaddle, m) {
     }
   });
 
+
+  py::class_<egr::GradNodeBase>(m, "GradNodeBase")
+      // GetNextHookId: expose a function that always returns a fixed value
+      .def("get_next_hook_id", &egr::GradNodeBase::GetNextHookId);
+
+
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   m.def("cudnn_version", &platform::DnnVersion);
   m.def("gpu_memory_available", []() {
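With GradNodeBase now registered, the temporary binding can be probed from Python. A rough sketch, assuming a build with this patch applied; qiutest is the renamed constant property, and grad_fn now returns the tensor's GradNodeBase:

import paddle

x = paddle.randn([2, 2])
x.stop_gradient = False
y = x * 2

print(y.qiutest)                     # still the fixed test value 1
print(y.grad_fn.get_next_hook_id())  # test method on GradNodeBase, always returns 1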
From 4887161582a08c00aabd8ec7c81f50b96eb73ed0 Mon Sep 17 00:00:00 2001
From: qiuwenbo
Date: Fri, 7 Jul 2023 14:21:45 +0800
Subject: [PATCH 03/22] Develop the grad_fn and next_functions APIs, expose
 them to the Python side, and do some cleanup
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 paddle/fluid/eager/grad_node_info.cc    | 16 +++++
 paddle/fluid/eager/grad_node_info.h     |  4 +-
 paddle/fluid/pybind/eager_properties.cc | 86 ++++++++++---------------
 paddle/fluid/pybind/eager_utils.cc      |  9 +++
 paddle/fluid/pybind/eager_utils.h       |  3 +
 paddle/fluid/pybind/pybind.cc           | 11 ++--
 6 files changed, 69 insertions(+), 60 deletions(-)

diff --git a/paddle/fluid/eager/grad_node_info.cc b/paddle/fluid/eager/grad_node_info.cc
index 9d1c76197508e..875ae1407bf09 100644
--- a/paddle/fluid/eager/grad_node_info.cc
+++ b/paddle/fluid/eager/grad_node_info.cc
@@ -559,4 +559,20 @@ void GradNodeBase::HandleComplexGradToRealGrad(
   }
 }
 
+std::vector<std::shared_ptr<GradNodeBase>> GradNodeBase::NextFunctions() {
+  std::vector<std::shared_ptr<GradNodeBase>> next_nodes;
+  const paddle::small_vector<std::vector<GradSlotMeta>, kSlotSmallVectorSize>&
+      metas = OutputMeta();
+
+  for (const auto& meta_list : metas) {
+    for (const GradSlotMeta& meta : meta_list) {
+      const auto& edge = meta.GetEdge();
+      std::shared_ptr<GradNodeBase> next_node = edge.GetMutableGradNode();
+      next_nodes.push_back(next_node);
+    }
+  }
+
+  return next_nodes;
+}
+
 }  // namespace egr
diff --git a/paddle/fluid/eager/grad_node_info.h b/paddle/fluid/eager/grad_node_info.h
index 564a33f981a13..b516d5cf84e8c 100644
--- a/paddle/fluid/eager/grad_node_info.h
+++ b/paddle/fluid/eager/grad_node_info.h
@@ -251,6 +251,10 @@ class GradNodeBase {
     return true;
   }
 
-  // Add a method that returns the fixed value 1, for testing
-  int64_t GetNextHookId() { return 1; }
-
+  std::vector<std::shared_ptr<GradNodeBase>> NextFunctions();
 
   /**
    * Apply GradientHook
    * **/
diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
index bd0557fc76ebe..8c5dbfce0437d 100644
--- a/paddle/fluid/pybind/eager_properties.cc
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -23,6 +23,7 @@ limitations under the License. */
 #include "paddle/fluid/eager/api/utils/tensor_utils.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/utils.h"
+#include "paddle/fluid/imperative/op_base.h"
 #include "paddle/fluid/memory/allocation/allocator.h"
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/platform/enforce.h"
@@ -32,6 +33,7 @@ limitations under the License. */
 #include "paddle/phi/common/data_type.h"
 #include "paddle/phi/core/compat/convert_utils.h"
 #include "paddle/phi/core/dense_tensor.h"
+
 #pragma GCC diagnostic ignored "-Wwrite-strings"
 
 namespace paddle {
@@ -285,59 +287,41 @@ PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyObject* tensor_properties_qiutest(TensorObject* self, void* closure) {
-  EAGER_TRY
-  int grad_fn_value = 1;
-  return ToPyObject(grad_fn_value);
-  EAGER_CATCH_AND_THROW_RETURN_NULL
-}
-
-
 PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) {
-  EAGER_TRY
-  if (!self->tensor.defined()) {
-    // Handle undefined tensors if necessary; otherwise, return nullptr or an appropriate PyObject.
-    // In this case, I will return Py_None.
-    Py_INCREF(Py_None);
-    return Py_None;
-  }
-
-  // Get GradNode from the tensor
-  auto meta = egr::EagerUtils::nullable_autograd_meta(self->tensor);  // If meta exists, get the GradNode
-
-  // Print meta for debugging
-  std::cout << "meta is " << meta << std::endl;
-
-  if (meta) {
-    // Get the GradNode from meta
-    auto grad_node = meta->GradNode();  // Convert GradNode to a Python object
-    // The conversion will depend on the structure of GradNode.
+  EAGER_TRY
+  if (!self->tensor.defined()) {
+    // Handle undefined tensors if necessary; otherwise, return nullptr or an
+    // appropriate PyObject. In this case, I will return Py_None.
+    Py_INCREF(Py_None);
+    return Py_None;
+  }
 
-    // Convert the gradnode object into a Python object
-    if (!grad_node) {
-      Py_INCREF(Py_None);
-      return Py_None;
-    }
+  // Get GradNode from the tensor
+  auto meta = egr::EagerUtils::nullable_autograd_meta(
+      self->tensor);  // If meta exists, get the GradNode
 
-    // Get the Python converter
-    py::object py_obj = py::cast(grad_node, py::return_value_policy::reference);
-    py::handle py_handle = py::handle(py_obj);
-    PyObject* py_grad_node = py_handle.ptr();
-    Py_INCREF(py_grad_node);  // Increase the reference count so the object is not destroyed on return
+  if (meta) {
+    // Get the GradNode from meta
+    auto grad_node = meta->GradNode();  // Convert GradNode to a Python object
+    // The conversion will depend on the structure of GradNode.
+    if (!grad_node) {
+      Py_INCREF(Py_None);
+      return Py_None;
+    }
 
+    PyObject* py_grad_node = ToPyObject(grad_node);
 
-
-    return py_grad_node;
-  } else {
-    // If meta does not exist, return an appropriate Python object (e.g., None or a special value).
-    Py_INCREF(Py_None);
-    return Py_None;
-  }
-  EAGER_CATCH_AND_THROW_RETURN_NULL
-}
-
-
-
+    return py_grad_node;
+  } else {
+    // If meta does not exist, return an appropriate Python object (e.g., None
+    // or a special value).
+    Py_INCREF(Py_None);
+    return Py_None;
+  }
+  EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
 struct PyGetSetDef variable_properties[] = {
     {"grad",
      (getter)tensor_properties_get_grad,
@@ -373,8 +357,11 @@ struct PyGetSetDef variable_properties[] = {
   {"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr},
   {"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr},
   {"is_leaf", (getter)tensor_properties_is_leaf, nullptr, nullptr, nullptr},
-  {"grad_fn", (getter)tensor_properties_get_grad_fn, nullptr, nullptr, nullptr},
-  {"qiutest", (getter)tensor_properties_qiutest, nullptr, nullptr, nullptr},
+  {"grad_fn",
+   (getter)tensor_properties_get_grad_fn,
+   nullptr,
+   nullptr,
+   nullptr},
   {nullptr, nullptr, nullptr, nullptr, nullptr}};
 
 // variable_properties for core.eager.StringTensor
@@ -394,10 +381,5 @@ struct PyGetSetDef string_tensor_variable_properties[] = {
      nullptr},
   {nullptr, nullptr, nullptr, nullptr, nullptr}};
 
-
 }  // namespace pybind
 }  // namespace paddle
-
-
-
-
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 0312ad8d96041..de323996f0bd0 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -988,6 +988,15 @@ paddle::optional<paddle::Tensor> GetOptionalTensorFromArgs(
   }
 }
 
+
+PyObject* ToPyObject(egr::GradNodeBase* grad_node) {
+  py::object py_obj = py::cast(grad_node, py::return_value_policy::reference);
+  py::handle py_handle = py::handle(py_obj);
+  PyObject* py_grad_node = py_handle.ptr();
+  Py_INCREF(py_grad_node);
+  return py_grad_node;
+}
+
 static paddle::Tensor& GetTensorFromPyObject(const std::string& op_type,
                                              const std::string& arg_name,
                                              PyObject* obj,
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 8f83e8f880f07..3d4b907f35648 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -37,6 +37,7 @@ typedef SSIZE_T ssize_t;
 #include "paddle/utils/pybind.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"
+#include "paddle/fluid/eager/grad_node_info.h"
 namespace paddle {
 class CustomOpKernelContext;
 namespace framework {
@@ -115,6 +116,8 @@ PyObject* ToPyObject(
     const std::unordered_map<std::string, std::vector<paddle::Tensor>>& value);
 PyObject* ToPyObject(const paddle::framework::Vocab& value);
 
+PyObject* ToPyObject(egr::GradNodeBase* grad_node);
+
 class PyTensorHook : public egr::TensorHook {
  public:
   explicit PyTensorHook(PyObject* func) : py_func_(func) {
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index cec5d1202b357..e258e501161e5 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 License for the specific language governing permissions and
 limitations under the License. */
 #include <Python.h>
-#include "paddle/fluid/eager/grad_node_info.cc"
+#include "paddle/fluid/eager/grad_node_info.h"
 
 // Avoid a problem with copysign defined in pyconfig.h on Windows.
 #ifdef copysign
 #undef copysign
@@ -760,11 +760,12 @@ PYBIND11_MODULE(libpaddle, m) {
     }
   });
 
-
   py::class_<egr::GradNodeBase>(m, "GradNodeBase")
-      // GetNextHookId: expose a function that always returns a fixed value
-      .def("get_next_hook_id", &egr::GradNodeBase::GetNextHookId);
-
+      .def("name", &egr::GradNodeBase::name)
+      .def_property_readonly("next_functions",
+                             &egr::GradNodeBase::NextFunctions)
+      .def("input_meta", &egr::GradNodeBase::InputMeta)
+      .def("output_meta", &egr::GradNodeBase::OutputMeta);
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   m.def("cudnn_version", &platform::DnnVersion);
   m.def("gpu_memory_available", []() {
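Together the two APIs allow the reverse graph to be walked from Python. A minimal usage sketch; it mirrors the unit test added in the next patch, and the node names come from that test:

import paddle

x = paddle.randn([1, 3, 24, 24])
x.stop_gradient = False
y = x + x**2

node = y.grad_fn                 # root grad node, an AddGradNode here
print(node.name())
for nxt in node.next_functions:  # one entry per output edge of the node
    if nxt:
        print(nxt.name())        # e.g. GradNodeAccumulation, PowGradNode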
From f54aff8001581f5f453c00b989197bf63fd9d14e Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Fri, 7 Jul 2023 06:50:32 +0000
Subject: [PATCH 04/22] Add a unit test
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../test_grad_fn_and_next_functions.py        | 93 +++++++++++++++++++
 1 file changed, 93 insertions(+)
 create mode 100644 python/paddle/fluid/tests/unittests/test_grad_fn_and_next_functions.py

diff --git a/python/paddle/fluid/tests/unittests/test_grad_fn_and_next_functions.py b/python/paddle/fluid/tests/unittests/test_grad_fn_and_next_functions.py
new file mode 100644
index 0000000000000..f21df257af5a7
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_grad_fn_and_next_functions.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import paddle
+import paddle.nn as nn
+
+class Testmodel(nn.Layer):
+    def __init__(self):
+        super(Testmodel, self).__init__()
+
+    def forward(self, x):
+        y = x ** 2
+        y = x + y
+        return y
+
+class TestAnonmousSurvey(unittest.TestCase):
+
+    def init_graph(self):
+        """ define reversed graph
+
+        func_name [str]: represents the name of the operator node
+        next_funcs [dict]: represents the operator node
+        """
+        self.grad_fn_1 = {
+            "func_name": "GradNodeAccumulation",
+            "next_funcs": {}
+        }
+        self.grad_fn_2 = {
+            "func_name": "PowGradNode",
+            "next_funcs": {
+                "GradNodeAccumulation": self.grad_fn_1
+            }
+        }
+        self.grad_fn_3 = {
+            "func_name": "AddGradNode",
+            "next_funcs": {
+                "GradNodeAccumulation": self.grad_fn_1,
+                "PowGradNode": self.grad_fn_2
+            }
+        }
+        self.output_grad_fn = {
+            "grad_fn": self.grad_fn_3
+        }
+
+    def init_data(self):
+        """ define output of model
+
+        the final output will be saved in self.output
+        """
+        model = Testmodel()
+        x = paddle.randn([1, 3, 24, 24])
+        x.stop_gradient = False
+        self.output = model(x)
+
+
+    def setUp(self):
+        self.init_graph()
+        self.init_data()
+
+
+    def test_grad_fn_and_next_funs(self):
+        self.check_func(self.output.grad_fn, self.output_grad_fn["grad_fn"])
+
+
+    def check_func(self, grad_fn, grad_fn_json):
+        """check each node
+
+        :param grad_fn: grad_fn of node
+        :return grad_fn_json: gead_node_json of node
+        """
+        # print(grad_fn.name())
+        # assert func name
+        self.assertEqual(grad_fn.name(), grad_fn_json["func_name"])
+        # Recursively test other nodes
+        if hasattr(grad_fn, 'next_functions') and grad_fn.next_functions[0]:
+            next_funcs_json = grad_fn_json["next_funcs"]
+            for u in grad_fn.next_functions:
+                self.check_func(u, next_funcs_json[u.name()])
+
+unittest.main()
\ No newline at end of file

From fa9384c7827b77364152d564fdc15d50a993673f Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Fri, 7 Jul 2023 11:14:19 +0000
Subject: [PATCH 05/22] Improve code style
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 paddle/fluid/pybind/eager_utils.cc            |  1 -
 paddle/fluid/pybind/eager_utils.h             |  2 +-
 .../test_grad_fn_and_next_functions.py        | 60 ++++++++++---------
 3 files changed, 32 insertions(+), 31 deletions(-)

diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 74370b1b9477d..e365819928e66 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -1006,7 +1006,6 @@ paddle::optional<paddle::Tensor> GetOptionalTensorFromArgs(
   }
 }
 
-
 PyObject* ToPyObject(egr::GradNodeBase* grad_node) {
   py::object py_obj = py::cast(grad_node, py::return_value_policy::reference);
   py::handle py_handle = py::handle(py_obj);
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 08808a911705a..208d2f25e7d21 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -21,6 +21,7 @@ typedef SSIZE_T ssize_t;
 #undef copysign
 #endif
 
+#include "paddle/fluid/eager/grad_node_info.h"
 #include "paddle/fluid/eager/hooks.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/lod_tensor_array.h"
@@ -37,7 +38,6 @@ typedef SSIZE_T ssize_t;
 #include "paddle/utils/pybind.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"
-#include "paddle/fluid/eager/grad_node_info.h"
 #ifdef PADDLE_WITH_DISTRIBUTE
 #include "paddle/phi/core/distributed/auto_parallel/dist_attr.h"
 #include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h"
diff --git 
a/python/paddle/fluid/tests/unittests/test_grad_fn_and_next_functions.py b/python/paddle/fluid/tests/unittests/test_grad_fn_and_next_functions.py index f21df257af5a7..d6858b7a08dd0 100644 --- a/python/paddle/fluid/tests/unittests/test_grad_fn_and_next_functions.py +++ b/python/paddle/fluid/tests/unittests/test_grad_fn_and_next_functions.py @@ -11,75 +11,76 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +""" +Test the tensor attribute grad_fn and the properties of the reverse node grad_node, such as next_function +""" import unittest import paddle import paddle.nn as nn + class Testmodel(nn.Layer): def __init__(self): super(Testmodel, self).__init__() def forward(self, x): - y = x ** 2 + y = x**2 y = x + y - return y + return y + class TestAnonmousSurvey(unittest.TestCase): - + """ + Test the tensor attribute grad_fn and the properties of the reverse node grad_node, such as next_function + + """ + def init_graph(self): - """ define reversed graph + """define reversed graph func_name [str]: represents the name of the operator node next_funcs [dict]: represents the operator node """ - self.grad_fn_1 = { - "func_name": "GradNodeAccumulation", - "next_funcs": {} - } + self.grad_fn_1 = {"func_name": "GradNodeAccumulation", "next_funcs": {}} self.grad_fn_2 = { "func_name": "PowGradNode", - "next_funcs": { - "GradNodeAccumulation": self.grad_fn_1 - } + "next_funcs": {"GradNodeAccumulation": self.grad_fn_1}, } self.grad_fn_3 = { "func_name": "AddGradNode", "next_funcs": { "GradNodeAccumulation": self.grad_fn_1, - "PowGradNode": self.grad_fn_2 - } - } - self.output_grad_fn = { - "grad_fn": self.grad_fn_3 + "PowGradNode": self.grad_fn_2, + }, } - + self.output_grad_fn = {"grad_fn": self.grad_fn_3} + def init_data(self): - """ define output of model - + """define output of model + the final output will be saved self.output """ model = Testmodel() x = paddle.randn([1, 3, 24, 24]) - x.stop_gradient = False + x.stop_gradient = False self.output = model(x) - - + def setUp(self): self.init_graph() self.init_data() - def test_grad_fn_and_next_funs(self): self.check_func(self.output.grad_fn, self.output_grad_fn["grad_fn"]) - - def check_func(self, grad_fn, grad_fn_json): - """check each node + def check_func(self, grad_fn: grad_fn, grad_fn_json: dict) -> None: + """ + Check each node, grad_fn is tensor attribute. grad_fn_json is structure of next_node. 
+
-        :param grad_fn: grad_fn of node
-        :return grad_fn_json: gead_node_json of node
+        Args:
+            grad_fn (grad_fn): grad_fn of node
+            grad_fn_json (dict): grad_node_json of node
         """
         # print(grad_fn.name())
         # assert func name
@@ -90,4 +91,5 @@ def check_func(self, grad_fn, grad_fn_json):
             for u in grad_fn.next_functions:
                 self.check_func(u, next_funcs_json[u.name()])
 
-unittest.main()
\ No newline at end of file
+
+unittest.main()

From 1c3ad3969b03998f5b0a569384daa1fc4c36e69c Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Tue, 11 Jul 2023 06:23:03 +0000
Subject: [PATCH 06/22] Move the unit test file to the correct location
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../legacy_test}/test_grad_fn_and_next_functions.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename {python/paddle/fluid/tests/unittests => test/legacy_test}/test_grad_fn_and_next_functions.py (100%)

diff --git a/python/paddle/fluid/tests/unittests/test_grad_fn_and_next_functions.py b/test/legacy_test/test_grad_fn_and_next_functions.py
similarity index 100%
rename from python/paddle/fluid/tests/unittests/test_grad_fn_and_next_functions.py
rename to test/legacy_test/test_grad_fn_and_next_functions.py

From d7925f659418bcd0866fc75e2d74a83d832014f2 Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Tue, 11 Jul 2023 06:41:34 +0000
Subject: [PATCH 07/22] Improve code style
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 test/legacy_test/test_grad_fn_and_next_functions.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/test/legacy_test/test_grad_fn_and_next_functions.py b/test/legacy_test/test_grad_fn_and_next_functions.py
index d6858b7a08dd0..9078b0f2d42ae 100644
--- a/test/legacy_test/test_grad_fn_and_next_functions.py
+++ b/test/legacy_test/test_grad_fn_and_next_functions.py
@@ -18,12 +18,12 @@
 import unittest
 
 import paddle
-import paddle.nn as nn
+from paddle import nn
 
 
 class Testmodel(nn.Layer):
     def __init__(self):
-        super(Testmodel, self).__init__()
+        super().__init__()
 
     def forward(self, x):
         y = x**2
@@ -74,7 +74,7 @@ def setUp(self):
     def test_grad_fn_and_next_funs(self):
         self.check_func(self.output.grad_fn, self.output_grad_fn["grad_fn"])
 
-    def check_func(self, grad_fn: grad_fn, grad_fn_json: dict) -> None:
+    def check_func(self, grad_fn, grad_fn_json) -> None:
         """
         Check each node, grad_fn is tensor attribute. grad_fn_json is structure of next_node.
From 38a86457e9ecd49413a1c76f9641cfbb0a62b221 Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Wed, 12 Jul 2023 11:28:57 +0000
Subject: [PATCH 08/22] Remove useless comments
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 test/legacy_test/test_grad_fn_and_next_functions.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/test/legacy_test/test_grad_fn_and_next_functions.py b/test/legacy_test/test_grad_fn_and_next_functions.py
index 9078b0f2d42ae..29407589d4034 100644
--- a/test/legacy_test/test_grad_fn_and_next_functions.py
+++ b/test/legacy_test/test_grad_fn_and_next_functions.py
@@ -82,8 +82,6 @@ def check_func(self, grad_fn, grad_fn_json) -> None:
             grad_fn (grad_fn): grad_fn of node
             grad_fn_json (dict): grad_node_json of node
         """
-        # print(grad_fn.name())
-        # assert func name
         self.assertEqual(grad_fn.name(), grad_fn_json["func_name"])
         # Recursively test other nodes
         if hasattr(grad_fn, 'next_functions') and grad_fn.next_functions[0]:

From 1059ae5af6429f5f279c674826ec7e08b0abbca2 Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Thu, 13 Jul 2023 01:21:23 +0000
Subject: [PATCH 09/22] Fix "__main__ has no attribute"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 test/legacy_test/test_grad_fn_and_next_functions.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/legacy_test/test_grad_fn_and_next_functions.py b/test/legacy_test/test_grad_fn_and_next_functions.py
index 29407589d4034..531cdfa98a070 100644
--- a/test/legacy_test/test_grad_fn_and_next_functions.py
+++ b/test/legacy_test/test_grad_fn_and_next_functions.py
@@ -90,4 +90,5 @@ def check_func(self, grad_fn, grad_fn_json) -> None:
             self.check_func(u, next_funcs_json[u.name()])
 
 
-unittest.main()
+if __name__ == "__main__":
+    unittest.main()

From ba77d9abafaf57314ef9a2937a03a23abc245efd Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Wed, 19 Jul 2023 01:27:45 +0000
Subject: [PATCH 10/22] Modify the unit test file
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 test/legacy_test/test_grad_fn_and_next_functions.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/test/legacy_test/test_grad_fn_and_next_functions.py b/test/legacy_test/test_grad_fn_and_next_functions.py
index 531cdfa98a070..d16aeef30a08e 100644
--- a/test/legacy_test/test_grad_fn_and_next_functions.py
+++ b/test/legacy_test/test_grad_fn_and_next_functions.py
@@ -84,10 +84,11 @@ def check_func(self, grad_fn, grad_fn_json) -> None:
         """
         self.assertEqual(grad_fn.name(), grad_fn_json["func_name"])
         # Recursively test other nodes
-        if hasattr(grad_fn, 'next_functions') and grad_fn.next_functions[0]:
-            next_funcs_json = grad_fn_json["next_funcs"]
-            for u in grad_fn.next_functions:
-                self.check_func(u, next_funcs_json[u.name()])
+        next_funcs_json = grad_fn_json["next_funcs"]
+        for u in grad_fn.next_functions:
+            if not u:
+                continue
+            self.check_func(u, next_funcs_json[u.name()])
 
 
 if __name__ == "__main__":

From 05e8edfead50fc154b3ee79d94ffb459f9d49f92 Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Thu, 20 Jul 2023 01:46:16 +0000
Subject: [PATCH 11/22] Modify the unit test script - temp
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 test/legacy_test/test_grad_fn_and_next_functions.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/test/legacy_test/test_grad_fn_and_next_functions.py b/test/legacy_test/test_grad_fn_and_next_functions.py
index d16aeef30a08e..5464775001253 100644
--- a/test/legacy_test/test_grad_fn_and_next_functions.py
+++ b/test/legacy_test/test_grad_fn_and_next_functions.py
@@ -83,12 +83,6 @@ def check_func(self, grad_fn, grad_fn_json) -> None:
             grad_fn_json (dict): grad_node_json of node
         """
         self.assertEqual(grad_fn.name(), grad_fn_json["func_name"])
-        # Recursively test other nodes
-        next_funcs_json = grad_fn_json["next_funcs"]
-        for u in grad_fn.next_functions:
-            if not u:
-                continue
-            self.check_func(u, next_funcs_json[u.name()])
 
 
 if __name__ == "__main__":

From b5ebfaa4be2ed841333268cd1c6f63e3ee78e8a5 Mon Sep 17 00:00:00 2001
From: qiuwenbo
Date: Sun, 23 Jul 2023 17:59:58 +0800
Subject: [PATCH 12/22] Fix the memory corruption caused by the grad_fn and
 next_functions APIs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 paddle/fluid/pybind/eager_properties.cc | 13 ++++++-------
 paddle/fluid/pybind/eager_utils.cc      |  6 +++---
 paddle/fluid/pybind/eager_utils.h       |  2 +-
 paddle/fluid/pybind/pybind.cc           | 19 +++++++++++++------
 4 files changed, 23 insertions(+), 17 deletions(-)

diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
index 42c5b97067b0e..cb4373b1b3b49 100644
--- a/paddle/fluid/pybind/eager_properties.cc
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -318,17 +318,16 @@ PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) {
 
   if (meta) {
     // Get the GradNode from meta
-    auto grad_node = meta->GradNode();  // Convert GradNode to a Python object
-    // The conversion will depend on the structure of GradNode.
-
-    if (!grad_node) {
-      Py_INCREF(Py_None);
-      return Py_None;
+    auto grad_node_ptr = meta->GetMutableGradNode();
+    if (!grad_node_ptr) {
+        Py_INCREF(Py_None);
+        return Py_None;
     }
 
-    PyObject* py_grad_node = ToPyObject(grad_node);
+    PyObject* py_grad_node = ToPyObject(grad_node_ptr);
 
     return py_grad_node;
+
   } else {
     // If meta does not exist, return an appropriate Python object (e.g., None
    // or a special value).
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index e365819928e66..c3d45fbe55dbc 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -1006,14 +1006,14 @@ paddle::optional<paddle::Tensor> GetOptionalTensorFromArgs(
   }
 }
 
-PyObject* ToPyObject(egr::GradNodeBase* grad_node) {
+PyObject* ToPyObject(std::shared_ptr<egr::GradNodeBase> grad_node) {
   py::object py_obj = py::cast(grad_node, py::return_value_policy::reference);
-  py::handle py_handle = py::handle(py_obj);
-  PyObject* py_grad_node = py_handle.ptr();
+  PyObject* py_grad_node = py_obj.release().ptr(); 
   Py_INCREF(py_grad_node);
   return py_grad_node;
 }
 
+
 static paddle::Tensor& GetTensorFromPyObject(const std::string& op_type,
                                              const std::string& arg_name,
                                              PyObject* obj,
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 208d2f25e7d21..4a09c57bb1172 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -126,7 +126,7 @@ PyObject* ToPyObject(
     const std::unordered_map<std::string, std::vector<paddle::Tensor>>& value);
 PyObject* ToPyObject(const paddle::framework::Vocab& value);
 
-PyObject* ToPyObject(egr::GradNodeBase* grad_node);
+PyObject* ToPyObject(std::shared_ptr<egr::GradNodeBase> grad_node);
 
 class PyTensorHook : public egr::TensorHook {
  public:
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 1efbf2a1a1ebc..21abd86a66004 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -778,12 +778,19 @@ PYBIND11_MODULE(libpaddle, m) {
     }
   });
 
-  py::class_<egr::GradNodeBase>(m, "GradNodeBase")
-      .def("name", &egr::GradNodeBase::name)
-      .def_property_readonly("next_functions",
-                             &egr::GradNodeBase::NextFunctions)
-      .def("input_meta", &egr::GradNodeBase::InputMeta)
-      .def("output_meta", &egr::GradNodeBase::OutputMeta);
+  py::class_<egr::GradNodeBase, std::shared_ptr<egr::GradNodeBase>>(m, "GradNodeBase")
+      .def("name", [](const std::shared_ptr<egr::GradNodeBase>& self){
+        return self->name();
+      })
+      .def_property_readonly("next_functions", [](const std::shared_ptr<egr::GradNodeBase>& self){
+        return self->NextFunctions();
+      })
+      .def("input_meta", [](const std::shared_ptr<egr::GradNodeBase>& self) {
+        return self->InputMeta();
+      })
+      .def("output_meta", [](const std::shared_ptr<egr::GradNodeBase>& self) {
+        return self->OutputMeta();
+      });
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   m.def("cudnn_version", &platform::DnnVersion);
   m.def("gpu_memory_available", []() {
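The crash fix above is an ownership fix: grad_fn previously handed Python a raw GradNodeBase pointer while pybind11 held no share of the node, so the Python wrapper could outlive the C++ object. Binding the class with a std::shared_ptr holder and returning meta->GetMutableGradNode() makes Python a co-owner. A Python-side sketch of the access pattern that used to misbehave and should now be safe (a hypothetical stress loop, assuming a patched build):

import paddle

x = paddle.randn([4])
x.stop_gradient = False
y = (x * x).sum()

fn = y.grad_fn
del y                      # the Python handle now keeps the C++ node alive
for _ in range(1000):
    _ = fn.next_functions  # repeated traversal no longer touches freed memory
print(fn.name())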
From fd0c65597e840009cf9871b215ad4c4af730c096 Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Mon, 24 Jul 2023 05:28:45 +0000
Subject: [PATCH 13/22] Modify the unit test content
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 test/legacy_test/test_grad_fn_and_next_functions.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/test/legacy_test/test_grad_fn_and_next_functions.py b/test/legacy_test/test_grad_fn_and_next_functions.py
index 5464775001253..531cdfa98a070 100644
--- a/test/legacy_test/test_grad_fn_and_next_functions.py
+++ b/test/legacy_test/test_grad_fn_and_next_functions.py
@@ -83,6 +83,11 @@ def check_func(self, grad_fn, grad_fn_json) -> None:
             grad_fn_json (dict): grad_node_json of node
         """
         self.assertEqual(grad_fn.name(), grad_fn_json["func_name"])
+        # Recursively test other nodes
+        if hasattr(grad_fn, 'next_functions') and grad_fn.next_functions[0]:
+            next_funcs_json = grad_fn_json["next_funcs"]
+            for u in grad_fn.next_functions:
+                self.check_func(u, next_funcs_json[u.name()])
 
 
 if __name__ == "__main__":

From fd0c65597e840009cf9871b215ad4c4af730c096 Mon Sep 17 00:00:00 2001
From: qiuwenbo
Date: Mon, 24 Jul 2023 20:26:14 +0800
Subject: [PATCH 14/22] Fix code-style issues
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 paddle/fluid/pybind/eager_properties.cc       |  4 +--
 paddle/fluid/pybind/eager_utils.cc            |  3 +-
 paddle/fluid/pybind/pybind.cc                 | 31 +++++++++++--------
 .../test_grad_fn_and_next_functions.py        |  1 +
 4 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
index cb4373b1b3b49..66118fd11650e 100644
--- a/paddle/fluid/pybind/eager_properties.cc
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -320,8 +320,8 @@ PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) {
     // Get the GradNode from meta
     auto grad_node_ptr = meta->GetMutableGradNode();
     if (!grad_node_ptr) {
-        Py_INCREF(Py_None);
-        return Py_None;
+      Py_INCREF(Py_None);
+      return Py_None;
     }
 
     PyObject* py_grad_node = ToPyObject(grad_node_ptr);
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index c3d45fbe55dbc..365debd88f82c 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -1008,12 +1008,11 @@ paddle::optional<paddle::Tensor> GetOptionalTensorFromArgs(
 
 PyObject* ToPyObject(std::shared_ptr<egr::GradNodeBase> grad_node) {
   py::object py_obj = py::cast(grad_node, py::return_value_policy::reference);
-  PyObject* py_grad_node = py_obj.release().ptr(); 
+  PyObject* py_grad_node = py_obj.release().ptr();
   Py_INCREF(py_grad_node);
   return py_grad_node;
 }
 
-
 static paddle::Tensor& GetTensorFromPyObject(const std::string& op_type,
                                              const std::string& arg_name,
                                              PyObject* obj,
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 759f5794210a2..504e1adf22569 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -778,19 +778,24 @@ PYBIND11_MODULE(libpaddle, m) {
     }
   });
 
-  py::class_<egr::GradNodeBase, std::shared_ptr<egr::GradNodeBase>>(m, "GradNodeBase")
-      .def("name", [](const std::shared_ptr<egr::GradNodeBase>& self){
-        return self->name();
-      })
-      .def_property_readonly("next_functions", [](const std::shared_ptr<egr::GradNodeBase>& self){
-        return self->NextFunctions();
-      })
-      .def("input_meta", [](const std::shared_ptr<egr::GradNodeBase>& self) {
-        return self->InputMeta();
-      })
-      .def("output_meta", [](const std::shared_ptr<egr::GradNodeBase>& self) {
-        return self->OutputMeta();
-      });
+  py::class_<egr::GradNodeBase, std::shared_ptr<egr::GradNodeBase>>(
+      m, "GradNodeBase")
+      .def("name",
+           [](const std::shared_ptr<egr::GradNodeBase> &self) {
+             return self->name();
+           })
+      .def_property_readonly(
+          "next_functions",
+          [](const std::shared_ptr<egr::GradNodeBase> &self) {
+            return self->NextFunctions();
+          })
+      .def("input_meta",
+           [](const std::shared_ptr<egr::GradNodeBase> &self) {
+             return self->InputMeta();
+           })
+      .def("output_meta", [](const std::shared_ptr<egr::GradNodeBase> &self) {
+        return self->OutputMeta();
+      });
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   m.def("cudnn_version", &platform::DnnVersion);
diff --git a/test/legacy_test/test_grad_fn_and_next_functions.py b/test/legacy_test/test_grad_fn_and_next_functions.py
index 5043e1fc42999..531cdfa98a070 100644
--- a/test/legacy_test/test_grad_fn_and_next_functions.py
+++ b/test/legacy_test/test_grad_fn_and_next_functions.py
@@ -89,5 +89,6 @@ def check_func(self, grad_fn, grad_fn_json) -> None:
         for u in grad_fn.next_functions:
             self.check_func(u, next_funcs_json[u.name()])
 
+
 if __name__ == "__main__":
     unittest.main()
From a442c1cb2f1eb0e6e0a36b1e7b5876e9ea4da8d2 Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Thu, 31 Aug 2023 02:21:56 +0000
Subject: [PATCH 15/22] Initial version - add an interface for getting the
 node pointer value, and log the information needed to build edges
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../fluid/eager/accumulation/accumulation_node.cc  | 14 +++++++++++---
 .../auto_code_generator/generator/eager_gen.py     | 11 +++++++++--
 paddle/fluid/eager/grad_node_info.cc               |  6 ++++++
 paddle/fluid/eager/grad_node_info.h                |  2 ++
 paddle/fluid/eager/utils.h                         |  6 +++---
 paddle/fluid/pybind/pybind.cc                      |  2 ++
 6 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/eager/accumulation/accumulation_node.cc b/paddle/fluid/eager/accumulation/accumulation_node.cc
index dd0cb9b43e688..f3658094a717a 100644
--- a/paddle/fluid/eager/accumulation/accumulation_node.cc
+++ b/paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -132,20 +132,28 @@ GradNodeAccumulation::operator()(
   if (ReduceHooksRegistered()) {
     ApplyReduceHooks();
   }
+
+  std::stringstream ss;
+  ss << this;
+  std::string this_pointer = ss.str();
+
   VLOG(3) << "Finish AD API Grad: GradNodeAccumulation";
-  if (VLOG_IS_ON(4)) {
+  VLOG(6) << "gradnode_ptr = " << this;
+  if (VLOG_IS_ON(6)) {
     const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], Output: [%s] } ";
 
     std::string input_str = "";
     std::string output_str = "";
+
     const char* TENSOR_OUT_GRAD_TEMPLATE = "(grads[0][0], [%s]), ";
     std::string input_out_grad_str = paddle::string::Sprintf(
-        TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grads[0][0]));
+        TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grads[0][0])); 
+
     input_str += input_out_grad_str;
     const char* TENSOR_X_GRAD_TEMPLATE = "(grad_out, [%s]), ";
     std::string output_x_grad_str = paddle::string::Sprintf(
         TENSOR_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
     output_str += output_x_grad_str;
-    VLOG(4) << paddle::string::Sprintf(
+    VLOG(6) << paddle::string::Sprintf(
         INPUT_PRINT_TEMPLATE, input_str, output_str);
   }
   return {{grad_out}};
diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index a90f73c8209c6..186956402fa2f 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -176,6 +176,11 @@ class {} : public egr::GradNodeBase {{
 GRAD_FUNCTION_TEMPLATE = """
 paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize> {}::operator()(paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>& grads, bool create_graph, bool is_new_grad) {{
   VLOG(3) << \"Running AD API GRAD: \" << \"{}\";
+  //std::cout << "The pointer to the current object: " << this << std::endl;
+  //std::stringstream ss;
+  //ss << this;
+  //std::string this_pointer = ss.str();
+
   // Fill Zero For GradIn Tensors
 {}
   // Apply Gradient Hooks
@@ -204,7 +209,9 @@ class {} : public egr::GradNodeBase {{
   // Create Grad Node
 {}
   VLOG(4) << \"Finish AD API GRAD: {}";
+  VLOG(6) << "gradnode_ptr = " << this;
   // LOG IF DEBUG
+  
 {}
   // Return
 {}
@@ -259,10 +266,10 @@ class {} : public egr::GradNodeBase {{
 """

 AFTER_LOG_PRINT_TEMPLATE = """
-  if(VLOG_IS_ON(4)){{
+  if(VLOG_IS_ON(6)){{
     const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], \\n Output: [%s] }} \";
 {}
-    VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
+    VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
   }}
 """
diff --git a/paddle/fluid/eager/grad_node_info.cc b/paddle/fluid/eager/grad_node_info.cc
index 1fe6aba3c7bb3..d24c1d37794bc 100644
--- a/paddle/fluid/eager/grad_node_info.cc
+++ b/paddle/fluid/eager/grad_node_info.cc
@@ -568,6 +568,8 @@ std::vector<std::shared_ptr<GradNodeBase>> GradNodeBase::NextFunctions() {
     for (const GradSlotMeta& meta : meta_list) {
       const auto& edge = meta.GetEdge();
       std::shared_ptr<GradNodeBase> next_node = edge.GetMutableGradNode();
+      // Print the pointer value of the next_node object
+      std::cout << "next_node: " << next_node << std::endl;
       next_nodes.push_back(next_node);
     }
   }
@@ -575,4 +577,8 @@ std::vector<std::shared_ptr<GradNodeBase>> GradNodeBase::NextFunctions() {
   return next_nodes;
 }
 
+uintptr_t GradNodeBase::GetThisPtr() const {
+  return reinterpret_cast<uintptr_t>(this);
+}
+
 }  // namespace egr
diff --git a/paddle/fluid/eager/grad_node_info.h b/paddle/fluid/eager/grad_node_info.h
index b516d5cf84e8c..838be85646bd4 100644
--- a/paddle/fluid/eager/grad_node_info.h
+++ b/paddle/fluid/eager/grad_node_info.h
@@ -253,6 +253,8 @@ class GradNodeBase {
 
   std::vector<std::shared_ptr<GradNodeBase>> NextFunctions();
 
+  uintptr_t GetThisPtr() const;
+
   /**
    * Apply GradientHook
    * **/
diff --git a/paddle/fluid/eager/utils.h b/paddle/fluid/eager/utils.h
index 1cdb98b3ddbbe..ba03d9792a434 100644
--- a/paddle/fluid/eager/utils.h
+++ b/paddle/fluid/eager/utils.h
@@ -255,7 +255,7 @@ class EagerUtils {
     }
     if (VLOG_IS_ON(11)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{Name: %s, Initialized: %d, Ptr: %d "
+          "{Name: %s, Initialized: %d, Ptr: %d, "
           "TensorInfo: [ %s ], Value:[ %s ], ADInfo:[ %s ]}";
       auto* ad_meta = nullable_autograd_meta(t);
       if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
@@ -306,7 +306,7 @@ class EagerUtils {
       }
     } else if (VLOG_IS_ON(6)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{Name: %s, Initialized: %d, Ptr: %d "
+          "{Name: %s, Initialized: %d, Ptr: %d,"
          "TensorInfo: [ %s ], ADInfo:[ %s ]}";
       auto* ad_meta = nullable_autograd_meta(t);
       if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
@@ -333,7 +333,7 @@ class EagerUtils {
       }
     } else if (VLOG_IS_ON(5)) {
       const char* TENSOR_PRINT_TEMPLATE =
-          "{Name: %s, Initialized: %d , Ptr: %d "
+          "{Name: %s, Initialized: %d , Ptr: %d, "
           "TensorInfo: [ %s ]}";
       return paddle::string::Sprintf(TENSOR_PRINT_TEMPLATE,
                                      tensor_name_str,
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 504e1adf22569..ec8243bcacc7b 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -789,6 +789,8 @@ PYBIND11_MODULE(libpaddle, m) {
           [](const std::shared_ptr<egr::GradNodeBase> &self) {
             return self->NextFunctions();
           })
+
+      .def("node_this_ptr", &egr::GradNodeBase::GetThisPtr)
       .def("input_meta",
           [](const std::shared_ptr<egr::GradNodeBase> &self) {
             return self->InputMeta();

From ca6a064c3439978fa7f69ab9c51212d2836274e8 Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Sat, 9 Sep 2023 11:33:34 +0000
Subject: [PATCH 16/22] Add logging for the backward nodes in eager_manual
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../manual/eager_manual/nodes/add_n_node.cc   | 24 +++++
 .../manual/eager_manual/nodes/conv2d_nodes.cc | 87 +++++++++++++++++++
 .../eager_manual/nodes/multiply_node.cc       |  5 +-
 .../nodes/sync_batch_norm_node.cc             |  5 +-
 4 files changed, 117 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc
index 424d71a4cec89..b91d3c78ef5fa 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc
+++ 
b/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc @@ -72,6 +72,30 @@ AddNGradNodeFinal::operator()( egr::CheckTensorHasNanOrInf("add_n_grad", returns); } + VLOG(6) << "gradnode_ptr = " << this; + if (VLOG_IS_ON(6)) { + const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } "; + std::string input_str = ""; + std::string output_str = ""; + + const char *TENSOR_INPUT_TEMPLATE = " \n( x , [%s]), "; + std::string input_x_str = + paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(x)); + input_str += input_x_str; + + const char *TENSOR_OUT_GRAD_TEMPLATE = " \n( out_grad , [%s]), "; + std::string input_out_grad_str = + paddle::string::Sprintf(TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(out_grad)); + input_str += input_out_grad_str; + + const char *TENSOR_OUTPUT_TEMPLATE = " \n ( returns , [%s]), "; + std::string output_returns_str = paddle::string::Sprintf( + TENSOR_OUTPUT_TEMPLATE, egr::EagerUtils::TensorStr(returns[0][0])); + output_str += output_returns_str; + + VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + } + if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns); return returns; } diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc index a7d00f8df1802..8294812e81007 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc @@ -76,6 +76,7 @@ Conv2dGradNodeFinal::operator()( // Inplace Strategy + // Call grad_api function VLOG(3) << "Final State Running: Conv2dGradNodeFinal"; @@ -162,6 +163,41 @@ Conv2dGradNodeFinal::operator()( // Set TensorWrappers for Forward Outputs if needed } + VLOG(6) << "gradnode_ptr = " << this; + if (VLOG_IS_ON(6)) { + const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } "; + std::string input_str = ""; + std::string output_str = ""; + + const char *TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), "; + std::string input_input_str = + paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input)); + input_str += input_input_str; + + const char *TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), "; + std::string input_filter_str = + paddle::string::Sprintf(TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter)); + input_str += input_filter_str; + + const char *TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), "; + std::string input_grad_out_str = + paddle::string::Sprintf(TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out)); + input_str += input_grad_out_str; + + const char *TENSOR_GRAD_INPUT_TEMPLATE = " \n ( grad_input , [%s]), "; + std::string output_grad_input_str = paddle::string::Sprintf( + TENSOR_GRAD_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_input)); + output_str += output_grad_input_str; + + const char *TENSOR_GRAD_FILTER_TEMPLATE = " \n ( grad_filter , [%s]), "; + std::string output_grad_filter_str = paddle::string::Sprintf( + TENSOR_GRAD_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter)); + output_str += output_grad_filter_str; + + VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + } + + // Return if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns); return returns; @@ -283,6 +319,57 @@ Conv2dDoubleGradNodeFinal::operator()( // Create Grad Node + + VLOG(6) << "gradnode_ptr = " << this; + if (VLOG_IS_ON(6)) { + const char *INPUT_PRINT_TEMPLATE = "{ Input: 
[%s], \n Output: [%s] } "; + std::string input_str = ""; + std::string output_str = ""; + + const char *TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), "; + std::string input_input_str = + paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input)); + input_str += input_input_str; + + const char *TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), "; + std::string input_filter_str = + paddle::string::Sprintf(TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter)); + input_str += input_filter_str; + + const char *TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), "; + std::string input_grad_out_str = + paddle::string::Sprintf(TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out)); + input_str += input_grad_out_str; + + const char *TENSOR_GRAD_INPUT_GRAD_TEMPLATE = " \n( grad_input_grad , [%s]), "; + std::string input_grad_input_grad_str = + paddle::string::Sprintf(TENSOR_GRAD_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_input_grad)); + input_str += input_grad_input_grad_str; + + const char *TENSOR_GRAD_FILTER_GRAD_TEMPLATE = " \n( grad_filter_grad , [%s]), "; + std::string input_grad_filter_grad_str = + paddle::string::Sprintf(TENSOR_GRAD_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter_grad)); + input_str += input_grad_filter_grad_str; + + const char *TENSOR_INPUT_GRAD_TEMPLATE = " \n( input_grad , [%s]), "; + std::string output_input_grad_str = + paddle::string::Sprintf(TENSOR_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(input_grad)); + output_str += output_input_grad_str; + + const char *TENSOR_FILTER_GRAD_TEMPLATE = " \n( filter_grad , [%s]), "; + std::string output_filter_grad_str = + paddle::string::Sprintf(TENSOR_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(filter_grad)); + output_str += output_filter_grad_str; + + const char *TENSOR_GRAD_OUT_GRAD_TEMPLATE = " \n( grad_out_grad , [%s]) "; + std::string output_grad_out_grad_str = + paddle::string::Sprintf(TENSOR_GRAD_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out_grad)); + output_str += output_grad_out_grad_str; + + VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + } + + // Return if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns); return returns; diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc index f7a90c43e7d93..fe86fe4d5fbcb 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc @@ -566,10 +566,11 @@ MultiplyGradNode::operator()( "op. 
             "op. If you don't intend calculating higher order"
             "derivatives, please set `create_graph`to False."));
   }
-  VLOG(4) << "Finish AD API GRAD: multiply_grad";
+  VLOG(6) << "Finish AD API GRAD: multiply_grad";
 
   // LOG IF DEBUG
-  if (VLOG_IS_ON(4)) {
+  VLOG(6) << "gradnode_ptr = " << this;
+  if (VLOG_IS_ON(6)) {
     const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
 
     std::string input_str = "";
diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc
index f10d724eaa210..6b0c6816b3366 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc
+++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc
@@ -409,7 +409,8 @@ SyncBatchNormGradNode::operator()(
   VLOG(4) << "Finish AD API GRAD: sync_batch_norm_grad";
 
   // LOG IF DEBUG
-  if (VLOG_IS_ON(4)) {
+  VLOG(6) << "gradnode_ptr = " << this;
+  if (VLOG_IS_ON(6)) {
     const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
 
     std::string input_str = "";
@@ -457,7 +458,7 @@ SyncBatchNormGradNode::operator()(
     std::string output_bias_grad_str = paddle::string::Sprintf(
         TENSOR_BIAS_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(bias_grad));
     output_str += output_bias_grad_str;
-    VLOG(4) << paddle::string::Sprintf(
+    VLOG(6) << paddle::string::Sprintf(
         INPUT_PRINT_TEMPLATE, input_str, output_str);
   }

From 657412a64ad01c1f3fded62ea6c996e985c89689 Mon Sep 17 00:00:00 2001
From: qiuwenbogdut
Date: Thu, 14 Sep 2023 01:16:36 +0000
Subject: [PATCH 17/22] Do not change the original log levels
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../fluid/eager/accumulation/accumulation_node.cc  |  7 ++++---
 .../api/manual/eager_manual/nodes/add_n_node.cc    |  6 +++---
 .../api/manual/eager_manual/nodes/conv2d_nodes.cc  | 13 ++++++-------
 .../api/manual/eager_manual/nodes/multiply_node.cc |  5 +++--
 .../eager_manual/nodes/sync_batch_norm_node.cc     |  7 ++++---
 .../auto_code_generator/generator/eager_gen.py     |  7 ++-----
 paddle/fluid/eager/grad_node_info.cc               |  2 --
 7 files changed, 22 insertions(+), 25 deletions(-)

diff --git a/paddle/fluid/eager/accumulation/accumulation_node.cc b/paddle/fluid/eager/accumulation/accumulation_node.cc
index f3658094a717a..90e579741ea8e 100644
--- a/paddle/fluid/eager/accumulation/accumulation_node.cc
+++ b/paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -138,8 +138,7 @@ GradNodeAccumulation::operator()(
   std::string this_pointer = ss.str();
 
   VLOG(3) << "Finish AD API Grad: GradNodeAccumulation";
-  VLOG(6) << "gradnode_ptr = " << this;
-  if (VLOG_IS_ON(6)) {
+  if (VLOG_IS_ON(4)) {
     const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], Output: [%s] } ";
 
     std::string input_str = "";
@@ -153,7 +152,9 @@ GradNodeAccumulation::operator()(
     std::string output_x_grad_str = paddle::string::Sprintf(
         TENSOR_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
     output_str += output_x_grad_str;
-    VLOG(6) << paddle::string::Sprintf(
+    VLOG(4) << paddle::string::Sprintf(
+        INPUT_PRINT_TEMPLATE, input_str, output_str);
+    VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(
         INPUT_PRINT_TEMPLATE, input_str, output_str);
   }
   return {{grad_out}};
diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc
index b91d3c78ef5fa..733082b8ad8fa 100644
--- 
a/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc @@ -72,8 +72,7 @@ AddNGradNodeFinal::operator()( egr::CheckTensorHasNanOrInf("add_n_grad", returns); } - VLOG(6) << "gradnode_ptr = " << this; - if (VLOG_IS_ON(6)) { + if (VLOG_IS_ON(4)) { const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } "; std::string input_str = ""; std::string output_str = ""; @@ -93,7 +92,8 @@ AddNGradNodeFinal::operator()( TENSOR_OUTPUT_TEMPLATE, egr::EagerUtils::TensorStr(returns[0][0])); output_str += output_returns_str; - VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); } if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns); diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc index 8294812e81007..113a6abf76a1f 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc @@ -76,7 +76,6 @@ Conv2dGradNodeFinal::operator()( // Inplace Strategy - // Call grad_api function VLOG(3) << "Final State Running: Conv2dGradNodeFinal"; @@ -163,8 +162,7 @@ Conv2dGradNodeFinal::operator()( // Set TensorWrappers for Forward Outputs if needed } - VLOG(6) << "gradnode_ptr = " << this; - if (VLOG_IS_ON(6)) { + if (VLOG_IS_ON(4)) { const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } "; std::string input_str = ""; std::string output_str = ""; @@ -194,7 +192,8 @@ Conv2dGradNodeFinal::operator()( TENSOR_GRAD_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter)); output_str += output_grad_filter_str; - VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); } @@ -320,8 +319,7 @@ Conv2dDoubleGradNodeFinal::operator()( // Create Grad Node - VLOG(6) << "gradnode_ptr = " << this; - if (VLOG_IS_ON(6)) { + if (VLOG_IS_ON(4)) { const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } "; std::string input_str = ""; std::string output_str = ""; @@ -366,7 +364,8 @@ Conv2dDoubleGradNodeFinal::operator()( paddle::string::Sprintf(TENSOR_GRAD_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out_grad)); output_str += output_grad_out_grad_str; - VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); } diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc index fe86fe4d5fbcb..7c59029291247 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc @@ -569,8 +569,7 @@ MultiplyGradNode::operator()( VLOG(6) << "Finish AD API GRAD: multiply_grad"; // LOG IF DEBUG - VLOG(6) << "gradnode_ptr = " << this; - if (VLOG_IS_ON(6)) { + if (VLOG_IS_ON(4)) { const char* 
INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } "; std::string input_str = ""; @@ -597,6 +596,8 @@ MultiplyGradNode::operator()( output_str += output_y_grad_str; VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } // Return diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc index 6b0c6816b3366..18fe32c7cc4e0 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc @@ -409,8 +409,7 @@ SyncBatchNormGradNode::operator()( VLOG(4) << "Finish AD API GRAD: sync_batch_norm_grad"; // LOG IF DEBUG - VLOG(6) << "gradnode_ptr = " << this; - if (VLOG_IS_ON(6)) { + if (VLOG_IS_ON(4)) { const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } "; std::string input_str = ""; @@ -458,8 +457,10 @@ SyncBatchNormGradNode::operator()( std::string output_bias_grad_str = paddle::string::Sprintf( TENSOR_BIAS_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(bias_grad)); output_str += output_bias_grad_str; - VLOG(6) << paddle::string::Sprintf( + VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } // Return diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py index 186956402fa2f..224322cd58a01 100644 --- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py +++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py @@ -176,10 +176,6 @@ class {} : public egr::GradNodeBase {{ GRAD_FUNCTION_TEMPLATE = """ paddle::small_vector, egr::kSlotSmallVectorSize> {}::operator()(paddle::small_vector, egr::kSlotSmallVectorSize>& grads, bool create_graph, bool is_new_grad) {{ VLOG(3) << \"Running AD API GRAD: \" << \"{}\"; - //std::cout << "The pointer to the current object: " << this << std::endl; - //std::stringstream ss; - //ss << this; - //std::string this_pointer = ss.str(); // Fill Zero For GradIn Tensors {} @@ -266,9 +262,10 @@ class {} : public egr::GradNodeBase {{ """ AFTER_LOG_PRINT_TEMPLATE = """ - if(VLOG_IS_ON(6)){{ + if(VLOG_IS_ON(4)){{ const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], \\n Output: [%s] }} \"; {} + VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); }} """ diff --git a/paddle/fluid/eager/grad_node_info.cc b/paddle/fluid/eager/grad_node_info.cc index d24c1d37794bc..badac75036458 100644 --- a/paddle/fluid/eager/grad_node_info.cc +++ b/paddle/fluid/eager/grad_node_info.cc @@ -568,8 +568,6 @@ std::vector> GradNodeBase::NextFunctions() { for (const GradSlotMeta& meta : meta_list) { const auto& edge = meta.GetEdge(); std::shared_ptr next_node = edge.GetMutableGradNode(); - //打印一下 next_node 对象的指针值 - std::cout << "next_node: " << next_node << std::endl; next_nodes.push_back(next_node); } } From c86c61ca0ab873f73bb7a60dd2d85a25be08ba68 Mon Sep 17 00:00:00 2001 From: qiuwenbogdut Date: Fri, 15 Sep 2023 01:26:29 +0000 Subject: [PATCH 18/22] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=8E=89=E4=B8=8D?= =?UTF-8?q?=E5=BF=85=E8=A6=81=E7=9A=84=E6=89=93=E5=8D=B0=E4=BF=A1=E6=81=AF?= 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- paddle/fluid/eager/accumulation/accumulation_node.cc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/paddle/fluid/eager/accumulation/accumulation_node.cc b/paddle/fluid/eager/accumulation/accumulation_node.cc index 90e579741ea8e..18ddb51e62697 100644 --- a/paddle/fluid/eager/accumulation/accumulation_node.cc +++ b/paddle/fluid/eager/accumulation/accumulation_node.cc @@ -133,10 +133,6 @@ GradNodeAccumulation::operator()( ApplyReduceHooks(); } - std::stringstream ss; - ss << this; - std::string this_pointer = ss.str(); - VLOG(3) << "Finish AD API Grad: GradNodeAccumulation"; if (VLOG_IS_ON(4)) { const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], Output: [%s] } "; From 0fd9e8f8712ddccff951d78bfb7c90a32beeb476 Mon Sep 17 00:00:00 2001 From: qiuwenbogdut Date: Fri, 15 Sep 2023 01:30:16 +0000 Subject: [PATCH 19/22] =?UTF-8?q?=E4=B8=8D=E4=BF=AE=E6=94=B9=E4=B9=8B?= =?UTF-8?q?=E5=89=8D=E7=9A=84=E6=97=A5=E5=BF=97=E7=AD=89=E7=BA=A72?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc index 7c59029291247..b0d707d25ebf7 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc @@ -566,7 +566,7 @@ MultiplyGradNode::operator()( "op. If you don't intend calculating higher order" "derivatives, please set `create_graph`to False.")); } - VLOG(6) << "Finish AD API GRAD: multiply_grad"; + VLOG(4) << "Finish AD API GRAD: multiply_grad"; // LOG IF DEBUG if (VLOG_IS_ON(4)) { From 9c015dd5dc37bb974e03b43523f2caed9c15d313 Mon Sep 17 00:00:00 2001 From: qiuwenbogdut Date: Fri, 15 Sep 2023 11:34:20 +0000 Subject: [PATCH 20/22] =?UTF-8?q?=E4=BC=98=E5=8C=96=E4=BB=A3=E7=A0=81?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../eager/accumulation/accumulation_node.cc | 7 +- .../manual/eager_manual/nodes/add_n_node.cc | 25 ++--- .../manual/eager_manual/nodes/conv2d_nodes.cc | 92 ++++++++++--------- .../eager_manual/nodes/multiply_node.cc | 5 +- .../nodes/sync_batch_norm_node.cc | 5 +- .../generator/eager_gen.py | 2 +- paddle/fluid/eager/grad_node_info.cc | 2 +- 7 files changed, 76 insertions(+), 62 deletions(-) diff --git a/paddle/fluid/eager/accumulation/accumulation_node.cc b/paddle/fluid/eager/accumulation/accumulation_node.cc index 18ddb51e62697..9f85ab35ae3a1 100644 --- a/paddle/fluid/eager/accumulation/accumulation_node.cc +++ b/paddle/fluid/eager/accumulation/accumulation_node.cc @@ -142,7 +142,7 @@ GradNodeAccumulation::operator()( const char* TENSOR_OUT_GRAD_TEMPLATE = "(grads[0][0], [%s]), "; std::string input_out_grad_str = paddle::string::Sprintf( - TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grads[0][0])); + TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grads[0][0])); input_str += input_out_grad_str; const char* TENSOR_X_GRAD_TEMPLATE = "(grad_out, [%s]), "; std::string output_x_grad_str = paddle::string::Sprintf( @@ -150,8 +150,9 @@ GradNodeAccumulation::operator()( output_str += output_x_grad_str; VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " 
<< paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this << ", " + << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } return {{grad_out}}; } diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc index 733082b8ad8fa..858951b7d5c33 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc @@ -28,8 +28,8 @@ PHI_DECLARE_bool(check_nan_inf); paddle::small_vector, egr::kSlotSmallVectorSize> AddNGradNodeFinal::operator()( - paddle::small_vector, - egr::kSlotSmallVectorSize>& grads, + paddle::small_vector, egr::kSlotSmallVectorSize> + &grads, bool create_graph, bool is_new_grad) { // Fill Zero For GradIn Tensors @@ -39,10 +39,10 @@ AddNGradNodeFinal::operator()( // Collect GradIn Tensors, Attrs and Recovered TensorWrappers auto x = egr::EagerUtils::RecoverTensorWrapper(&this->x_); - auto& out_grad = hooked_grads[0][0]; + auto &out_grad = hooked_grads[0][0]; // Prepare Grad function call - const auto& out_metas = OutputMeta(); + const auto &out_metas = OutputMeta(); paddle::small_vector, egr::kSlotSmallVectorSize> returns(1); for (int i = 0; i < 1; ++i) { @@ -50,7 +50,7 @@ AddNGradNodeFinal::operator()( : returns[i].resize(out_metas[i].size()); } - std::vector api_output_0; + std::vector api_output_0; api_output_0.reserve(returns[0].size()); for (size_t i = 0; i < returns[0].size(); ++i) { if (out_metas[0].empty() || out_metas[0][i].IsStopGradient()) { @@ -78,13 +78,13 @@ AddNGradNodeFinal::operator()( std::string output_str = ""; const char *TENSOR_INPUT_TEMPLATE = " \n( x , [%s]), "; - std::string input_x_str = - paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(x)); + std::string input_x_str = paddle::string::Sprintf( + TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(x)); input_str += input_x_str; const char *TENSOR_OUT_GRAD_TEMPLATE = " \n( out_grad , [%s]), "; - std::string input_out_grad_str = - paddle::string::Sprintf(TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(out_grad)); + std::string input_out_grad_str = paddle::string::Sprintf( + TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(out_grad)); input_str += input_out_grad_str; const char *TENSOR_OUTPUT_TEMPLATE = " \n ( returns , [%s]), "; @@ -92,8 +92,11 @@ AddNGradNodeFinal::operator()( TENSOR_OUTPUT_TEMPLATE, egr::EagerUtils::TensorStr(returns[0][0])); output_str += output_returns_str; - VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(4) << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this << ", " + << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns); diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc index 113a6abf76a1f..ad28ec2bd54ee 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc @@ -162,41 +162,43 @@ Conv2dGradNodeFinal::operator()( // Set TensorWrappers for Forward Outputs if needed } - if (VLOG_IS_ON(4)) { - const char 
*INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } "; + if (VLOG_IS_ON(4)) { + const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } "; std::string input_str = ""; std::string output_str = ""; - const char *TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), "; - std::string input_input_str = - paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input)); + const char* TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), "; + std::string input_input_str = paddle::string::Sprintf( + TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input)); input_str += input_input_str; - const char *TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), "; - std::string input_filter_str = - paddle::string::Sprintf(TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter)); + const char* TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), "; + std::string input_filter_str = paddle::string::Sprintf( + TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter)); input_str += input_filter_str; - const char *TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), "; - std::string input_grad_out_str = - paddle::string::Sprintf(TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out)); + const char* TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), "; + std::string input_grad_out_str = paddle::string::Sprintf( + TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out)); input_str += input_grad_out_str; - const char *TENSOR_GRAD_INPUT_TEMPLATE = " \n ( grad_input , [%s]), "; + const char* TENSOR_GRAD_INPUT_TEMPLATE = " \n ( grad_input , [%s]), "; std::string output_grad_input_str = paddle::string::Sprintf( TENSOR_GRAD_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_input)); output_str += output_grad_input_str; - const char *TENSOR_GRAD_FILTER_TEMPLATE = " \n ( grad_filter , [%s]), "; + const char* TENSOR_GRAD_FILTER_TEMPLATE = " \n ( grad_filter , [%s]), "; std::string output_grad_filter_str = paddle::string::Sprintf( TENSOR_GRAD_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter)); output_str += output_grad_filter_str; - VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(4) << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this << ", " + << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } - // Return if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns); return returns; @@ -318,57 +320,63 @@ Conv2dDoubleGradNodeFinal::operator()( // Create Grad Node - if (VLOG_IS_ON(4)) { - const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } "; + const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } "; std::string input_str = ""; std::string output_str = ""; - const char *TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), "; - std::string input_input_str = - paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input)); + const char* TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), "; + std::string input_input_str = paddle::string::Sprintf( + TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input)); input_str += input_input_str; - const char *TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), "; - std::string input_filter_str = - paddle::string::Sprintf(TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter)); + const char* TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), "; + std::string input_filter_str 
= paddle::string::Sprintf( + TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter)); input_str += input_filter_str; - const char *TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), "; - std::string input_grad_out_str = - paddle::string::Sprintf(TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out)); + const char* TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), "; + std::string input_grad_out_str = paddle::string::Sprintf( + TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out)); input_str += input_grad_out_str; - const char *TENSOR_GRAD_INPUT_GRAD_TEMPLATE = " \n( grad_input_grad , [%s]), "; + const char* TENSOR_GRAD_INPUT_GRAD_TEMPLATE = + " \n( grad_input_grad , [%s]), "; std::string input_grad_input_grad_str = - paddle::string::Sprintf(TENSOR_GRAD_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_input_grad)); + paddle::string::Sprintf(TENSOR_GRAD_INPUT_GRAD_TEMPLATE, + egr::EagerUtils::TensorStr(grad_input_grad)); input_str += input_grad_input_grad_str; - const char *TENSOR_GRAD_FILTER_GRAD_TEMPLATE = " \n( grad_filter_grad , [%s]), "; + const char* TENSOR_GRAD_FILTER_GRAD_TEMPLATE = + " \n( grad_filter_grad , [%s]), "; std::string input_grad_filter_grad_str = - paddle::string::Sprintf(TENSOR_GRAD_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter_grad)); + paddle::string::Sprintf(TENSOR_GRAD_FILTER_GRAD_TEMPLATE, + egr::EagerUtils::TensorStr(grad_filter_grad)); input_str += input_grad_filter_grad_str; - const char *TENSOR_INPUT_GRAD_TEMPLATE = " \n( input_grad , [%s]), "; - std::string output_input_grad_str = - paddle::string::Sprintf(TENSOR_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(input_grad)); + const char* TENSOR_INPUT_GRAD_TEMPLATE = " \n( input_grad , [%s]), "; + std::string output_input_grad_str = paddle::string::Sprintf( + TENSOR_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(input_grad)); output_str += output_input_grad_str; - const char *TENSOR_FILTER_GRAD_TEMPLATE = " \n( filter_grad , [%s]), "; - std::string output_filter_grad_str = - paddle::string::Sprintf(TENSOR_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(filter_grad)); + const char* TENSOR_FILTER_GRAD_TEMPLATE = " \n( filter_grad , [%s]), "; + std::string output_filter_grad_str = paddle::string::Sprintf( + TENSOR_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(filter_grad)); output_str += output_filter_grad_str; - const char *TENSOR_GRAD_OUT_GRAD_TEMPLATE = " \n( grad_out_grad , [%s]) "; + const char* TENSOR_GRAD_OUT_GRAD_TEMPLATE = " \n( grad_out_grad , [%s]) "; std::string output_grad_out_grad_str = - paddle::string::Sprintf(TENSOR_GRAD_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out_grad)); + paddle::string::Sprintf(TENSOR_GRAD_OUT_GRAD_TEMPLATE, + egr::EagerUtils::TensorStr(grad_out_grad)); output_str += output_grad_out_grad_str; - VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(4) << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this << ", " + << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } - // Return if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns); return returns; diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc index b0d707d25ebf7..1f3d61148d47a 100644 --- 
a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc @@ -596,8 +596,9 @@ MultiplyGradNode::operator()( output_str += output_y_grad_str; VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this << ", " + << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } // Return diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc index 18fe32c7cc4e0..ebff95acce72d 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc @@ -459,8 +459,9 @@ SyncBatchNormGradNode::operator()( output_str += output_bias_grad_str; VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this << ", " + << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } // Return diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py index 224322cd58a01..64319e2d0b53d 100644 --- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py +++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py @@ -207,7 +207,7 @@ class {} : public egr::GradNodeBase {{ VLOG(4) << \"Finish AD API GRAD: {}"; VLOG(6) << "gradnode_ptr = " << this; // LOG IF DEBUG - + {} // Return {} diff --git a/paddle/fluid/eager/grad_node_info.cc b/paddle/fluid/eager/grad_node_info.cc index badac75036458..0b244859a275b 100644 --- a/paddle/fluid/eager/grad_node_info.cc +++ b/paddle/fluid/eager/grad_node_info.cc @@ -576,7 +576,7 @@ std::vector> GradNodeBase::NextFunctions() { } uintptr_t GradNodeBase::GetThisPtr() const { - return reinterpret_cast(this); + return reinterpret_cast(this); } } // namespace egr From 7b26f4a144440c0cfb594150b704c92bba48c32f Mon Sep 17 00:00:00 2001 From: qiuwenbogdut Date: Tue, 19 Sep 2023 01:48:04 +0000 Subject: [PATCH 21/22] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=A0=BC=E5=BC=8F?= =?UTF-8?q?=E9=97=AE=E9=A2=982?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- paddle/fluid/eager/accumulation/accumulation_node.cc | 6 +++--- .../api/manual/eager_manual/nodes/add_n_node.cc | 8 ++++---- .../api/manual/eager_manual/nodes/conv2d_nodes.cc | 12 ++++++------ .../api/manual/eager_manual/nodes/multiply_node.cc | 6 +++--- .../eager_manual/nodes/sync_batch_norm_node.cc | 6 +++--- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/eager/accumulation/accumulation_node.cc b/paddle/fluid/eager/accumulation/accumulation_node.cc index ebe817d4765f4..799b99e7fcd92 100644 --- a/paddle/fluid/eager/accumulation/accumulation_node.cc +++ b/paddle/fluid/eager/accumulation/accumulation_node.cc @@ -189,9 +189,9 @@ GradNodeAccumulation::operator()( output_str += output_x_grad_str; VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " - << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr 
= " << this; + VLOG(6) << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } return {{grad_out}}; } diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc index 2f85cb6047cb6..6e53b38b9efa5 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc @@ -63,7 +63,7 @@ AddNGradNodeFinal::operator()( VLOG(3) << "Final State Running: AddNGradNodeFinal"; // dygraph function - for (auto& item : returns[0]) { + for (auto &item : returns[0]) { item = ::scale_ad_func(out_grad, phi::Scalar(1.0), 0.0, true); } @@ -94,9 +94,9 @@ AddNGradNodeFinal::operator()( VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " - << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this; + VLOG(6) << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns); diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc index 9dfa23d48996f..e672170ce6574 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc @@ -194,9 +194,9 @@ Conv2dGradNodeFinal::operator()( VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " - << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this; + VLOG(6) << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } // Return @@ -372,9 +372,9 @@ Conv2dDoubleGradNodeFinal::operator()( VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " - << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this; + VLOG(6) << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } // Return diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc index d269d69669392..caea3d5fd5b4f 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc @@ -596,9 +596,9 @@ MultiplyGradNode::operator()( output_str += output_y_grad_str; VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " - << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this; + VLOG(6) << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } // Return diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc index ebff95acce72d..80cc01b50fff2 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc @@ -459,9 +459,9 @@ SyncBatchNormGradNode::operator()( output_str += output_bias_grad_str; VLOG(4) << paddle::string::Sprintf( 
INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << "gradnode_ptr = " << this << ", " - << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); + VLOG(6) << "gradnode_ptr = " << this; + VLOG(6) << paddle::string::Sprintf( + INPUT_PRINT_TEMPLATE, input_str, output_str); } // Return From 988dece868fcb89b8c86f8d6aa966c70b742c368 Mon Sep 17 00:00:00 2001 From: qiuwenbogdut Date: Tue, 19 Sep 2023 11:57:51 +0000 Subject: [PATCH 22/22] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E4=B8=80=E4=B8=8B?= =?UTF-8?q?=E6=97=A5=E5=BF=97=E9=87=8D=E5=A4=8D=E8=BE=93=E5=87=BA=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- paddle/fluid/eager/accumulation/accumulation_node.cc | 4 +--- .../eager/api/manual/eager_manual/nodes/add_n_node.cc | 4 +--- .../eager/api/manual/eager_manual/nodes/conv2d_nodes.cc | 8 ++------ .../eager/api/manual/eager_manual/nodes/multiply_node.cc | 4 +--- .../api/manual/eager_manual/nodes/sync_batch_norm_node.cc | 4 +--- .../eager/auto_code_generator/generator/eager_gen.py | 1 - 6 files changed, 6 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/eager/accumulation/accumulation_node.cc b/paddle/fluid/eager/accumulation/accumulation_node.cc index 799b99e7fcd92..c2c09444aab2f 100644 --- a/paddle/fluid/eager/accumulation/accumulation_node.cc +++ b/paddle/fluid/eager/accumulation/accumulation_node.cc @@ -187,10 +187,8 @@ GradNodeAccumulation::operator()( std::string output_x_grad_str = paddle::string::Sprintf( TENSOR_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out)); output_str += output_x_grad_str; - VLOG(4) << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); VLOG(6) << "gradnode_ptr = " << this; - VLOG(6) << paddle::string::Sprintf( + VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); } return {{grad_out}}; diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc index 6e53b38b9efa5..e51cfc18087ca 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc @@ -92,10 +92,8 @@ AddNGradNodeFinal::operator()( TENSOR_OUTPUT_TEMPLATE, egr::EagerUtils::TensorStr(returns[0][0])); output_str += output_returns_str; - VLOG(4) << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); VLOG(6) << "gradnode_ptr = " << this; - VLOG(6) << paddle::string::Sprintf( + VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); } diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc index e672170ce6574..782afd230ea21 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc @@ -192,10 +192,8 @@ Conv2dGradNodeFinal::operator()( TENSOR_GRAD_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter)); output_str += output_grad_filter_str; - VLOG(4) << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); VLOG(6) << "gradnode_ptr = " << this; - VLOG(6) << paddle::string::Sprintf( + VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); } @@ -370,10 +368,8 @@ Conv2dDoubleGradNodeFinal::operator()( egr::EagerUtils::TensorStr(grad_out_grad)); output_str += output_grad_out_grad_str; - VLOG(4) << paddle::string::Sprintf( - 
INPUT_PRINT_TEMPLATE, input_str, output_str); VLOG(6) << "gradnode_ptr = " << this; - VLOG(6) << paddle::string::Sprintf( + VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); } diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc index caea3d5fd5b4f..ed83bb29714ff 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc @@ -594,10 +594,8 @@ MultiplyGradNode::operator()( std::string output_y_grad_str = paddle::string::Sprintf( TENSOR_Y_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(y_grad)); output_str += output_y_grad_str; - VLOG(4) << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); VLOG(6) << "gradnode_ptr = " << this; - VLOG(6) << paddle::string::Sprintf( + VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); } diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc index 80cc01b50fff2..05ca3f1a5a0cb 100644 --- a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc +++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc @@ -457,10 +457,8 @@ SyncBatchNormGradNode::operator()( std::string output_bias_grad_str = paddle::string::Sprintf( TENSOR_BIAS_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(bias_grad)); output_str += output_bias_grad_str; - VLOG(4) << paddle::string::Sprintf( - INPUT_PRINT_TEMPLATE, input_str, output_str); VLOG(6) << "gradnode_ptr = " << this; - VLOG(6) << paddle::string::Sprintf( + VLOG(4) << paddle::string::Sprintf( INPUT_PRINT_TEMPLATE, input_str, output_str); } diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py index 3f5812079e6e0..393547b108e40 100644 --- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py +++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py @@ -293,7 +293,6 @@ class {} : public egr::GradNodeBase {{ const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], \\n Output: [%s] }} \"; {} VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); - VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str); }} """
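
The logging pattern the series converges on — build the input/output dump only under VLOG_IS_ON(4), emit it once at level 4, and emit the node pointer separately at level 6 so the dump is never duplicated — can be reproduced outside Paddle with plain glog. A minimal sketch under that assumption; FakeGradNode and std::snprintf are hypothetical stand-ins for the real grad node classes and paddle::string::Sprintf:

    #include <cstdio>
    #include <string>
    #include <glog/logging.h>

    struct FakeGradNode {
      void LogIfDebug(const std::string& input_str, const std::string& output_str) {
        // Build the (potentially expensive) dump only when level 4 is enabled.
        if (VLOG_IS_ON(4)) {
          const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
          char buf[512];
          std::snprintf(buf, sizeof(buf), INPUT_PRINT_TEMPLATE,
                        input_str.c_str(), output_str.c_str());
          VLOG(6) << "gradnode_ptr = " << this;  // pointer only at the higher level
          VLOG(4) << buf;  // tensor dump printed exactly once, never duplicated
        }
      }
    };

    int main(int argc, char* argv[]) {
      google::InitGoogleLogging(argv[0]);
      FLAGS_logtostderr = true;
      FLAGS_v = 4;  // raise to 6 to also see the gradnode_ptr line
      FakeGradNode node;
      node.LogIfDebug("(grads[0][0], [shape=[2,2]])", "(grad_out, [shape=[2,2]])");
      return 0;
    }

With FLAGS_v = 4 only the tensor dump appears; raising it to 6 adds the gradnode_ptr line. This mirrors why the last patch could drop the duplicated VLOG(6) Sprintf call without losing any information.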
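
The GetThisPtr accessor touched in PATCH 20 reduces to a reinterpret_cast of the node's address to uintptr_t, which — given that this series exposes grad nodes to Python — plausibly serves as a stable integer identity for a node (e.g. to de-duplicate nodes returned by NextFunctions). A minimal sketch under that assumption; FakeGradNode is a hypothetical stand-in for egr::GradNodeBase:

    #include <cstdint>
    #include <iostream>

    class FakeGradNode {
     public:
      // Same cast as GradNodeBase::GetThisPtr(): the object's address as an
      // integer, safe to hand across the C++/Python boundary.
      uintptr_t GetThisPtr() const { return reinterpret_cast<uintptr_t>(this); }
    };

    int main() {
      FakeGradNode node;
      // Repeated calls on the same object yield the same integer, so the
      // value can serve as a node identity on the Python side.
      std::cout << std::hex << node.GetThisPtr() << "\n";
      std::cout << (node.GetThisPtr() == node.GetThisPtr()) << "\n";
      return 0;
    }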