Competition Task 8: PaddleViz capability extension - Tensor information display #56837

Merged
merged 28 commits on Sep 20, 2023
Changes from 23 commits
Commits (28):
260a4f7
[Experiment] Add an attribute to tensor; the attribute is a constant value of 1
qiuwenbogdut Jun 6, 2023
922f79f
Expose gradnode and add new gradnode methods (for testing), exposed to Python so the Python side can access them
qiuwenbogdut Jun 20, 2023
4887161
Develop the grad_fn and next_functions APIs, expose them to the Python side, and apply some standardization
qiuwenbogdut Jul 7, 2023
f54aff8
Add a unit test
qiuwenbogdut Jul 7, 2023
09e86f5
Merge branch 'develop' into dev_qiu
qiuwenbogdut Jul 7, 2023
fa9384c
Improve code style
qiuwenbogdut Jul 7, 2023
bef4cc6
Merge branch 'develop' into dev_qiu
qiuwenbogdut Jul 11, 2023
1c3ad39
Move the unit test file to the correct location
qiuwenbogdut Jul 11, 2023
d7925f6
Improve code style
qiuwenbogdut Jul 11, 2023
38a8645
Remove unused comments
qiuwenbogdut Jul 12, 2023
1059ae5
Fix "__main__ has no attribute" error
qiuwenbogdut Jul 13, 2023
bcb5730
Modify the unit test file
qiuwenbogdut Jul 19, 2023
05e8edf
Modify the unit test script (temp)
qiuwenbogdut Jul 20, 2023
ba77d9a
Fix the memory error caused by the grad_fn and next_functions APIs
qiuwenbogdut Jul 23, 2023
b5ebfaa
Update the unit test content
qiuwenbogdut Jul 24, 2023
72d32fa
Merge branch 'develop' into dev_qiu
qiuwenbogdut Jul 24, 2023
fd0c655
Fix code-style issues
qiuwenbogdut Jul 24, 2023
a442c1c
Initial version: add an interface for getting the node pointer value and print the information needed to build edges
qiuwenbogdut Aug 31, 2023
ca6a064
Add print information for backward nodes in eager_manual
qiuwenbogdut Sep 9, 2023
657412a
Do not modify the original log levels
qiuwenbogdut Sep 14, 2023
c86c61c
Remove unnecessary print information
qiuwenbogdut Sep 15, 2023
0fd9e8f
Do not modify the previous log levels (2)
qiuwenbogdut Sep 15, 2023
e6edf3d
Add retrieval of backward node address information and printing of edge information, without modifying the original log levels
qiuwenbogdut Sep 15, 2023
3160ca4
Merge branch 'develop' into dev_qiu
qiuwenbogdut Sep 15, 2023
9c015dd
Improve code formatting
qiuwenbogdut Sep 15, 2023
daa5381
Improve code formatting
qiuwenbogdut Sep 15, 2023
7b26f4a
Fix formatting issues (2)
qiuwenbogdut Sep 19, 2023
988dece
Fix duplicated log output
qiuwenbogdut Sep 19, 2023
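
Taken together, the commits above expose grad_fn, next_functions, and a grad-node pointer accessor on eager Tensors so that PaddleViz can reconstruct the backward graph. Below is a minimal, hedged sketch of how these properties might be traversed from Python, assuming the bindings introduced in the diffs that follow (grad_fn, next_functions, node_this_ptr); the tensor values and printed output are illustrative only, not part of this PR.

# Hedged sketch: walking the backward graph through the properties this PR
# exposes. Names follow the bindings shown in the diffs below; they are not
# presented here as a stable public API.
import paddle

x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
y = (x * x).sum()

node = y.grad_fn  # root grad node of the backward graph (None if absent)
while node is not None:
    print(node.name(), hex(node.node_this_ptr()))
    nexts = node.next_functions  # downstream grad nodes
    node = nexts[0] if nexts and nexts[0] is not None else None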
7 changes: 6 additions & 1 deletion paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -132,21 +132,26 @@ GradNodeAccumulation::operator()(
if (ReduceHooksRegistered()) {
ApplyReduceHooks();
}

VLOG(3) << "Finish AD API Grad: GradNodeAccumulation";
if (VLOG_IS_ON(4)) {
const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], Output: [%s] } ";

std::string input_str = "";
std::string output_str = "";

const char* TENSOR_OUT_GRAD_TEMPLATE = "(grads[0][0], [%s]), ";
std::string input_out_grad_str = paddle::string::Sprintf(
TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grads[0][0]));
TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grads[0][0]));
input_str += input_out_grad_str;
const char* TENSOR_X_GRAD_TEMPLATE = "(grad_out, [%s]), ";
std::string output_x_grad_str = paddle::string::Sprintf(
TENSOR_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
output_str += output_x_grad_str;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}
return {{grad_out}};
}
24 changes: 24 additions & 0 deletions paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc
@@ -72,6 +72,30 @@ AddNGradNodeFinal::operator()(
egr::CheckTensorHasNanOrInf("add_n_grad", returns);
}

if (VLOG_IS_ON(4)) {
const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char *TENSOR_INPUT_TEMPLATE = " \n( x , [%s]), ";
std::string input_x_str =
paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(x));
input_str += input_x_str;

const char *TENSOR_OUT_GRAD_TEMPLATE = " \n( out_grad , [%s]), ";
std::string input_out_grad_str =
paddle::string::Sprintf(TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(out_grad));
input_str += input_out_grad_str;

const char *TENSOR_OUTPUT_TEMPLATE = " \n ( returns , [%s]), ";
std::string output_returns_str = paddle::string::Sprintf(
TENSOR_OUTPUT_TEMPLATE, egr::EagerUtils::TensorStr(returns[0][0]));
output_str += output_returns_str;

VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}

if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
}
86 changes: 86 additions & 0 deletions paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc
@@ -162,6 +162,41 @@ Conv2dGradNodeFinal::operator()(
// Set TensorWrappers for Forward Outputs if needed
}

if (VLOG_IS_ON(4)) {
const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char *TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), ";
std::string input_input_str =
paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input));
input_str += input_input_str;

const char *TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), ";
std::string input_filter_str =
paddle::string::Sprintf(TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter));
input_str += input_filter_str;

const char *TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), ";
std::string input_grad_out_str =
paddle::string::Sprintf(TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
input_str += input_grad_out_str;

const char *TENSOR_GRAD_INPUT_TEMPLATE = " \n ( grad_input , [%s]), ";
std::string output_grad_input_str = paddle::string::Sprintf(
TENSOR_GRAD_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_input));
output_str += output_grad_input_str;

const char *TENSOR_GRAD_FILTER_TEMPLATE = " \n ( grad_filter , [%s]), ";
std::string output_grad_filter_str = paddle::string::Sprintf(
TENSOR_GRAD_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter));
output_str += output_grad_filter_str;

VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}


// Return
if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
@@ -283,6 +318,57 @@ Conv2dDoubleGradNodeFinal::operator()(

// Create Grad Node


if (VLOG_IS_ON(4)) {
const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char *TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), ";
std::string input_input_str =
paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input));
input_str += input_input_str;

const char *TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), ";
std::string input_filter_str =
paddle::string::Sprintf(TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter));
input_str += input_filter_str;

const char *TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), ";
std::string input_grad_out_str =
paddle::string::Sprintf(TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
input_str += input_grad_out_str;

const char *TENSOR_GRAD_INPUT_GRAD_TEMPLATE = " \n( grad_input_grad , [%s]), ";
std::string input_grad_input_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_input_grad));
input_str += input_grad_input_grad_str;

const char *TENSOR_GRAD_FILTER_GRAD_TEMPLATE = " \n( grad_filter_grad , [%s]), ";
std::string input_grad_filter_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter_grad));
input_str += input_grad_filter_grad_str;

const char *TENSOR_INPUT_GRAD_TEMPLATE = " \n( input_grad , [%s]), ";
std::string output_input_grad_str =
paddle::string::Sprintf(TENSOR_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(input_grad));
output_str += output_input_grad_str;

const char *TENSOR_FILTER_GRAD_TEMPLATE = " \n( filter_grad , [%s]), ";
std::string output_filter_grad_str =
paddle::string::Sprintf(TENSOR_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(filter_grad));
output_str += output_filter_grad_str;

const char *TENSOR_GRAD_OUT_GRAD_TEMPLATE = " \n( grad_out_grad , [%s]) ";
std::string output_grad_out_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out_grad));
output_str += output_grad_out_grad_str;

VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}


// Return
if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
@@ -596,6 +596,8 @@ MultiplyGradNode::operator()(
output_str += output_y_grad_str;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}

// Return
@@ -459,6 +459,8 @@ SyncBatchNormGradNode::operator()(
output_str += output_bias_grad_str;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}

// Return
4 changes: 4 additions & 0 deletions paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -176,6 +176,7 @@ class {} : public egr::GradNodeBase {{
GRAD_FUNCTION_TEMPLATE = """
paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize> {}::operator()(paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>& grads, bool create_graph, bool is_new_grad) {{
VLOG(3) << \"Running AD API GRAD: \" << \"{}\";

// Fill Zero For GradIn Tensors
{}
// Apply Gradient Hooks
@@ -204,7 +205,9 @@ class {} : public egr::GradNodeBase {{
// Create Grad Node
{}
VLOG(4) << \"Finish AD API GRAD: {}";
VLOG(6) << "gradnode_ptr = " << this;
// LOG IF DEBUG

{}
// Return
{}
@@ -263,6 +266,7 @@ class {} : public egr::GradNodeBase {{
const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], \\n Output: [%s] }} \";
{}
VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
Review thread:

Contributor: The level of this VLOG cannot be changed; it must stay at 4.

Contributor: These two lines look the same. What is the purpose of the newly added VLOG(6)?

Contributor Author: The original log level of 4 is not high enough, so some of the required information was not printed out; that is why an additional log print at level 6 was added. (screenshot of the log output attached)

Contributor (zyfncg, Sep 19, 2023): (screenshot attached) The information printed here is duplicated, isn't it?

Contributor Author: Yes, there is indeed a duplicate-printing problem; I will fix it here.

}}
"""

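
The review thread above turns on log levels: the per-node summary stays at VLOG(4), while the extra "gradnode_ptr" line is emitted at VLOG(6). A minimal sketch of surfacing both follows, assuming Paddle's verbose logging is gated by the GLOG_v environment variable (set before paddle is imported), as is typical for glog-based builds; this is not part of the PR itself.

# Hedged sketch: enabling verbose logs so the VLOG(4) summaries and the
# VLOG(6) "gradnode_ptr = ..." lines added in this PR become visible.
# Assumes GLOG_v gates VLOG(n) for n <= GLOG_v.
import os

os.environ["GLOG_v"] = "6"  # must be set before importing paddle

import paddle

x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
y = (x * x).sum()
y.backward()  # the backward pass runs the grad nodes that emit the logs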
4 changes: 4 additions & 0 deletions paddle/fluid/eager/grad_node_info.cc
@@ -575,4 +575,8 @@ std::vector<std::shared_ptr<GradNodeBase>> GradNodeBase::NextFunctions() {
return next_nodes;
}

uintptr_t GradNodeBase::GetThisPtr() const {
return reinterpret_cast<uintptr_t>(this);
}

} // namespace egr
2 changes: 2 additions & 0 deletions paddle/fluid/eager/grad_node_info.h
@@ -253,6 +253,8 @@ class GradNodeBase {

std::vector<std::shared_ptr<egr::GradNodeBase>> NextFunctions();

uintptr_t GetThisPtr() const;

/**
* Apply GradientHook
* **/
6 changes: 3 additions & 3 deletions paddle/fluid/eager/utils.h
@@ -255,7 +255,7 @@ class EagerUtils {
}
if (VLOG_IS_ON(11)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d, Ptr: %d "
"{Name: %s, Initialized: %d, Ptr: %d, "
"TensorInfo: [ %s ], Value:[ %s ], ADInfo:[ %s ]}";
auto* ad_meta = nullable_autograd_meta(t);
if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
@@ -306,7 +306,7 @@ class EagerUtils {
}
} else if (VLOG_IS_ON(6)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d, Ptr: %d "
"{Name: %s, Initialized: %d, Ptr: %d,"
"TensorInfo: [ %s ], ADInfo:[ %s ]}";
auto* ad_meta = nullable_autograd_meta(t);
if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
@@ -333,7 +333,7 @@ class EagerUtils {
}
} else if (VLOG_IS_ON(5)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d , Ptr: %d "
"{Name: %s, Initialized: %d , Ptr: %d, "
"TensorInfo: [ %s ]}";
return paddle::string::Sprintf(TENSOR_PRINT_TEMPLATE,
tensor_name_str,
9 changes: 4 additions & 5 deletions paddle/fluid/pybind/eager_properties.cc
@@ -318,17 +318,16 @@ PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) {

if (meta) {
// Get the GradNode from meta
auto grad_node = meta->GradNode(); // Convert GradNode to a Python object
// The conversion will depend on the structure of GradNode.

if (!grad_node) {
auto grad_node_ptr = meta->GetMutableGradNode();
if (!grad_node_ptr) {
Py_INCREF(Py_None);
return Py_None;
}

PyObject* py_grad_node = ToPyObject(grad_node);
PyObject* py_grad_node = ToPyObject(grad_node_ptr);

return py_grad_node;

} else {
// If meta does not exist, return an appropriate Python object (e.g., None
// or a special value).
5 changes: 2 additions & 3 deletions paddle/fluid/pybind/eager_utils.cc
@@ -1006,10 +1006,9 @@ paddle::optional<paddle::Tensor> GetOptionalTensorFromArgs(
}
}

PyObject* ToPyObject(egr::GradNodeBase* grad_node) {
PyObject* ToPyObject(std::shared_ptr<egr::GradNodeBase> grad_node) {
py::object py_obj = py::cast(grad_node, py::return_value_policy::reference);
py::handle py_handle = py::handle(py_obj);
PyObject* py_grad_node = py_handle.ptr();
PyObject* py_grad_node = py_obj.release().ptr();
Py_INCREF(py_grad_node);
return py_grad_node;
}
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/eager_utils.h
@@ -126,7 +126,7 @@ PyObject* ToPyObject(
const std::unordered_map<std::string, std::vector<std::string>>& value);
PyObject* ToPyObject(const paddle::framework::Vocab& value);

PyObject* ToPyObject(egr::GradNodeBase* grad_node);
PyObject* ToPyObject(std::shared_ptr<egr::GradNodeBase> grad_node);

class PyTensorHook : public egr::TensorHook {
public:
26 changes: 20 additions & 6 deletions paddle/fluid/pybind/pybind.cc
@@ -778,12 +778,26 @@ PYBIND11_MODULE(libpaddle, m) {
}
});

py::class_<egr::GradNodeBase>(m, "GradNodeBase")
.def("name", &egr::GradNodeBase::name)
.def_property_readonly("next_functions",
&egr::GradNodeBase::NextFunctions)
.def("input_meta", &egr::GradNodeBase::InputMeta)
.def("output_meta", &egr::GradNodeBase::OutputMeta);
py::class_<egr::GradNodeBase, std::shared_ptr<egr::GradNodeBase>>(
m, "GradNodeBase")
.def("name",
[](const std::shared_ptr<egr::GradNodeBase> &self) {
return self->name();
})
.def_property_readonly(
"next_functions",
[](const std::shared_ptr<egr::GradNodeBase> &self) {
return self->NextFunctions();
})

.def("node_this_ptr", &egr::GradNodeBase::GetThisPtr)
Review thread:

Contributor: The name node_this_ptr feels a bit odd; think about whether there is a better one.

Contributor Author: @wanghuancoder, any suggestions on the naming?

Contributor: How about changing it to .def("node_ptr", &egr::GradNodeBase::GetPtr)? Don't change it in this PR; submit a new PR for the rename and merge this PR first.

.def("input_meta",
[](const std::shared_ptr<egr::GradNodeBase> &self) {
return self->InputMeta();
})
.def("output_meta", [](const std::shared_ptr<egr::GradNodeBase> &self) {
return self->OutputMeta();
});

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
m.def("cudnn_version", &platform::DnnVersion);
Expand Down
5 changes: 5 additions & 0 deletions test/legacy_test/test_grad_fn_and_next_functions.py
@@ -83,6 +83,11 @@ def check_func(self, grad_fn, grad_fn_json) -> None:
grad_fn_json (dict): grad_node_json of node
"""
self.assertEqual(grad_fn.name(), grad_fn_json["func_name"])
# Recursively test other nodes
if hasattr(grad_fn, 'next_functions') and grad_fn.next_functions[0]:
next_funcs_json = grad_fn_json["next_funcs"]
for u in grad_fn.next_functions:
self.check_func(u, next_funcs_json[u.name()])


if __name__ == "__main__":
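
The recursive check added above assumes each node in the fixture is described by a func_name and nests its successors under next_funcs, keyed by node name. A hedged illustration of that expected shape follows; the node names are guesses, since the actual fixture is not shown in this diff.

# Hedged illustration of the grad_fn_json layout the recursive check walks.
grad_fn_json = {
    "func_name": "MultiplyGradNode",
    "next_funcs": {
        "GradNodeAccumulation": {
            "func_name": "GradNodeAccumulation",
            "next_funcs": {},
        },
    },
}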