
Add printing of backward-graph edge information; add an interface for obtaining the backward node's pointer value
qiuwenbogdut committed Sep 10, 2023
2 parents b70357d + ca6a064 commit fb84c53
Showing 10 changed files with 150 additions and 12 deletions.
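
In summary: the per-node debug logging in the hand-written and generated backward nodes is moved from VLOG level 4 to level 6 and now also prints each grad node's address as gradnode_ptr; GradNodeBase gains a GetThisPtr() accessor that returns the node's address as a uintptr_t and is exposed to Python as node_this_ptr; and NextFunctions() prints the pointer of every next_node it visits, so the edges of the backward graph can be traced.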
14 changes: 11 additions & 3 deletions paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -171,20 +171,28 @@ GradNodeAccumulation::operator()(
if (ReduceHooksRegistered()) {
ApplyReduceHooks();
}

// Capture this node's pointer value as a string (for debugging).
std::stringstream ss;
ss << this;
std::string this_pointer = ss.str();

VLOG(3) << "Finish AD API Grad: GradNodeAccumulation";
- if (VLOG_IS_ON(4)) {
+ VLOG(6) << "gradnode_ptr = " << this;
+ if (VLOG_IS_ON(6)) {
const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], Output: [%s] } ";

std::string input_str = "";
std::string output_str = "";

const char* TENSOR_OUT_GRAD_TEMPLATE = "(grads[0][0], [%s]), ";
std::string input_out_grad_str = paddle::string::Sprintf(
TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grads[0][0]));
input_str += input_out_grad_str;
const char* TENSOR_X_GRAD_TEMPLATE = "(grad_out, [%s]), ";
std::string output_x_grad_str = paddle::string::Sprintf(
TENSOR_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
output_str += output_x_grad_str;
- VLOG(4) << paddle::string::Sprintf(
+ VLOG(6) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}
return {{grad_out}};
24 changes: 24 additions & 0 deletions paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc
@@ -72,6 +72,30 @@ AddNGradNodeFinal::operator()(
egr::CheckTensorHasNanOrInf("add_n_grad", returns);
}

VLOG(6) << "gradnode_ptr = " << this;
if (VLOG_IS_ON(6)) {
const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char *TENSOR_INPUT_TEMPLATE = " \n( x , [%s]), ";
std::string input_x_str =
paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(x));
input_str += input_x_str;

const char *TENSOR_OUT_GRAD_TEMPLATE = " \n( out_grad , [%s]), ";
std::string input_out_grad_str =
paddle::string::Sprintf(TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(out_grad));
input_str += input_out_grad_str;

const char *TENSOR_OUTPUT_TEMPLATE = " \n ( returns , [%s]), ";
std::string output_returns_str = paddle::string::Sprintf(
TENSOR_OUTPUT_TEMPLATE, egr::EagerUtils::TensorStr(returns[0][0]));
output_str += output_returns_str;

VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}

if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
}
87 changes: 87 additions & 0 deletions paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc
@@ -76,6 +76,7 @@ Conv2dGradNodeFinal::operator()(

// Inplace Strategy


// Call grad_api function
VLOG(3) << "Final State Running: Conv2dGradNodeFinal";

@@ -162,6 +163,41 @@ Conv2dGradNodeFinal::operator()(
// Set TensorWrappers for Forward Outputs if needed
}

VLOG(6) << "gradnode_ptr = " << this;
if (VLOG_IS_ON(6)) {
const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char *TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), ";
std::string input_input_str =
paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input));
input_str += input_input_str;

const char *TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), ";
std::string input_filter_str =
paddle::string::Sprintf(TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter));
input_str += input_filter_str;

const char *TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), ";
std::string input_grad_out_str =
paddle::string::Sprintf(TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
input_str += input_grad_out_str;

const char *TENSOR_GRAD_INPUT_TEMPLATE = " \n ( grad_input , [%s]), ";
std::string output_grad_input_str = paddle::string::Sprintf(
TENSOR_GRAD_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_input));
output_str += output_grad_input_str;

const char *TENSOR_GRAD_FILTER_TEMPLATE = " \n ( grad_filter , [%s]), ";
std::string output_grad_filter_str = paddle::string::Sprintf(
TENSOR_GRAD_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter));
output_str += output_grad_filter_str;

VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}


// Return
if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
@@ -283,6 +319,57 @@ Conv2dDoubleGradNodeFinal::operator()(

// Create Grad Node


VLOG(6) << "gradnode_ptr = " << this;
if (VLOG_IS_ON(6)) {
const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char *TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), ";
std::string input_input_str =
paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input));
input_str += input_input_str;

const char *TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), ";
std::string input_filter_str =
paddle::string::Sprintf(TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter));
input_str += input_filter_str;

const char *TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), ";
std::string input_grad_out_str =
paddle::string::Sprintf(TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
input_str += input_grad_out_str;

const char *TENSOR_GRAD_INPUT_GRAD_TEMPLATE = " \n( grad_input_grad , [%s]), ";
std::string input_grad_input_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_input_grad));
input_str += input_grad_input_grad_str;

const char *TENSOR_GRAD_FILTER_GRAD_TEMPLATE = " \n( grad_filter_grad , [%s]), ";
std::string input_grad_filter_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter_grad));
input_str += input_grad_filter_grad_str;

const char *TENSOR_INPUT_GRAD_TEMPLATE = " \n( input_grad , [%s]), ";
std::string output_input_grad_str =
paddle::string::Sprintf(TENSOR_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(input_grad));
output_str += output_input_grad_str;

const char *TENSOR_FILTER_GRAD_TEMPLATE = " \n( filter_grad , [%s]), ";
std::string output_filter_grad_str =
paddle::string::Sprintf(TENSOR_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(filter_grad));
output_str += output_filter_grad_str;

const char *TENSOR_GRAD_OUT_GRAD_TEMPLATE = " \n( grad_out_grad , [%s]) ";
std::string output_grad_out_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out_grad));
output_str += output_grad_out_grad_str;

VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}


// Return
if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
@@ -566,10 +566,11 @@ MultiplyGradNode::operator()(
"op. If you don't intend calculating higher order"
"derivatives, please set `create_graph`to False."));
}
VLOG(4) << "Finish AD API GRAD: multiply_grad";
VLOG(6) << "Finish AD API GRAD: multiply_grad";
// LOG IF DEBUG

- if (VLOG_IS_ON(4)) {
+ VLOG(6) << "gradnode_ptr = " << this;
+ if (VLOG_IS_ON(6)) {
const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";

std::string input_str = "";
@@ -409,7 +409,8 @@ SyncBatchNormGradNode::operator()(
VLOG(4) << "Finish AD API GRAD: sync_batch_norm_grad";
// LOG IF DEBUG

- if (VLOG_IS_ON(4)) {
+ VLOG(6) << "gradnode_ptr = " << this;
+ if (VLOG_IS_ON(6)) {
const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";

std::string input_str = "";
@@ -457,7 +458,7 @@ SyncBatchNormGradNode::operator()(
std::string output_bias_grad_str = paddle::string::Sprintf(
TENSOR_BIAS_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(bias_grad));
output_str += output_bias_grad_str;
- VLOG(4) << paddle::string::Sprintf(
+ VLOG(6) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}

11 changes: 9 additions & 2 deletions paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -176,6 +176,11 @@ class {} : public egr::GradNodeBase {{
GRAD_FUNCTION_TEMPLATE = """
paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize> {}::operator()(paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>& grads, bool create_graph, bool is_new_grad) {{
VLOG(3) << \"Running AD API GRAD: \" << \"{}\";
//std::cout << "The pointer to the current object: " << this << std::endl;
//std::stringstream ss;
//ss << this;
//std::string this_pointer = ss.str();
// Fill Zero For GradIn Tensors
{}
// Apply Gradient Hooks
@@ -204,7 +209,9 @@ class {} : public egr::GradNodeBase {{
// Create Grad Node
{}
VLOG(4) << \"Finish AD API GRAD: {}";
VLOG(6) << "gradnode_ptr = " << this;
// LOG IF DEBUG
{}
// Return
{}
@@ -259,10 +266,10 @@ class {} : public egr::GradNodeBase {{
"""

AFTER_LOG_PRINT_TEMPLATE = """
- if(VLOG_IS_ON(4)){{
+ if(VLOG_IS_ON(6)){{
const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], \\n Output: [%s] }} \";
{}
- VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
+ VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}}
"""

6 changes: 6 additions & 0 deletions paddle/fluid/eager/grad_node_info.cc
@@ -594,11 +594,17 @@ std::vector<std::shared_ptr<GradNodeBase>> GradNodeBase::NextFunctions() {
for (const GradSlotMeta& meta : meta_list) {
const auto& edge = meta.GetEdge();
std::shared_ptr<GradNodeBase> next_node = edge.GetMutableGradNode();
// Print the pointer value of the next_node object.
std::cout << "next_node: " << next_node << std::endl;
next_nodes.push_back(next_node);
}
}

return next_nodes;
}

uintptr_t GradNodeBase::GetThisPtr() const {
return reinterpret_cast<uintptr_t>(this);
}

} // namespace egr
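
Taken together, NextFunctions() and the new GetThisPtr() make it possible to walk the backward graph and report its edges from C++. A minimal sketch follows (not part of this commit; the helper name PrintBackwardGraph and the way the root node is obtained are illustrative assumptions):

    #include <iostream>
    #include <memory>
    #include <queue>
    #include <unordered_set>

    #include "paddle/fluid/eager/grad_node_info.h"

    // Breadth-first walk over the backward graph, printing each node's
    // pointer value exactly once. `root` is assumed to be obtained from a
    // tensor's autograd meta (that lookup is not shown here).
    void PrintBackwardGraph(const std::shared_ptr<egr::GradNodeBase>& root) {
      std::queue<std::shared_ptr<egr::GradNodeBase>> pending;
      std::unordered_set<uintptr_t> visited;
      pending.push(root);
      while (!pending.empty()) {
        std::shared_ptr<egr::GradNodeBase> node = pending.front();
        pending.pop();
        if (!node || !visited.insert(node->GetThisPtr()).second) continue;
        std::cout << "node @ 0x" << std::hex << node->GetThisPtr() << std::dec
                  << std::endl;
        // Each element of NextFunctions() is one edge of the backward graph.
        for (auto& next : node->NextFunctions()) {
          pending.push(next);
        }
      }
    }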
2 changes: 2 additions & 0 deletions paddle/fluid/eager/grad_node_info.h
@@ -253,6 +253,8 @@ class GradNodeBase {

std::vector<std::shared_ptr<egr::GradNodeBase>> NextFunctions();

uintptr_t GetThisPtr() const;

/**
* Apply GradientHook
* **/
6 changes: 3 additions & 3 deletions paddle/fluid/eager/utils.h
@@ -255,7 +255,7 @@ class EagerUtils {
}
if (VLOG_IS_ON(11)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d, Ptr: %d "
"{Name: %s, Initialized: %d, Ptr: %d, "
"TensorInfo: [ %s ], Value:[ %s ], ADInfo:[ %s ]}";
auto* ad_meta = nullable_autograd_meta(t);
if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
@@ -306,7 +306,7 @@ class EagerUtils {
}
} else if (VLOG_IS_ON(6)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d, Ptr: %d "
"{Name: %s, Initialized: %d, Ptr: %d,"
"TensorInfo: [ %s ], ADInfo:[ %s ]}";
auto* ad_meta = nullable_autograd_meta(t);
if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
@@ -333,7 +333,7 @@ class EagerUtils {
}
} else if (VLOG_IS_ON(5)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d , Ptr: %d "
"{Name: %s, Initialized: %d , Ptr: %d, "
"TensorInfo: [ %s ]}";
return paddle::string::Sprintf(TENSOR_PRINT_TEMPLATE,
tensor_name_str,
2 changes: 2 additions & 0 deletions paddle/fluid/pybind/pybind.cc
@@ -854,6 +854,8 @@ PYBIND11_MODULE(libpaddle, m) {
[](const std::shared_ptr<egr::GradNodeBase> &self) {
return self->NextFunctions();
})

.def("node_this_ptr", &egr::GradNodeBase::GetThisPtr)
.def("input_meta",
[](const std::shared_ptr<egr::GradNodeBase> &self) {
return self->InputMeta();
