Competition Task 8: paddleviz capability extension - Tensor information display (PaddlePaddle#56837)
for paddleviz
qiuwenbogdut authored and iosmers committed Sep 21, 2023
1 parent 08947ac commit 29c89b5
Showing 11 changed files with 141 additions and 10 deletions.
4 changes: 4 additions & 0 deletions paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -171,19 +171,23 @@ GradNodeAccumulation::operator()(
if (ReduceHooksRegistered()) {
ApplyReduceHooks();
}

VLOG(3) << "Finish AD API Grad: GradNodeAccumulation";
if (VLOG_IS_ON(4)) {
const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], Output: [%s] } ";

std::string input_str = "";
std::string output_str = "";

const char* TENSOR_OUT_GRAD_TEMPLATE = "(grads[0][0], [%s]), ";
std::string input_out_grad_str = paddle::string::Sprintf(
TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grads[0][0]));
input_str += input_out_grad_str;
const char* TENSOR_X_GRAD_TEMPLATE = "(grad_out, [%s]), ";
std::string output_x_grad_str = paddle::string::Sprintf(
TENSOR_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
output_str += output_x_grad_str;
VLOG(6) << "gradnode_ptr = " << this;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}
37 changes: 31 additions & 6 deletions paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc
@@ -28,8 +28,8 @@ PHI_DECLARE_bool(check_nan_inf);

paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
AddNGradNodeFinal::operator()(
paddle::small_vector<std::vector<paddle::Tensor>,
egr::kSlotSmallVectorSize>& grads,
paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
&grads,
bool create_graph,
bool is_new_grad) {
// Fill Zero For GradIn Tensors
@@ -39,18 +39,18 @@ AddNGradNodeFinal::operator()(

// Collect GradIn Tensors, Attrs and Recovered TensorWrappers
auto x = egr::EagerUtils::RecoverTensorWrapper(&this->x_);
auto& out_grad = hooked_grads[0][0];
auto &out_grad = hooked_grads[0][0];
// Prepare Grad function call

const auto& out_metas = OutputMeta();
const auto &out_metas = OutputMeta();
paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
returns(1);
for (int i = 0; i < 1; ++i) {
out_metas[i].empty() ? returns[i].resize(1)
: returns[i].resize(out_metas[i].size());
}

std::vector<paddle::Tensor*> api_output_0;
std::vector<paddle::Tensor *> api_output_0;
api_output_0.reserve(returns[0].size());
for (size_t i = 0; i < returns[0].size(); ++i) {
if (out_metas[0].empty() || out_metas[0][i].IsStopGradient()) {
@@ -63,7 +63,7 @@ AddNGradNodeFinal::operator()(
VLOG(3) << "Final State Running: AddNGradNodeFinal";

// dygraph function
for (auto& item : returns[0]) {
for (auto &item : returns[0]) {
item = ::scale_ad_func(out_grad, phi::Scalar(1.0), 0.0, true);
}

@@ -72,6 +72,31 @@ AddNGradNodeFinal::operator()(
egr::CheckTensorHasNanOrInf("add_n_grad", returns);
}

if (VLOG_IS_ON(4)) {
const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char *TENSOR_INPUT_TEMPLATE = " \n( x , [%s]), ";
std::string input_x_str = paddle::string::Sprintf(
TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(x));
input_str += input_x_str;

const char *TENSOR_OUT_GRAD_TEMPLATE = " \n( out_grad , [%s]), ";
std::string input_out_grad_str = paddle::string::Sprintf(
TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(out_grad));
input_str += input_out_grad_str;

const char *TENSOR_OUTPUT_TEMPLATE = " \n ( returns , [%s]), ";
std::string output_returns_str = paddle::string::Sprintf(
TENSOR_OUTPUT_TEMPLATE, egr::EagerUtils::TensorStr(returns[0][0]));
output_str += output_returns_str;

VLOG(6) << "gradnode_ptr = " << this;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}

if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
}
90 changes: 90 additions & 0 deletions paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc
@@ -162,6 +162,41 @@ Conv2dGradNodeFinal::operator()(
// Set TensorWrappers for Forward Outputs if needed
}

if (VLOG_IS_ON(4)) {
const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char* TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), ";
std::string input_input_str = paddle::string::Sprintf(
TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input));
input_str += input_input_str;

const char* TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), ";
std::string input_filter_str = paddle::string::Sprintf(
TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter));
input_str += input_filter_str;

const char* TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), ";
std::string input_grad_out_str = paddle::string::Sprintf(
TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
input_str += input_grad_out_str;

const char* TENSOR_GRAD_INPUT_TEMPLATE = " \n ( grad_input , [%s]), ";
std::string output_grad_input_str = paddle::string::Sprintf(
TENSOR_GRAD_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_input));
output_str += output_grad_input_str;

const char* TENSOR_GRAD_FILTER_TEMPLATE = " \n ( grad_filter , [%s]), ";
std::string output_grad_filter_str = paddle::string::Sprintf(
TENSOR_GRAD_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter));
output_str += output_grad_filter_str;

VLOG(6) << "gradnode_ptr = " << this;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}

// Return
if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
@@ -283,6 +318,61 @@ Conv2dDoubleGradNodeFinal::operator()(

// Create Grad Node

if (VLOG_IS_ON(4)) {
const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char* TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), ";
std::string input_input_str = paddle::string::Sprintf(
TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input));
input_str += input_input_str;

const char* TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), ";
std::string input_filter_str = paddle::string::Sprintf(
TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter));
input_str += input_filter_str;

const char* TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), ";
std::string input_grad_out_str = paddle::string::Sprintf(
TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
input_str += input_grad_out_str;

const char* TENSOR_GRAD_INPUT_GRAD_TEMPLATE =
" \n( grad_input_grad , [%s]), ";
std::string input_grad_input_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_INPUT_GRAD_TEMPLATE,
egr::EagerUtils::TensorStr(grad_input_grad));
input_str += input_grad_input_grad_str;

const char* TENSOR_GRAD_FILTER_GRAD_TEMPLATE =
" \n( grad_filter_grad , [%s]), ";
std::string input_grad_filter_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_FILTER_GRAD_TEMPLATE,
egr::EagerUtils::TensorStr(grad_filter_grad));
input_str += input_grad_filter_grad_str;

const char* TENSOR_INPUT_GRAD_TEMPLATE = " \n( input_grad , [%s]), ";
std::string output_input_grad_str = paddle::string::Sprintf(
TENSOR_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(input_grad));
output_str += output_input_grad_str;

const char* TENSOR_FILTER_GRAD_TEMPLATE = " \n( filter_grad , [%s]), ";
std::string output_filter_grad_str = paddle::string::Sprintf(
TENSOR_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(filter_grad));
output_str += output_filter_grad_str;

const char* TENSOR_GRAD_OUT_GRAD_TEMPLATE = " \n( grad_out_grad , [%s]) ";
std::string output_grad_out_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_OUT_GRAD_TEMPLATE,
egr::EagerUtils::TensorStr(grad_out_grad));
output_str += output_grad_out_grad_str;

VLOG(6) << "gradnode_ptr = " << this;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}

// Return
if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
@@ -594,6 +594,7 @@ MultiplyGradNode::operator()(
std::string output_y_grad_str = paddle::string::Sprintf(
TENSOR_Y_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(y_grad));
output_str += output_y_grad_str;
VLOG(6) << "gradnode_ptr = " << this;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}
@@ -457,6 +457,7 @@ SyncBatchNormGradNode::operator()(
std::string output_bias_grad_str = paddle::string::Sprintf(
TENSOR_BIAS_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(bias_grad));
output_str += output_bias_grad_str;
VLOG(6) << "gradnode_ptr = " << this;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}
3 changes: 3 additions & 0 deletions paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -200,6 +200,7 @@ class {} : public egr::GradNodeBase {{
GRAD_FUNCTION_TEMPLATE = """
paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize> {}::operator()(paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>& grads, bool create_graph, bool is_new_grad) {{
VLOG(3) << \"Running AD API GRAD: \" << \"{}\";
// Fill Zero For GradIn Tensors
{}
// Apply Gradient Hooks
@@ -230,7 +231,9 @@ class {} : public egr::GradNodeBase {{
// Create Grad Node
{}
VLOG(4) << \"Finish AD API GRAD: {}";
VLOG(6) << "gradnode_ptr = " << this;
// LOG IF DEBUG
{}
// Return
{}
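
The templates in eager_gen.py are filled in with Python's str.format, which is why literal C++ braces appear doubled ({{ and }}) while bare {} pairs are substitution slots for the generated class name, zero-fill code, hook handling, and so on. The following is a toy sketch of that escaping convention only; the template text is a stand-in, not the real GRAD_FUNCTION_TEMPLATE.

# Toy illustration of the brace-escaping convention used by the eager_gen.py
# templates: "{}" is a format slot, "{{" / "}}" become literal braces in the
# emitted C++ source. This is a stand-in, not the actual generator template.
TOY_GRAD_FUNCTION_TEMPLATE = """
{}::operator()() {{
  VLOG(6) << "gradnode_ptr = " << this;
}}
"""

print(TOY_GRAD_FUNCTION_TEMPLATE.format("AddNGradNodeFinal"))
# Emits:
#   AddNGradNodeFinal::operator()() {
#     VLOG(6) << "gradnode_ptr = " << this;
#   }
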
4 changes: 4 additions & 0 deletions paddle/fluid/eager/grad_node_info.cc
@@ -604,4 +604,8 @@ std::vector<std::shared_ptr<GradNodeBase>> GradNodeBase::NextFunctions() {
return next_nodes;
}

uintptr_t GradNodeBase::GetThisPtr() const {
return reinterpret_cast<uintptr_t>(this);
}

} // namespace egr
2 changes: 2 additions & 0 deletions paddle/fluid/eager/grad_node_info.h
@@ -266,6 +266,8 @@ class GradNodeBase {

std::vector<std::shared_ptr<egr::GradNodeBase>> NextFunctions();

uintptr_t GetThisPtr() const;

/**
* Apply GradientHook
* **/
6 changes: 3 additions & 3 deletions paddle/fluid/eager/utils.h
@@ -341,7 +341,7 @@ class EagerUtils {
}
if (VLOG_IS_ON(11)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d, Ptr: %d "
"{Name: %s, Initialized: %d, Ptr: %d, "
"TensorInfo: [ %s ], Value:[ %s ], ADInfo:[ %s ]}";
auto* ad_meta = nullable_autograd_meta(t);
if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
@@ -392,7 +392,7 @@
}
} else if (VLOG_IS_ON(6)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d, Ptr: %d "
"{Name: %s, Initialized: %d, Ptr: %d,"
"TensorInfo: [ %s ], ADInfo:[ %s ]}";
auto* ad_meta = nullable_autograd_meta(t);
if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
@@ -419,7 +419,7 @@
}
} else if (VLOG_IS_ON(5)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d , Ptr: %d "
"{Name: %s, Initialized: %d , Ptr: %d, "
"TensorInfo: [ %s ]}";
return paddle::string::Sprintf(TENSOR_PRINT_TEMPLATE,
tensor_name_str,
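
The three TENSOR_PRINT_TEMPLATE variants above are selected by increasing verbosity: VLOG(5) reports name, initialized flag, pointer, and TensorInfo; VLOG(6) adds ADInfo; VLOG(11) also dumps the tensor Value. A minimal usage sketch follows, assuming the usual glog switch (the GLOG_v environment variable, read before paddle is imported) controls these levels.

import os

# Assumed configuration: glog verbosity must be raised before importing paddle
# so the VLOG(4) Input/Output summaries and VLOG(6) gradnode_ptr lines show up.
os.environ.setdefault("GLOG_v", "6")

import paddle

x = paddle.randn([2, 3])
x.stop_gradient = False          # make x a leaf that requires grad
y = (x * x).sum()
y.backward()                     # grad-node logs are printed during this pass
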
1 change: 0 additions & 1 deletion paddle/fluid/pybind/eager_utils.h
@@ -131,7 +131,6 @@ PyObject* ToPyObject(
PyObject* ToPyObject(const paddle::framework::Vocab& value);

PyObject* ToPyObject(std::shared_ptr<egr::GradNodeBase> grad_node);

PyObject* ToPyObject(const pir::OpResult& value);
PyObject* ToPyObject(const std::vector<pir::OpResult>& value);

2 changes: 2 additions & 0 deletions paddle/fluid/pybind/pybind.cc
@@ -868,6 +868,8 @@ PYBIND11_MODULE(libpaddle, m) {
[](const std::shared_ptr<egr::GradNodeBase> &self) {
return self->NextFunctions();
})

.def("node_this_ptr", &egr::GradNodeBase::GetThisPtr)
.def("input_meta",
[](const std::shared_ptr<egr::GradNodeBase> &self) {
return self->InputMeta();
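
Exposing the node address through node_this_ptr gives paddleviz-style tooling a stable integer key for de-duplicating grad nodes while walking the backward graph. The sketch below is hedged: it assumes the graph entry point Tensor.grad_fn and the method names next_functions and name, which are not all visible in this diff.

import paddle

def collect_grad_nodes(root_tensor):
    # Iteratively walk the backward graph, keyed by the integer returned from
    # node_this_ptr(); the attribute/method names used here are assumptions.
    seen = {}
    stack = [root_tensor.grad_fn]           # assumed entry point into the graph
    while stack:
        node = stack.pop()
        if node is None:                    # end of a branch
            continue
        node_id = node.node_this_ptr()      # address of the GradNodeBase object
        if node_id in seen:
            continue
        seen[node_id] = node.name()         # hypothetical label accessor
        stack.extend(node.next_functions())
    return seen

x = paddle.randn([4, 4])
x.stop_gradient = False
y = paddle.add_n([x * 2.0, x * 3.0]).sum()
print(collect_grad_nodes(y))
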
