Skip to content

Commit

Permalink
[GPU] RandomUniform new shape inference for dynamism support
Browse files Browse the repository at this point in the history
  • Loading branch information
sshlyapn committed Aug 9, 2023
1 parent 4ad072e commit 0b049eb
Show file tree
Hide file tree
Showing 7 changed files with 477 additions and 51 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,7 @@ struct random_uniform : public primitive_base<random_uniform> {
random_uniform() : primitive_base("", {}),
global_seed(0),
op_seed(0),
output_shape{},
output_format(format::type::any) {}
output_shape{} {}

DECLARE_OBJECT_TYPE_SERIALIZATION

Expand All @@ -36,20 +35,26 @@ struct random_uniform : public primitive_base<random_uniform> {
*/
random_uniform(const primitive_id &id, const std::vector<input_info> &inputs,
const data_types &data_type, const uint64_t global_seed,
const uint64_t op_seed, const tensor output_shape,
const format output_format,
const uint64_t op_seed, const ov::Shape output_shape,
const padding &output_padding = padding())
: primitive_base(id, inputs, {output_padding},
{optional_data_type{data_type}}),
global_seed(global_seed),
op_seed(op_seed),
output_shape(output_shape),
output_format(output_format) {}
output_shape(output_shape) {}

random_uniform(const primitive_id &id, const std::vector<input_info> &inputs,
const data_types &data_type, const uint64_t global_seed,
const uint64_t op_seed, const padding &output_padding = padding())
: primitive_base(id, inputs, {output_padding},
{optional_data_type{data_type}}),
global_seed(global_seed),
op_seed(op_seed),
output_shape() {}

const uint64_t global_seed;
const uint64_t op_seed;
const tensor output_shape;
const format output_format;
const ov::Shape output_shape;

size_t hash() const override {
size_t seed = primitive::hash();
Expand All @@ -73,17 +78,13 @@ struct random_uniform : public primitive_base<random_uniform> {
ob << global_seed;
ob << op_seed;
ob << output_shape;
ob << make_data(&output_format.value, sizeof(format::type));
}

void load(BinaryInputBuffer& ib) override {
primitive_base<random_uniform>::load(ib);
ib >> *const_cast<uint64_t*>(&global_seed);
ib >> *const_cast<uint64_t*>(&op_seed);
ib >> *const_cast<tensor*>(&output_shape);
format::type tmp_type = format::type::any;
ib >> make_data(&tmp_type, sizeof(format::type));
*const_cast<format*>(&output_format) = format(tmp_type);
ib >> *const_cast<ov::Shape*>(&output_shape);
}
};

Expand Down
13 changes: 13 additions & 0 deletions src/plugins/intel_gpu/src/graph/include/random_uniform_inst.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,17 @@

namespace cldnn {

// Program-node specialization for the random_uniform primitive.
// Exposes dependency access and tells the shape-inference machinery which
// inputs must be available as data (not just layouts) at inference time.
template <>
struct typed_program_node<random_uniform> : public typed_program_node_base<random_uniform> {
    using parent = typed_program_node_base<random_uniform>;

public:
    using parent::parent;

    // Convenience accessor for the node's index-th input dependency.
    program_node& input(size_t index = 0) const { return get_dependency(index); }
    // Input 0 holds the output-shape tensor (see calc_output_layouts, which reads
    // memory_deps.at(0)); its contents are required to compute the output layout.
    std::vector<size_t> get_shape_infer_dependencies() const override { return {0}; }
};

using random_uniform_node = typed_program_node<random_uniform>;

template<>
Expand All @@ -17,6 +28,8 @@ class typed_primitive_inst<random_uniform> : public typed_primitive_inst_base<ra
using parent::parent;

public:
template<typename ShapeType>
static std::vector<layout> calc_output_layouts(random_uniform_node const& /*node*/, const kernel_impl_params& impl_param);
static layout calc_output_layout(random_uniform_node const &node, kernel_impl_params const& impl_param);

static std::string to_string(random_uniform_node const &node);
Expand Down
57 changes: 52 additions & 5 deletions src/plugins/intel_gpu/src/graph/random_uniform.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <random_uniform_inst.h>
#include "random_uniform_inst.h"
#include "primitive_type_base.h"
#include <sstream>
#include <json_object.h>
#include <data_inst.h>
#include "json_object.h"

#include "random_uniform_shape_inference.hpp"

namespace cldnn {
GPU_DEFINE_PRIMITIVE_TYPE_ID(random_uniform)
Expand All @@ -17,9 +17,56 @@ random_uniform_inst::typed_primitive_inst(network& network, random_uniform_node

// Legacy (static) output layout calculation: the output shape was baked into the
// primitive at creation time, so only a default format matching its rank is derived.
layout random_uniform_inst::calc_output_layout(random_uniform_node const &node, kernel_impl_params const& impl_param) {
    auto primitive = impl_param.typed_desc<random_uniform>();
    // Renamed from `format` to avoid shadowing the cldnn::format type it is
    // initialized from — the shadowed spelling compiled but read ambiguously.
    auto output_format = format::get_default_format(primitive->output_shape.size());

    return {primitive->output_shape, *primitive->output_data_types[0], output_format};
}

// Shape-agnostic output layout calculation for the new (dynamic) shape-inference path.
// Runs ngraph RandomUniform shape inference, feeding it whichever input buffers
// (output-shape / min / max) are already available as constant memory dependencies;
// anything missing is left symbolic, yielding a (partially) dynamic output shape.
template<typename ShapeType>
std::vector<layout> random_uniform_inst::calc_output_layouts(random_uniform_node const& /*node*/, kernel_impl_params const& impl_param) {
    auto desc = impl_param.typed_desc<random_uniform>();
    // Fall back to the first input's data type when the primitive carries no explicit output type.
    auto output_data_type = desc->output_data_types[0].value_or(impl_param.get_input_layout().data_type);

    std::vector<ShapeType> output_shapes;
    std::vector<ShapeType> input_shapes = { impl_param.get_input_layout(0).get_partial_shape(),
                                            impl_param.get_input_layout(1).get_partial_shape(),
                                            impl_param.get_input_layout(2).get_partial_shape() };

    auto& memory_deps = impl_param.memory_deps;
    std::map<size_t, ngraph::HostTensorPtr> const_data;

    // NOTE: shape_infer is deliberately invoked from *inside* the branches below
    // (rather than once after them) so that every mem_lock RAII guard is still
    // alive while the host tensors registered in const_data are being read.
    // Do not hoist the calls out of the lock scopes.
    auto run_shape_infer = [&]() {
        ov::op::v8::RandomUniform op;
        if (memory_deps.count(1) > 0 && memory_deps.count(2) > 0) {
            // min (input 1) and max (input 2) are constant: expose their data to shape inference.
            auto min_val = memory_deps.at(1);
            cldnn::mem_lock<uint8_t, mem_lock_type::read> min_val_lock(min_val, impl_param.get_stream());
            const_data.emplace(1, make_host_tensor(min_val->get_layout(), min_val_lock.data()));

            auto max_val = memory_deps.at(2);
            cldnn::mem_lock<uint8_t, mem_lock_type::read> max_val_lock(max_val, impl_param.get_stream());
            const_data.emplace(2, make_host_tensor(max_val->get_layout(), max_val_lock.data()));

            // Locks above are still held here — safe for shape_infer to read const_data.
            return ov::op::v8::shape_infer(&op, input_shapes, ov::make_tensor_accessor(const_data));
        } else {
            // min/max unavailable: infer from layouts only.
            return ov::op::v8::shape_infer(&op, input_shapes, ov::make_tensor_accessor(const_data));
        }
    };

    if (memory_deps.count(0) > 0) {
        // Output-shape tensor (input 0) is a known constant: lock it so shape
        // inference can read the concrete target shape. The lock must outlive
        // the run_shape_infer() call below.
        auto output_shape = memory_deps.at(0);
        cldnn::mem_lock<uint8_t, mem_lock_type::read> output_shape_lock(output_shape, impl_param.get_stream());
        const_data.emplace(0, make_host_tensor(output_shape->get_layout(), output_shape_lock.data()));

        output_shapes = run_shape_infer();
    } else {
        // Shape tensor not produced yet (fully dynamic case) — result stays dynamic.
        output_shapes = run_shape_infer();
    }

    return { layout{output_shapes[0], output_data_type, format::get_default_format(output_shapes[0].size())} };
}

template std::vector<layout> random_uniform_inst::calc_output_layouts<ov::PartialShape>(random_uniform_node const& node, const kernel_impl_params& impl_param);

std::string random_uniform_inst::to_string(random_uniform_node const &node) {
auto node_info = node.desc_to_json();
json_composite random_uniform_info;
Expand Down
38 changes: 27 additions & 11 deletions src/plugins/intel_gpu/src/plugin/ops/random_uniform.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,17 +15,33 @@ namespace {

// Translates an ngraph v8::RandomUniform op into a cldnn::random_uniform primitive.
// Static-shape models (without the new shape-infer path) get the output shape baked
// into the primitive; otherwise the shape is resolved at runtime from input 0.
void CreateRandomUniformOp(Program &p, const std::shared_ptr<ngraph::op::v8::RandomUniform> &op) {
    auto inputs = p.GetInputInfo(op);
    auto input_pshape = op->get_input_partial_shape(0);
    auto output_pshape = op->get_output_partial_shape(0);

    // A dynamic shape-input makes even the output *rank* unknown, which the plugin can't represent.
    OPENVINO_ASSERT(input_pshape.is_static(), "[GPU] Dynamic input of RandomUniform leads to dynamic output rank, but GPU doesn't support it yet");

    if (output_pshape.is_static() && !p.use_new_shape_infer()) {
        auto output_shape = output_pshape.get_shape();
        // Extend to a 4D shape as the legacy path expects rank-4 layouts.
        // Guard the extension: for rank > 4, (4 - size()) underflows size_t and
        // would request an astronomically large insert instead of a no-op.
        if (output_shape.size() < 4)
            output_shape.insert(output_shape.end(), 4 - output_shape.size(), 1ul);

        auto random_uniform_prim = cldnn::random_uniform(layer_type_name_ID(op),
                                                         inputs,
                                                         cldnn::element_type_to_data_type(op->get_out_type()),
                                                         op->get_global_seed(),
                                                         op->get_op_seed(),
                                                         output_shape);
        p.add_primitive(*op, random_uniform_prim);
    } else {
        // Dynamic path: the output shape comes from input 0 at runtime, so that
        // input must be a 1D shape tensor.
        OPENVINO_ASSERT(input_pshape.size() == 1, "[GPU] RandomUniform expects 1D input, got ", input_pshape.size());

        auto random_uniform_prim = cldnn::random_uniform(layer_type_name_ID(op),
                                                         inputs,
                                                         cldnn::element_type_to_data_type(op->get_out_type()),
                                                         op->get_global_seed(),
                                                         op->get_op_seed());
        p.add_primitive(*op, random_uniform_prim);
    }
}

} // namespace
Expand Down
Loading

0 comments on commit 0b049eb

Please sign in to comment.