forked from ROCm/pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
operator_gpu_test.cc
63 lines (54 loc) · 1.39 KB
/
operator_gpu_test.cc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
#include <string>
#include <gtest/gtest.h>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
// Minimal no-op operator used to probe the engine-preference machinery.
// Engine-specific subclasses override type() to report which variant the
// registry actually instantiated.
class JustTest : public OperatorBase {
 public:
  using OperatorBase::OperatorBase;

  // Reports the engine flavor of this instance; "BASE" for the plain op.
  virtual std::string type() {
    return "BASE";
  }

  // Trivial run: the test only cares about which class was constructed.
  bool Run(int /* unused */ /*stream_id*/) override {
    return true;
  }
};
// Plain-CUDA variant of JustTest; identifies itself as "CUDA".
class JustTestCUDA : public JustTest {
 public:
  using JustTest::JustTest;

  // Reports the engine flavor so the test can tell which variant won.
  std::string type() override {
    return "CUDA";
  }

  // Trivial run, mirroring the base class.
  bool Run(int /* unused */ /*stream_id*/) override {
    return true;
  }
};
// CUDNN-engine variant of JustTest; identifies itself as "CUDNN".
class JustTestCUDNN : public JustTest {
 public:
  using JustTest::JustTest;

  // Reports the engine flavor so the test can tell which variant won.
  std::string type() override {
    return "CUDNN";
  }

  // Trivial run, mirroring the base class.
  bool Run(int /* unused */ /*stream_id*/) override {
    return true;
  }
};
// Schema: JustTest takes at most one input and produces at most one output.
OPERATOR_SCHEMA(JustTest).NumInputs(0, 1).NumOutputs(0, 1);
// Register both a plain-CUDA and a CUDNN implementation under the same op
// name, so engine-preference resolution has two candidates to pick between.
REGISTER_CUDA_OPERATOR(JustTest, JustTestCUDA);
REGISTER_CUDNN_OPERATOR(JustTest, JustTestCUDNN);
// Verifies that, for an op placed on a CUDA device, operator creation picks
// the CUDNN implementation by default — CUDNN appears in the default global
// preferred-engines list, so it wins over the plain CUDA registration.
TEST(EnginePrefTest, GPUDeviceDefaultPreferredEngines) {
  // Skip silently on machines without a CUDA GPU.
  if (!HasCudaGPU())
    return;
  Workspace ws;
  OperatorDef op_def;
  op_def.set_type("JustTest");
  op_def.mutable_device_option()->set_device_type(PROTO_CUDA);
  {
    const auto op = CreateOperator(op_def, &ws);
    EXPECT_NE(nullptr, op.get());
    // CUDNN should be taken as it's in the default global preferred engines
    // list
    EXPECT_EQ(static_cast<JustTest*>(op.get())->type(), "CUDNN");
  }
}
} // namespace caffe2