Use a CUDAGuard when running Torch models
VivekPanyam committed Apr 20, 2020

1 parent 9c3be22 commit bd5263b
Showing 1 changed file with 9 additions and 0 deletions.

source/neuropod/backends/torchscript/torch_backend.cc
@@ -7,6 +7,7 @@
 #include "neuropod/backends/torchscript/type_utils.hh"
 #include "neuropod/internal/tensor_types.hh"

+#include <c10/cuda/CUDAGuard.h>
 #include <caffe2/core/macros.h>

 #include <iostream>
@@ -291,6 +292,14 @@ std::unique_ptr<NeuropodValueMap> TorchNeuropodBackend::infer_internal(const Neu
 {
     torch::NoGradGuard guard;

+    // Make sure we're running on the correct device
+    std::unique_ptr<at::cuda::CUDAGuard> device_guard;
+    const auto model_device = get_torch_device(DeviceType::GPU);
+    if (model_device.is_cuda())
+    {
+        device_guard = stdx::make_unique<at::cuda::CUDAGuard>(model_device);
+    }
+
     // Get inference schema
     const auto &method = model_->get_method("forward");
     const auto &schema = SCHEMA(method);