Merged
11 changes: 5 additions & 6 deletions projects/clr/rocclr/device/pal/paldevice.hpp
@@ -253,16 +253,15 @@ class Device : public NullDevice {
uint32_t index_; //!< HW queue index for scratch buffer access
amd::Monitor queue_lock_; //!< Queue lock for access
AqlPacketMgmt aql_packet_mgmt_; //!< AQL packets management class for debugger support
QueueRecycleInfo(const Device& dev)
QueueRecycleInfo()
: counter_(1),
engineType_(Pal::EngineTypeCompute),
index_(0),
queue_lock_(true) /* Queue lock for sharing */,
aql_packet_mgmt_(dev) {}
queue_lock_(true) /* Queue lock for sharing */ {}

//! Returns the MQD's read_dispatch_id's address.
uintptr_t DebuggerData() const {
return reinterpret_cast<uintptr_t>(&aql_packet_mgmt_.amd_queue_.read_dispatch_id);
//! Returns the aql packet list
uintptr_t AqlPacketList() const {
return reinterpret_cast<uintptr_t>(&aql_packet_mgmt_.aql_packets_);
}
};

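With this revert, the queue again publishes the base address of the raw AQL packet array (passed as qCreateInfo.aqlPacketList in the palvirtual.cpp hunks below) instead of the MQD's read_dispatch_id. A minimal sketch of how that base plus the per-dispatch slot index (later set as dispatchParam.aqlPacketIndex) identifies one packet; DispatchPacket is a hypothetical stand-in for hsa_kernel_dispatch_packet_t, which is 64 bytes in the HSA ABI:

```cpp
#include <cstdint>

// Hypothetical 64-byte stand-in for hsa_kernel_dispatch_packet_t.
struct DispatchPacket {
  uint16_t header;
  uint16_t setup;
  uint8_t  body[60];
};
static_assert(sizeof(DispatchPacket) == 64, "AQL packets are 64 bytes");

// aql_packet_list: the value returned by QueueRecycleInfo::AqlPacketList()
// aql_index:       the slot index later passed as dispatchParam.aqlPacketIndex
inline const DispatchPacket* PacketAt(uintptr_t aql_packet_list, uint32_t aql_index) {
  return reinterpret_cast<const DispatchPacket*>(aql_packet_list) + aql_index;
}
```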
14 changes: 8 additions & 6 deletions projects/clr/rocclr/device/pal/palkernel.cpp
@@ -172,10 +172,12 @@ const pal::Program& Kernel::prog() const {
return reinterpret_cast<const pal::Program&>(prog_);
}

std::pair<hsa_kernel_dispatch_packet_t* /* packet address */, uint64_t /* packet id */>
HSAILKernel::loadArguments(VirtualGPU& gpu, const amd::Kernel& kernel,
Member:
Looks like this revert will help unblock the other build fix at #1615

https://github.com/ROCm/rocm-systems/actions/runs/19038204128/job/54368073245?pr=1615#step:14:7900

[hip-clr] [128/167] Building CXX object rocclr\CMakeFiles\rocclr.dir\device\pal\palkernel.cpp.obj
[hip-clr] FAILED: rocclr/CMakeFiles/rocclr.dir/device/pal/palkernel.cpp.obj 
[hip-clr] ccache "C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.44.35207\bin\Hostx64\x64\cl.exe"  /nologo /TP -DATI_OS_WIN -DCL_TARGET_OPENCL_VERSION=220 -DCL_USE_DEPRECATED_OPENCL_1_0_APIS -DCL_USE_DEPRECATED_OPENCL_1_1_APIS -DCL_USE_DEPRECATED_OPENCL_1_2_APIS -DCL_USE_DEPRECATED_OPENCL_2_0_APIS -DCOMGR_DYN_DLL -DGPUOPEN_CLIENT_INTERFACE_MAJOR_VERSION=42 -DHAVE_CL2_HPP -DHIP_MAJOR_VERSION=7 -DHIP_MINOR_VERSION=1 -DLITTLEENDIAN_CPU -DOPENCL_C_MAJOR=2 -DOPENCL_C_MINOR=0 -DOPENCL_MAJOR=2 -DOPENCL_MINOR=1 -DPAL_BUILD_RDF=1 -DPAL_CLIENT_INTERFACE_MAJOR_VERSION=932 -DPAL_DEVELOPER_BUILD=0 -DPAL_GPUOPEN_OCL -DPAL_KMT_BUILD=1 -DROCCLR_VERSION_GITHASH=\"37af463\" -DWITH_PAL_DEVICE -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr\.. -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr\compiler\lib -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr\compiler\lib\include -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr\compiler\lib\backends\common -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr\device -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr\elf -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr\include -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\opencl\khronos\headers\opencl2.2\CL -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\opencl\khronos\headers\opencl2.2\CL\.. -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\opencl\khronos\headers\opencl2.2\CL\..\.. -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\opencl\khronos\headers\opencl2.2\CL\..\..\.. -IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\opencl\khronos\headers\opencl2.2\CL\..\..\..\.. 
-IC:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\opencl\khronos\headers\opencl2.2\CL\..\..\..\..\amdocl -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\pal\inc -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\pal\inc\core -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\pal\inc\gpuUtil -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\pal\inc\util -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\pal\shared\inc -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\pal\shared\devdriver\shared\legacy\inc -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\pal\shared\devdriver\third_party\dd_crc32\inc -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\pal\shared\metrohash\src -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\sc\HSAIL\ext\loader -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\hsail-compiler\lib\loaders\elf\utils\libelf\..\..\..\..\..\lib\loaders\elf\utils\common -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\hsail-compiler\lib\loaders\elf\utils\libelf\..\..\..\..\..\lib\loaders\elf\utils\common\win32 -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\hsail-compiler\lib\loaders\elf\utils\libelf\..\..\..\..\..\lib\loaders\elf\utils\libelf -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\sc\HSAIL\ext\libamdhsacode -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\sc\HSAIL\ext\libamdhsacode\..\..\include -IC:\home\runner\_work\rocm-systems\rocm-systems\shared\amdgpu-windows-interop\sc\HSAIL\ext\libamdhsacode\..\..\hsail-tools\libHSAIL -external:IB:\build\compiler\amd-comgr\dist\include -external:W0 /DWIN32 /D_WINDOWS /EHsc /DWIN32 /D_WINDOWS  /EHsc /O2 /Ob2 /DNDEBUG -std:c++20 -MD /wd4267 /wd4244 /wd4996 /MT /showIncludes /Forocclr\CMakeFiles\rocclr.dir\device\pal\palkernel.cpp.obj /Fdrocclr\CMakeFiles\rocclr.dir\rocclr.pdb /FS -c C:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr\device\pal\palkernel.cpp
[hip-clr] cl : Command line warning D9025 : overriding '/MD' with '/MT'
[hip-clr] C:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr\device\pal\palkernel.cpp(176): error C2653: 'HSAILKernel': is not a class or namespace name
[hip-clr] C:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr\device\pal\palkernel.cpp(176): error C2270: 'loadArguments': modifiers not allowed on nonmember functions
[hip-clr] C:\home\runner\_work\rocm-systems\rocm-systems\projects\clr\rocclr\device\pal\palkernel.cpp(184): error C3861: 'argsBufferSize': identifier not found

Member:
I agreed with the assessment that a revert is the right call here; the CI history at https://github.com/ROCm/rocm-systems/actions/workflows/therock-ci.yml?query=branch%3Adevelop shows that builds started breaking when that commit was introduced.

const amd::NDRangeContainer& sizes, const_address params,
size_t ldsAddress, uint64_t vmDefQueue, uint64_t* vmParentWrap) const {
hsa_kernel_dispatch_packet_t* Kernel::loadArguments(VirtualGPU& gpu, const amd::Kernel& kernel,
const amd::NDRangeContainer& sizes,
const_address params, size_t ldsAddress,
uint64_t vmDefQueue,
uint64_t* vmParentWrap,
uint32_t* aql_index) const {
// Provide private and local heap addresses
static constexpr uint AddressShift = LP64_SWITCH(0, 32);
const_address parameters = params;
@@ -362,7 +364,7 @@ HSAILKernel::loadArguments(VirtualGPU& gpu, const amd::Kernel& kernel,
std::min(static_cast<uint32_t>(argsBufferSize()), signature.paramsSize()));
}

auto&& [hsaDisp, aql_packet_id] = gpu.GetAqlPacketSlot();
hsa_kernel_dispatch_packet_t* hsaDisp = gpu.GetAqlPacketSlot(aql_index);

constexpr uint16_t kDispatchPacketHeader =
(HSA_PACKET_TYPE_KERNEL_DISPATCH << HSA_PACKET_HEADER_TYPE) |
@@ -399,7 +401,7 @@ HSAILKernel::loadArguments(VirtualGPU& gpu, const amd::Kernel& kernel,
gpu.addVmMemory(gpu.hsaQueueMem());
}

return {hsaDisp, aql_packet_id};
return hsaDisp;
}

bool Kernel::setKernelDescriptor(amd::hsa::loader::Symbol* sym,
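For context on the kDispatchPacketHeader constant visible in the hunk above: the 16-bit AQL packet header packs the packet type, barrier bit, and the acquire/release fence scopes into fixed bit positions. A small sketch of that packing; the shift positions and enumerator values are quoted from hsa.h from memory, so treat the exact numbers as an assumption to verify against the header:

```cpp
#include <cstdint>
#include <cstdio>

// Bit positions and values as defined by the HSA runtime headers (hsa.h);
// quoted from memory, so verify against the actual header before relying on them.
constexpr int      kHeaderTypeShift         = 0;   // HSA_PACKET_HEADER_TYPE
constexpr int      kHeaderBarrierShift      = 8;   // HSA_PACKET_HEADER_BARRIER
constexpr int      kHeaderAcquireFenceShift = 9;   // HSA_PACKET_HEADER_SCACQUIRE_FENCE_SCOPE
constexpr int      kHeaderReleaseFenceShift = 11;  // HSA_PACKET_HEADER_SCRELEASE_FENCE_SCOPE
constexpr uint16_t kTypeKernelDispatch      = 2;   // HSA_PACKET_TYPE_KERNEL_DISPATCH
constexpr uint16_t kFenceScopeSystem        = 2;   // HSA_FENCE_SCOPE_SYSTEM

int main() {
  // A dispatch header with system-scope acquire/release fences and no barrier bit.
  const uint16_t header = (kTypeKernelDispatch << kHeaderTypeShift) |
                          (kFenceScopeSystem << kHeaderAcquireFenceShift) |
                          (kFenceScopeSystem << kHeaderReleaseFenceShift);
  std::printf("dispatch header = 0x%04x\n", header);  // prints 0x1402 with these values
  return 0;
}
```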
17 changes: 9 additions & 8 deletions projects/clr/rocclr/device/pal/palkernel.hpp
@@ -104,14 +104,15 @@ class Kernel : public device::Kernel {

//! Returns AQL packet in CPU memory
//! if the kernel arguments were successfully loaded, otherwise NULL
std::pair<hsa_kernel_dispatch_packet_t* /* packet address */, uint64_t /* packet id */>
loadArguments(VirtualGPU& gpu, //!< Running GPU context
const amd::Kernel& kernel, //!< AMD kernel object
const amd::NDRangeContainer& sizes, //!< NDrange container
const_address params, //!< Application arguments for the kernel
size_t ldsAddress, //!< LDS address that includes all arguments.
uint64_t vmDefQueue, //!< GPU VM default queue pointer
uint64_t* vmParentWrap //!< GPU VM parent aql wrap object
hsa_kernel_dispatch_packet_t* loadArguments(
VirtualGPU& gpu, //!< Running GPU context
const amd::Kernel& kernel, //!< AMD kernel object
const amd::NDRangeContainer& sizes, //!< NDrange container
const_address params, //!< Application arguments for the kernel
size_t ldsAddress, //!< LDS address that includes all arguments.
uint64_t vmDefQueue, //!< GPU VM default queue pointer
uint64_t* vmParentWrap, //!< GPU VM parent aql wrap object
uint32_t* aql_index //!< AQL packet index in the packets array for debugger
) const;

//! Returns the kernel index in the program
86 changes: 8 additions & 78 deletions projects/clr/rocclr/device/pal/palvirtual.cpp
@@ -52,37 +52,6 @@

namespace amd::pal {

AqlPacketMgmt::AqlPacketMgmt(const Device& dev) {
memset(aql_vgpus_, 0, sizeof(aql_vgpus_));

static_assert(sizeof(decltype(amd_queue_)::read_dispatch_id) == sizeof(uint64_t));
static_assert(sizeof(decltype(amd_queue_)::write_dispatch_id) == sizeof(uint64_t));

// Initialize the amd_queue_
amd_queue_.hsa_queue.type = HSA_QUEUE_TYPE_MULTI;
amd_queue_.hsa_queue.features = HSA_QUEUE_FEATURE_KERNEL_DISPATCH;
amd_queue_.hsa_queue.base_address = &aql_packets_[0];
amd_queue_.hsa_queue.size = sizeof(aql_packets_) / sizeof(aql_packets_[0]);
amd_queue_.hsa_queue.id = []() {
static std::atomic<uint64_t> queue_counter;
return queue_counter++;
}();
amd_queue_.read_dispatch_id_field_base_byte_offset =
offsetof(decltype(amd_queue_), read_dispatch_id) - offsetof(decltype(amd_queue_), hsa_queue);

amd_queue_.max_cu_id = dev.properties().gfxipProperties.shaderCore.numAvailableCus - 1;
amd_queue_.max_wave_id = dev.properties().gfxipProperties.shaderCore.numSimdsPerCu *
dev.properties().gfxipProperties.shaderCore.numWavefrontsPerSimd -
1;

amd_queue_.private_segment_aperture_base_hi = static_cast<uint32_t>(
dev.properties().gpuMemoryProperties.privateApertureBase >> LP64_SWITCH(0, 32));
amd_queue_.group_segment_aperture_base_hi = static_cast<uint32_t>(
dev.properties().gpuMemoryProperties.sharedApertureBase >> LP64_SWITCH(0, 32));

AMD_HSA_BITS_SET(amd_queue_.queue_properties, AMD_QUEUE_PROPERTIES_IS_PTR64, LP64_SWITCH(0, 1));
}

uint32_t VirtualGPU::Queue::AllocedQueues(const VirtualGPU& gpu, Pal::EngineType type) {
uint32_t allocedQueues = 0;
for (const auto& queue : gpu.dev().QueuePool()) {
@@ -182,13 +151,13 @@ VirtualGPU::Queue* VirtualGPU::Queue::Create(VirtualGPU& gpu, Pal::QueueType que
uint32_t index = AllocedQueues(gpu, qCreateInfo.engineType);
// Create PAL queue object
if (index < GPU_MAX_HW_QUEUES) {
Device::QueueRecycleInfo* info = new (qSize) Device::QueueRecycleInfo(gpu.dev());
Device::QueueRecycleInfo* info = new (qSize) Device::QueueRecycleInfo();
if (info == nullptr) {
LogError("Could not create QueueRecycleInfo!");
return nullptr;
}
addrQ = reinterpret_cast<address>(&info[1]);
qCreateInfo.aqlPacketList = info->DebuggerData();
qCreateInfo.aqlPacketList = info->AqlPacketList();
result = palDev->CreateQueue(qCreateInfo, addrQ, &queue->iQueue_);
if (result == Pal::Result::Success) {
const_cast<Device&>(gpu.dev()).QueuePool().insert({queue->iQueue_, info});
@@ -224,7 +193,7 @@ VirtualGPU::Queue* VirtualGPU::Queue::Create(VirtualGPU& gpu, Pal::QueueType que
queue->lock_ = &info->queue_lock_;
addrQ = reinterpret_cast<address>(&queue[1]);
} else {
Device::QueueRecycleInfo* info = new Device::QueueRecycleInfo(gpu.dev());
Device::QueueRecycleInfo* info = new Device::QueueRecycleInfo();
if (info == nullptr) {
LogError("Could not create QueueRecycleInfo!");
return nullptr;
@@ -233,7 +202,7 @@ VirtualGPU::Queue* VirtualGPU::Queue::Create(VirtualGPU& gpu, Pal::QueueType que
queue->aql_mgmt_ = &info->aql_packet_mgmt_;
// Exclusive compute path
addrQ = reinterpret_cast<address>(&queue[1]);
qCreateInfo.aqlPacketList = info->DebuggerData();
qCreateInfo.aqlPacketList = info->AqlPacketList();
result = palDev->CreateQueue(qCreateInfo, addrQ, &queue->iQueue_);
}
if (result != Pal::Result::Success) {
@@ -1103,7 +1072,7 @@ VirtualGPU::~VirtualGPU() {
if (queues_[MainEngine] != nullptr) {
// Clear all timestamps, associated with this virtual GPU
auto& mgmt = *queues_[MainEngine]->aql_mgmt_;
for (uint32_t i = 0; i < mgmt.amd_queue_.hsa_queue.size; ++i) {
for (uint32_t i = 0; i < AqlPacketMgmt::kAqlPacketsListSize; ++i) {
if (mgmt.aql_vgpus_[i] == this) {
mgmt.aql_vgpus_[i] = nullptr;
mgmt.aql_events_[i].invalidate();
@@ -2719,15 +2688,13 @@ bool VirtualGPU::submitKernelInternal(const amd::NDRangeContainer& sizes, const
GpuEvent gpuEvent(queues_[MainEngine]->cmdBufId());
uint32_t id = gpuEvent.id_;
uint64_t vmParentWrap = 0;
uint32_t aql_index = 0;
// Program the kernel arguments for the GPU execution
auto&& [aqlPkt, aql_packet_id] =
hsa_kernel_dispatch_packet_t* aqlPkt =
hsaKernel.loadArguments(*this, kernel, sizes, parameters, ldsSize + sharedMemBytes,
vmDefQueue, &vmParentWrap);
vmDefQueue, &vmParentWrap, &aql_index);
assert((nullptr != aqlPkt) && "Couldn't load kernel arguments");

auto& amd_queue = queues_[MainEngine]->aql_mgmt_->amd_queue_;
uint32_t aql_index = aql_packet_id % amd_queue.hsa_queue.size;

// Dynamic call stack size is considered to calculate private segment size and scratch regs
// in pal::Kernel::postLoad(). As it is not called during hipModuleLaunchKernel unlike
// hipLaunchKernel/hipLaunchKernelGGL, Updated value is passed to dispatch packet.
@@ -2762,46 +2729,9 @@ bool VirtualGPU::submitKernelInternal(const amd::NDRangeContainer& sizes, const
dispatchParam.useAtc = dev().settings().svmFineGrainSystem_ ? true : false;
dispatchParam.kernargSegmentSize = hsaKernel.argsBufferSize();
dispatchParam.aqlPacketIndex = aql_index;

// Update the mqd's information about scratch memory.
amd_queue.scratch_backing_memory_location = static_cast<uint64_t>(dispatchParam.scratchAddr);
amd_queue.scratch_backing_memory_byte_size = static_cast<uint64_t>(dispatchParam.scratchSize);

// FIXME: Conservatively, the read_dispatch_id cannot be smaller than the current aql_packet_id -
// hsa_queue.size for the debugger to work correctly. The read_dispatch_id really should be
// updated when the CmdBuf is marked as complete.
uint64_t new_read_dispatch_id = (aql_packet_id >= amd_queue.hsa_queue.size)
? (aql_packet_id - amd_queue.hsa_queue.size + 1)
: 0;

// Do an atomic max of &amd_queue.read_dispatch_id and new_read_dispatch_id
uint64_t old_read_dispatch_id = amd_queue.read_dispatch_id;
while (new_read_dispatch_id > old_read_dispatch_id) {
#if defined(__GNUC__)
if (__atomic_compare_exchange_n(&amd_queue.read_dispatch_id, &old_read_dispatch_id,
new_read_dispatch_id, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
break;
#elif defined(_MSC_VER)
uint64_t initial_value = InterlockedCompareExchange64(
reinterpret_cast<LONG64 volatile*>(&amd_queue.read_dispatch_id), new_read_dispatch_id,
old_read_dispatch_id);
if (initial_value == old_read_dispatch_id) break;
old_read_dispatch_id = initial_value;
#else // !defined (_MSV_VER) && !defined(__GNUC__)
#error Not implemented
#endif // !defined (_MSV_VER) && !defined(__GNUC__)
}

// Run AQL dispatch in HW
eventBegin(MainEngine);

#if PAL_CLIENT_INTERFACE_MAJOR_VERSION < 954
iCmd()->CmdDispatchAql(dispatchParam);
#else // PAL_CLIENT_INTERFACE_MAJOR_VERSION >= 954
Pal::DispatchAqlFeedback feedback{};
iCmd()->CmdDispatchAql(dispatchParam, &feedback);
amd_queue.compute_tmpring_size = feedback.tmpRingSize;
#endif // PAL_CLIENT_INTERFACE_MAJOR_VERSION >= 954

if (id != gpuEvent.id_) {
LogError("Something is wrong. ID mismatch!\n");
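The `new (qSize) Device::QueueRecycleInfo()` / `&info[1]` pattern in the Queue::Create() hunks above relies on an operator new overload that reserves extra trailing storage behind the object, so the PAL queue can be constructed in the same allocation right after the recycle info. A generic sketch of that idiom with a hypothetical struct (not the real QueueRecycleInfo, whose allocator comes from the ROCclr utility classes):

```cpp
#include <cstddef>
#include <cstdlib>
#include <new>

struct RecycleInfoSketch {
  int counter_ = 1;

  // Reserve sizeof(RecycleInfoSketch) plus 'extra' bytes in a single allocation,
  // mirroring the 'new (qSize) Device::QueueRecycleInfo()' call in Queue::Create().
  void* operator new(std::size_t size, std::size_t extra) { return std::malloc(size + extra); }
  void  operator delete(void* ptr) { std::free(ptr); }
};

int main() {
  const std::size_t queue_bytes = 256;                 // stand-in for the PAL queue object size
  RecycleInfoSketch* info = new (queue_bytes) RecycleInfoSketch();
  // The trailing storage starts right after the struct, i.e. at &info[1];
  // the real code passes this address to palDev->CreateQueue() as the queue's memory.
  void* queue_storage = static_cast<void*>(&info[1]);
  (void)queue_storage;
  delete info;
  return 0;
}
```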
33 changes: 8 additions & 25 deletions projects/clr/rocclr/device/pal/palvirtual.hpp
@@ -36,11 +36,6 @@
#include "palQueue.h"
#include "palFence.h"
#include "palLinearAllocator.h"
#include "amd_hsa_queue.h"

#ifdef _WIN32
#include <winnt.h>
#endif // _WIN32

/*! \addtogroup PAL PAL Resource Implementation
* @{
@@ -60,13 +55,12 @@ class Kernel;

struct AqlPacketMgmt : public amd::EmbeddedObject {
static constexpr uint32_t kAqlPacketsListSize = 4 * Ki;
AqlPacketMgmt(const Device& dev);
AqlPacketMgmt() : packet_index_(0) { memset(aql_vgpus_, 0, sizeof(aql_vgpus_)); }

amd_queue_t amd_queue_{};
alignas(sizeof(hsa_kernel_dispatch_packet_t))
hsa_kernel_dispatch_packet_t aql_packets_[kAqlPacketsListSize]; //!< The list of AQL packets
hsa_kernel_dispatch_packet_t aql_packets_[kAqlPacketsListSize]; //!< The list of AQL packets
GpuEvent aql_events_[kAqlPacketsListSize]; //!< The list of gpu for each AQL packet
VirtualGPU* aql_vgpus_[kAqlPacketsListSize]; //!< The list of vgpus which had submissions
std::atomic<uint64_t> packet_index_; //!< The active packet slot index
};

enum class BarrierType : uint8_t {
@@ -602,26 +596,15 @@ class VirtualGPU : public device::VirtualDevice {
}

//! Returns the current active slot for AQL packet
std::pair<hsa_kernel_dispatch_packet_t* /* packet address */, uint64_t /* packet id */>
GetAqlPacketSlot() const {
hsa_kernel_dispatch_packet_t* GetAqlPacketSlot(uint32_t* index) {
auto& mgmt = *queues_[MainEngine]->aql_mgmt_;
// Atomic increment global AQL index and wrap around max AQL list size
uint64_t packet_id =
#if defined(__GNUC__)
__atomic_fetch_add(&mgmt.amd_queue_.write_dispatch_id, 1, __ATOMIC_RELAXED);
#elif defined(_MSC_VER)
InterlockedExchangeAdd64(
reinterpret_cast<LONG64 volatile*>(&mgmt.amd_queue_.write_dispatch_id), 1);
#else // !defined (_MSV_VER) && !defined(__GNUC__)
#error Not implemented
#endif // !defined (_MSV_VER) && !defined(__GNUC__)

uint32_t index = packet_id % mgmt.amd_queue_.hsa_queue.size;
if (mgmt.aql_events_[index].isValid()) {
*index = ++mgmt.packet_index_ % AqlPacketMgmt::kAqlPacketsListSize;
if (mgmt.aql_events_[*index].isValid()) {
// Make sure GPU doesn't process this slot
mgmt.aql_vgpus_[index]->waitForEvent(&mgmt.aql_events_[index]);
mgmt.aql_vgpus_[*index]->waitForEvent(&mgmt.aql_events_[*index]);
}
return {&mgmt.aql_packets_[index], packet_id};
return &mgmt.aql_packets_[*index];
}

protected:
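The GetAqlPacketSlot() change above restores a plain ring buffer: an atomic counter is incremented per dispatch, wrapped to kAqlPacketsListSize, and a slot is reused only after any still-pending work recorded against it has completed. A self-contained sketch of that recycling pattern, with simplified types (the real code blocks on the slot's GpuEvent through the owning VirtualGPU rather than spinning on a flag):

```cpp
#include <array>
#include <atomic>
#include <cstdint>

struct PacketSlot {
  std::atomic<bool> busy{false};   // stand-in for GpuEvent::isValid()
  // ... the 64-byte AQL dispatch packet would live here
};

class PacketRing {
 public:
  static constexpr uint32_t kSize = 4 * 1024;        // matches kAqlPacketsListSize

  // Returns the next slot, waiting (here: spinning) while it is still in flight.
  PacketSlot* Acquire(uint32_t* index) {
    *index = ++packet_index_ % kSize;                // pre-increment then wrap, as in GetAqlPacketSlot()
    PacketSlot& slot = slots_[*index];
    while (slot.busy.load()) {
      // the real code calls aql_vgpus_[*index]->waitForEvent(&aql_events_[*index]) instead
    }
    return &slot;
  }

  void MarkBusy(uint32_t index) { slots_[index].busy.store(true); }
  void MarkDone(uint32_t index) { slots_[index].busy.store(false); }

 private:
  std::array<PacketSlot, kSize> slots_{};
  std::atomic<uint64_t> packet_index_{0};            // the active packet slot index
};

int main() {
  PacketRing ring;
  uint32_t idx = 0;
  PacketSlot* slot = ring.Acquire(&idx);             // fill the AQL packet at 'slot', then submit
  ring.MarkBusy(idx);
  ring.MarkDone(idx);                                // e.g. when the command buffer completes
  (void)slot;
  return 0;
}
```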