Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

issue: 1792164 Socket error queue support #900

Open
wants to merge 21 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
cf5aae3
issue: 1792164 Use NOTIFY_ON_EVENTS in all places
igor-ivanov Jun 4, 2020
e81bdb1
issue: 1792164 Introduce socket error queue
igor-ivanov Jun 5, 2020
a458344
issue: 1792164 Change handle_cmsg() prototype
igor-ivanov Jun 5, 2020
3b233a4
issue: 1792164 Add MSG_ERRQUEUE handler
igor-ivanov Jun 5, 2020
28b8b23
issue: 1792164 Add tx zcopy description into mem_buf_desc_t
igor-ivanov Jun 5, 2020
815376c
issue: 1792164 Define zero copy constants
igor-ivanov Jun 15, 2020
74de061
issue: 1792164 Add SO_ZEROCOPY processing
igor-ivanov Jun 9, 2020
3457d7d
issue: 1792164 Add PBUF_ZEROCOPY type of allocated pbuf
igor-ivanov Jun 9, 2020
7aa343d
issue: 1792164 Introduce flags to process zero copy send
igor-ivanov Jun 9, 2020
969ade6
issue: 1792164 Add MSG_ZEROCOPY processing
igor-ivanov Jun 10, 2020
94ffe80
issue: 1792164 Force tx completion for zerocopy buffers
igor-ivanov Jun 11, 2020
c239eff
issue: 1792164 Track last memory descriptor with identical zcopy counter
igor-ivanov Jun 30, 2020
b22e4e9
issue: 1792164 Do TX polling from internal thread
igor-ivanov Jun 30, 2020
306d312
issue: 1792164 Extend VMA_INTERNAL_THREAD_ARM_CQ variable
igor-ivanov Jun 30, 2020
50996ba
issue: 1792164 Support MSG_ERRQUEUE in sockinfo::rx()
igor-ivanov Jul 1, 2020
f808091
issue: 1792164 Support checking POLLERR events by poll()
igor-ivanov Jul 1, 2020
fc9acbe
issue: 1792164 Improve LSO send flow
igor-ivanov Oct 7, 2020
1392cb6
issue: 1792164 Fix race access to error queue
igor-ivanov Oct 5, 2020
b0b5b52
issue: 1792164 Remove epoll event when it is consumed
pasis Nov 11, 2020
d2db645
issue: 1792164 Handle duplicate zcopy notifications
pasis Nov 11, 2020
07a71ec
issue: 2439102 Fix issue with processing control msg in recvmsg()
igor-ivanov Jan 21, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion README.txt
Original file line number Diff line number Diff line change
Expand Up @@ -830,11 +830,15 @@ timer expiration (once every 100ms). Application threads may be blocked till in
Default value is 0 (deferred handling)

VMA_INTERNAL_THREAD_ARM_CQ
Wakeup the internal thread for each packet that the CQ receive.
Wakeup the internal thread for activity on TX/RX CQ.
Poll and process the packet and bring it to the socket layer.
This can minimize latency in the case of a busy application which is not available to
receive the packet when it arrives.
However, this might decrease performance in the case of a high-pps-rate application.
Disable Arm CQ is 0
Check RX CQ is 1
Check TX CQ is 2
Check all CQs is 3
Default value is 0 (Disabled)

VMA_WAIT_AFTER_JOIN_MSEC
Expand Down
6 changes: 6 additions & 0 deletions src/vma/dev/buffer_pool.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,12 @@

inline static void free_lwip_pbuf(struct pbuf_custom *pbuf_custom)
{
mem_buf_desc_t* p_desc = (mem_buf_desc_t *)pbuf_custom;

if (p_desc->m_flags & mem_buf_desc_t::ZCOPY) {
p_desc->tx.zc.callback(p_desc);
}
pbuf_custom->pbuf.type = 0;
pbuf_custom->pbuf.flags = 0;
pbuf_custom->pbuf.ref = 0;
}
Expand Down
67 changes: 51 additions & 16 deletions src/vma/dev/net_device_val.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,9 @@ const char* ring_alloc_logic_attr::to_str()
return m_str;
}

net_device_val::net_device_val(struct net_device_val_desc *desc) : m_lock("net_device_val lock")
net_device_val::net_device_val(struct net_device_val_desc *desc) :
m_lock("net_device_val lock"),
m_sysvar_internal_thread_arm_cq(safe_mce_sys().internal_thread_arm_cq)
{
bool valid = false;
ib_ctx_handler* ib_ctx;
Expand Down Expand Up @@ -1133,16 +1135,33 @@ int net_device_val::global_ring_poll_and_process_element(uint64_t *p_poll_sn, vo
auto_unlocker lock(m_lock);
rings_hash_map_t::iterator ring_iter;
for (ring_iter = m_h_ring_map.begin(); ring_iter != m_h_ring_map.end(); ring_iter++) {
int ret = THE_RING->poll_and_process_element_rx(p_poll_sn, pv_fd_ready_array);
BULLSEYE_EXCLUDE_BLOCK_START
if (ret < 0 && errno != EAGAIN) {
nd_logerr("Error in ring->poll_and_process_element() of %p (errno=%d %m)", THE_RING, errno);
return ret;
if (m_sysvar_internal_thread_arm_cq & mce_sys_var::ARM_CQ_RX) {
int ret = THE_RING->poll_and_process_element_rx(p_poll_sn, pv_fd_ready_array);
BULLSEYE_EXCLUDE_BLOCK_START
if (ret < 0 && errno != EAGAIN) {
nd_logerr("Error in RX ring->poll_and_process_element() of %p (errno=%d %m)", THE_RING, errno);
return ret;
}
BULLSEYE_EXCLUDE_BLOCK_END
if (ret > 0) {
nd_logfunc("ring[%p] RX Returned with: %d (sn=%d)", THE_RING, ret, *p_poll_sn);
ret_total += ret;
}
}

if (m_sysvar_internal_thread_arm_cq & mce_sys_var::ARM_CQ_TX) {
int ret = THE_RING->poll_and_process_element_tx(p_poll_sn);
BULLSEYE_EXCLUDE_BLOCK_START
if (ret < 0 && errno != EAGAIN) {
nd_logerr("Error in TX ring->poll_and_process_element() of %p (errno=%d %m)", THE_RING, errno);
return ret;
}
BULLSEYE_EXCLUDE_BLOCK_END
if (ret > 0) {
nd_logfunc("ring[%p] TX Returned with: %d (sn=%d)", THE_RING, ret, *p_poll_sn);
ret_total += ret;
}
}
BULLSEYE_EXCLUDE_BLOCK_END
if (ret > 0)
nd_logfunc("ring[%p] Returned with: %d (sn=%d)", THE_RING, ret, *p_poll_sn);
ret_total += ret;
}
return ret_total;
}
Expand All @@ -1153,13 +1172,29 @@ int net_device_val::global_ring_request_notification(uint64_t poll_sn)
auto_unlocker lock(m_lock);
rings_hash_map_t::iterator ring_iter;
for (ring_iter = m_h_ring_map.begin(); ring_iter != m_h_ring_map.end(); ring_iter++) {
int ret = THE_RING->request_notification(CQT_RX, poll_sn);
if (ret < 0) {
nd_logerr("Error ring[%p]->request_notification() (errno=%d %m)", THE_RING, errno);
return ret;
if (m_sysvar_internal_thread_arm_cq & mce_sys_var::ARM_CQ_RX) {
int ret = THE_RING->request_notification(CQT_RX, poll_sn);
BULLSEYE_EXCLUDE_BLOCK_START
if (ret < 0) {
nd_logerr("Error RX ring[%p]->request_notification() (errno=%d %m)", THE_RING, errno);
return ret;
}
BULLSEYE_EXCLUDE_BLOCK_END
nd_logfunc("ring[%p] RX Returned with: %d (sn=%d)", THE_RING, ret, poll_sn);
ret_total += ret;
}

if (m_sysvar_internal_thread_arm_cq & mce_sys_var::ARM_CQ_TX) {
int ret = THE_RING->request_notification(CQT_TX, poll_sn);
BULLSEYE_EXCLUDE_BLOCK_START
if (ret < 0) {
nd_logerr("Error TX ring[%p]->request_notification() (errno=%d %m)", THE_RING, errno);
return ret;
}
BULLSEYE_EXCLUDE_BLOCK_END
nd_logfunc("ring[%p] TX Returned with: %d (sn=%d)", THE_RING, ret, poll_sn);
ret_total += ret;
}
nd_logfunc("ring[%p] Returned with: %d (sn=%d)", THE_RING, ret, poll_sn);
ret_total += ret;
}
return ret_total;
}
Expand Down
3 changes: 3 additions & 0 deletions src/vma/dev/net_device_val.h
Original file line number Diff line number Diff line change
Expand Up @@ -316,6 +316,9 @@ class net_device_val
std::string m_name; /* container for ifname */
char m_str[BUFF_SIZE]; /* detailed information about device */
char m_base_name[IFNAMSIZ]; /* base name of device basing ifname */

/* Global environment variables section */
const int m_sysvar_internal_thread_arm_cq;
};

class net_device_val_eth : public net_device_val
Expand Down
16 changes: 11 additions & 5 deletions src/vma/dev/qp_mgr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,6 @@ qp_mgr::qp_mgr(const ring_simple* p_ring, const ib_ctx_handler* p_context,
m_ibv_rx_sg_array = new ibv_sge[m_n_sysvar_rx_num_wr_to_post_recv];
m_ibv_rx_wr_array = new ibv_recv_wr[m_n_sysvar_rx_num_wr_to_post_recv];

set_unsignaled_count();
memset(&m_rate_limit, 0, sizeof(struct vma_rate_limit_t));

qp_logfunc("");
Expand Down Expand Up @@ -336,7 +335,6 @@ void qp_mgr::up()
release_tx_buffers();

/* clean any link to completions with error we might have */
set_unsignaled_count();
m_p_last_tx_mem_buf_desc = NULL;

modify_qp_to_ready_state();
Expand Down Expand Up @@ -497,7 +495,6 @@ void qp_mgr::trigger_completion_for_all_sent_packets()

// Close the Tx unsignaled send list
set_unsignaled_count();
m_p_last_tx_mem_buf_desc = NULL;

if (!m_p_ring->m_tx_num_wr_free) {
qp_logdbg("failed to trigger completion for all packets due to no available wr");
Expand Down Expand Up @@ -599,9 +596,19 @@ inline int qp_mgr::send_to_wire(vma_ibv_send_wr* p_send_wqe, vma_wr_tx_packet_at
int qp_mgr::send(vma_ibv_send_wr* p_send_wqe, vma_wr_tx_packet_attr attr)
{
mem_buf_desc_t* p_mem_buf_desc = (mem_buf_desc_t *)p_send_wqe->wr_id;
/* Control tx completions:
* - VMA_TX_WRE_BATCHING - The number of Tx Work Request Elements used
* until a completion signal is requested.
 * - ZCOPY packets should notify the application as soon as possible to
 * confirm that user buffers are free to reuse, so force a completion
 * signal for such work requests.
* - First call of send() should do completion. It means that
* m_n_unsignaled_count must be zero for this time.
*/
bool request_comp = (is_completion_need() ||
(p_mem_buf_desc->m_flags & mem_buf_desc_t::ZCOPY));

qp_logfunc("VERBS send, unsignaled_count: %d", m_n_unsignaled_count);
bool request_comp = is_completion_need();

#ifdef VMA_TIME_MEASURE
TAKE_T_TX_POST_SEND_START;
Expand Down Expand Up @@ -636,7 +643,6 @@ int qp_mgr::send(vma_ibv_send_wr* p_send_wqe, vma_wr_tx_packet_attr attr)
int ret;

set_unsignaled_count();
m_p_last_tx_mem_buf_desc = NULL;

// Poll the Tx CQ
uint64_t dummy_poll_sn = 0;
Expand Down
5 changes: 4 additions & 1 deletion src/vma/dev/qp_mgr.h
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,10 @@ friend class cq_mgr_mp;

int configure(struct ibv_comp_channel* p_rx_comp_event_channel);
virtual int prepare_ibv_qp(vma_ibv_qp_init_attr& qp_init_attr) = 0;
inline void set_unsignaled_count(void) { m_n_unsignaled_count = m_n_sysvar_tx_num_wr_to_signal - 1; }
inline void set_unsignaled_count(void) {
	/* Close the current unsignaled TX batch: drop the cached pointer to
	 * the batch's last memory descriptor and restart the countdown of
	 * work requests to post before the next completion signal. */
	m_p_last_tx_mem_buf_desc = NULL;
	m_n_unsignaled_count = m_n_sysvar_tx_num_wr_to_signal - 1;
}

virtual cq_mgr* init_rx_cq_mgr(struct ibv_comp_channel* p_rx_comp_event_channel);
virtual cq_mgr* init_tx_cq_mgr(void);
Expand Down
1 change: 0 additions & 1 deletion src/vma/dev/qp_mgr_eth_mlx5.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -887,7 +887,6 @@ void qp_mgr_eth_mlx5::trigger_completion_for_all_sent_packets()

// Close the Tx unsignaled send list
set_unsignaled_count();
m_p_last_tx_mem_buf_desc = NULL;

if (!m_p_ring->m_tx_num_wr_free) {
qp_logdbg("failed to trigger completion for all packets due to no available wr");
Expand Down
1 change: 1 addition & 0 deletions src/vma/dev/ring.h
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@ class ring
virtual int drain_and_proccess() = 0;
virtual int wait_for_notification_and_process_element(int cq_channel_fd, uint64_t* p_cq_poll_sn, void* pv_fd_ready_array = NULL) = 0;
virtual int poll_and_process_element_rx(uint64_t* p_cq_poll_sn, void* pv_fd_ready_array = NULL) = 0;
virtual int poll_and_process_element_tx(uint64_t* p_cq_poll_sn) = 0;
virtual void adapt_cq_moderation() = 0;
virtual void mem_buf_desc_return_single_to_owner_tx(mem_buf_desc_t* p_mem_buf_desc) = 0;

Expand Down
25 changes: 25 additions & 0 deletions src/vma/dev/ring_bond.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -451,6 +451,31 @@ int ring_bond::poll_and_process_element_rx(uint64_t* p_cq_poll_sn, void* pv_fd_r
}
}

/* Poll and process TX completions on every active slave ring of the bond.
 *
 * @param p_cq_poll_sn  in/out CQ poll serial number forwarded to each slave.
 * @return  Sum of positive per-slave results when any slave processed
 *          completions; otherwise the LAST slave's raw result (see note).
 *          Returns 0 with errno=EAGAIN when the TX ring lock is contended.
 */
int ring_bond::poll_and_process_element_tx(uint64_t* p_cq_poll_sn)
{
	/* Best-effort: if another thread holds the TX lock, report "nothing
	 * done, try again" instead of blocking. */
	if (m_lock_ring_tx.trylock()) {
		errno = EAGAIN;
		return 0;
	}

	int temp = 0;
	int ret = 0;
	for (uint32_t i = 0; i < m_bond_rings.size(); i++) {
		/* Only slaves currently up are polled. */
		if (m_bond_rings[i]->is_up()) {
			temp = m_bond_rings[i]->poll_and_process_element_tx(p_cq_poll_sn);
			/* Accumulate only successful (positive) counts. */
			if (temp > 0) {
				ret += temp;
			}
		}
	}
	m_lock_ring_tx.unlock();
	if (ret > 0) {
		return ret;
	} else {
		/* NOTE(review): when nothing was processed, this propagates the
		 * LAST polled slave's result (0 or negative) — an error on an
		 * earlier slave is masked if a later slave returns 0. Presumably
		 * intentional to mirror the RX path; confirm. */
		return temp;
	}
}

int ring_bond::drain_and_proccess()
{
if (m_lock_ring_rx.trylock()) {
Expand Down
3 changes: 2 additions & 1 deletion src/vma/dev/ring_bond.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,8 @@ class ring_bond : public ring {
virtual void print_val();

virtual int request_notification(cq_type_t cq_type, uint64_t poll_sn);
virtual int poll_and_process_element_rx(uint64_t* p_cq_poll_sn, void* pv_fd_ready_array = NULL);
virtual int poll_and_process_element_rx(uint64_t* p_cq_poll_sn, void* pv_fd_ready_array = NULL);
virtual int poll_and_process_element_tx(uint64_t* p_cq_poll_sn);
virtual void adapt_cq_moderation();
virtual bool reclaim_recv_buffers(descq_t *rx_reuse);
virtual bool reclaim_recv_buffers(mem_buf_desc_t* rx_reuse_lst);
Expand Down
7 changes: 7 additions & 0 deletions src/vma/dev/ring_simple.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -356,6 +356,13 @@ int ring_simple::poll_and_process_element_rx(uint64_t* p_cq_poll_sn, void* pv_fd
return ret;
}

/* Poll and process TX completions on this ring's TX CQ manager.
 *
 * @param p_cq_poll_sn  in/out CQ poll serial number passed to the CQ manager.
 * @return  Result accumulated into 'ret' by the macro below — presumably the
 *          number of processed elements, or the CQ manager's error result;
 *          the macro is expected to try-lock m_lock_ring_tx and leave ret
 *          unchanged (0) on contention. TODO confirm against the macro
 *          definition.
 */
int ring_simple::poll_and_process_element_tx(uint64_t* p_cq_poll_sn)
{
	int ret = 0;
	RING_TRY_LOCK_RUN_AND_UPDATE_RET(m_lock_ring_tx, m_p_cq_mgr_tx->poll_and_process_element_tx(p_cq_poll_sn));
	return ret;
}

int ring_simple::socketxtreme_poll(struct vma_completion_t *vma_completions, unsigned int ncompletions, int flags)
{
int ret = 0;
Expand Down
3 changes: 2 additions & 1 deletion src/vma/dev/ring_simple.h
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,8 @@ class ring_simple : public ring_slave
virtual ~ring_simple();

virtual int request_notification(cq_type_t cq_type, uint64_t poll_sn);
virtual int poll_and_process_element_rx(uint64_t* p_cq_poll_sn, void* pv_fd_ready_array = NULL);
virtual int poll_and_process_element_rx(uint64_t* p_cq_poll_sn, void* pv_fd_ready_array = NULL);
virtual int poll_and_process_element_tx(uint64_t* p_cq_poll_sn);
virtual void adapt_cq_moderation();
virtual bool reclaim_recv_buffers(descq_t *rx_reuse);
virtual bool reclaim_recv_buffers(mem_buf_desc_t* rx_reuse_lst);
Expand Down
1 change: 1 addition & 0 deletions src/vma/dev/ring_tap.h
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ class ring_tap : public ring_slave
virtual bool attach_flow(flow_tuple& flow_spec_5t, pkt_rcvr_sink* sink);
virtual bool detach_flow(flow_tuple& flow_spec_5t, pkt_rcvr_sink* sink);
virtual int poll_and_process_element_rx(uint64_t* p_cq_poll_sn, void* pv_fd_ready_array = NULL);
/* No-op stub: a TAP ring has no TX CQ to poll (presumably — confirm), so there is never anything to process; always returns 0. */
virtual int poll_and_process_element_tx(uint64_t* p_cq_poll_sn) { NOT_IN_USE(p_cq_poll_sn); return 0; }
virtual int wait_for_notification_and_process_element(int cq_channel_fd, uint64_t* p_cq_poll_sn, void* pv_fd_ready_array = NULL);
virtual int drain_and_proccess();
virtual bool reclaim_recv_buffers(descq_t *rx_reuse);
Expand Down
2 changes: 1 addition & 1 deletion src/vma/event/event_handler_manager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -229,7 +229,7 @@ void event_handler_manager::register_command_event(int fd, command* cmd)

event_handler_manager::event_handler_manager() :
m_reg_action_q_lock("reg_action_q_lock"),
m_b_sysvar_internal_thread_arm_cq_enabled(safe_mce_sys().internal_thread_arm_cq_enabled),
m_b_sysvar_internal_thread_arm_cq_enabled(safe_mce_sys().internal_thread_arm_cq),
m_n_sysvar_vma_time_measure_num_samples(safe_mce_sys().vma_time_measure_num_samples),
m_n_sysvar_timer_resolution_msec(safe_mce_sys().timer_resolution_msec)
{
Expand Down
78 changes: 57 additions & 21 deletions src/vma/iomux/epfd_info.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,10 +55,13 @@ int epfd_info::remove_fd_from_epoll_os(int fd)

epfd_info::epfd_info(int epfd, int size) :
lock_mutex_recursive("epfd_info"), m_epfd(epfd), m_size(size), m_ring_map_lock("epfd_ring_map_lock"),
m_lock_poll_os("epfd_lock_poll_os"), m_sysvar_thread_mode(safe_mce_sys().thread_mode),
m_b_os_data_available(false)
m_lock_poll_os("epfd_lock_poll_os"),
m_b_os_data_available(false),
m_sysvar_thread_mode(safe_mce_sys().thread_mode),
m_sysvar_internal_thread_arm_cq(safe_mce_sys().internal_thread_arm_cq)
{
__log_funcall("");

int max_sys_fd = get_sys_max_fd_num();
if (m_size<=max_sys_fd)
{
Expand Down Expand Up @@ -609,17 +612,35 @@ int epfd_info::ring_poll_and_process_element(uint64_t *p_poll_sn, void* pv_fd_re
m_ring_map_lock.lock();

for (ring_map_t::iterator iter = m_ring_map.begin(); iter != m_ring_map.end(); iter++) {
int ret = iter->first->poll_and_process_element_rx(p_poll_sn, pv_fd_ready_array);
BULLSEYE_EXCLUDE_BLOCK_START
if (ret < 0 && errno != EAGAIN) {
__log_err("Error in ring->poll_and_process_element() of %p (errno=%d %m)", iter->first, errno);
m_ring_map_lock.unlock();
return ret;
if (m_sysvar_internal_thread_arm_cq & mce_sys_var::ARM_CQ_RX) {
int ret = iter->first->poll_and_process_element_rx(p_poll_sn, pv_fd_ready_array);
BULLSEYE_EXCLUDE_BLOCK_START
if (ret < 0 && errno != EAGAIN) {
__log_err("Error in RX ring->poll_and_process_element() of %p (errno=%d %m)", iter->first, errno);
m_ring_map_lock.unlock();
return ret;
}
BULLSEYE_EXCLUDE_BLOCK_END
if (ret > 0) {
__log_func("ring[%p] RX Returned with: %d (sn=%d)", iter->first, ret, *p_poll_sn);
ret_total += ret;
}
}

if (m_sysvar_internal_thread_arm_cq & mce_sys_var::ARM_CQ_TX) {
int ret = iter->first->poll_and_process_element_tx(p_poll_sn);
BULLSEYE_EXCLUDE_BLOCK_START
if (ret < 0 && errno != EAGAIN) {
__log_err("Error in TX ring->poll_and_process_element() of %p (errno=%d %m)", iter->first, errno);
m_ring_map_lock.unlock();
return ret;
}
BULLSEYE_EXCLUDE_BLOCK_END
if (ret > 0) {
__log_func("ring[%p] TX Returned with: %d (sn=%d)", iter->first, ret, *p_poll_sn);
ret_total += ret;
}
}
BULLSEYE_EXCLUDE_BLOCK_END
if (ret > 0)
__log_func("ring[%p] Returned with: %d (sn=%d)", iter->first, ret, *p_poll_sn);
ret_total += ret;
}

m_ring_map_lock.unlock();
Expand All @@ -646,16 +667,31 @@ int epfd_info::ring_request_notification(uint64_t poll_sn)
m_ring_map_lock.lock();

for (ring_map_t::iterator iter = m_ring_map.begin(); iter != m_ring_map.end(); iter++) {
int ret = iter->first->request_notification(CQT_RX, poll_sn);
BULLSEYE_EXCLUDE_BLOCK_START
if (ret < 0) {
__log_err("Error ring[%p]->request_notification() (errno=%d %m)", iter->first, errno);
m_ring_map_lock.unlock();
return ret;
if (m_sysvar_internal_thread_arm_cq & mce_sys_var::ARM_CQ_RX) {
int ret = iter->first->request_notification(CQT_RX, poll_sn);
BULLSEYE_EXCLUDE_BLOCK_START
if (ret < 0) {
__log_err("Error RX ring[%p]->request_notification() (errno=%d %m)", iter->first, errno);
m_ring_map_lock.unlock();
return ret;
}
BULLSEYE_EXCLUDE_BLOCK_END
__log_func("ring[%p] RX Returned with: %d (sn=%d)", iter->first, ret, poll_sn);
ret_total += ret;
}

if (m_sysvar_internal_thread_arm_cq & mce_sys_var::ARM_CQ_TX) {
int ret = iter->first->request_notification(CQT_TX, poll_sn);
BULLSEYE_EXCLUDE_BLOCK_START
if (ret < 0) {
__log_err("Error TX ring[%p]->request_notification() (errno=%d %m)", iter->first, errno);
m_ring_map_lock.unlock();
return ret;
}
BULLSEYE_EXCLUDE_BLOCK_END
__log_func("ring[%p] TX Returned with: %d (sn=%d)", iter->first, ret, poll_sn);
ret_total += ret;
}
BULLSEYE_EXCLUDE_BLOCK_END
__log_func("ring[%p] Returned with: %d (sn=%d)", iter->first, ret, poll_sn);
ret_total += ret;
}

m_ring_map_lock.unlock();
Expand Down
Loading