diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp
index f9b91e7a16cb..bfb5978067d3 100644
--- a/orchagent/bufferorch.cpp
+++ b/orchagent/bufferorch.cpp
@@ -48,11 +48,7 @@ BufferOrch::BufferOrch(DBConnector *applDb, DBConnector *confDb, DBConnector *st
     m_flexCounterTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_TABLE)),
     m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)),
     m_countersDb(new DBConnector("COUNTERS_DB", 0)),
-    m_stateBufferMaximumValueTable(stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE),
-    m_ingressZeroBufferPool(SAI_NULL_OBJECT_ID),
-    m_egressZeroBufferPool(SAI_NULL_OBJECT_ID),
-    m_ingressZeroPoolRefCount(0),
-    m_egressZeroPoolRefCount(0)
+    m_stateBufferMaximumValueTable(stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE)
 {
     SWSS_LOG_ENTER();
     initTableHandlers();
@@ -314,65 +310,6 @@ const object_reference_map &BufferOrch::getBufferPoolNameOidMap(void)
     return *m_buffer_type_maps[APP_BUFFER_POOL_TABLE_NAME];
 }
 
-void BufferOrch::lockZeroBufferPool(bool ingress)
-{
-    if (ingress)
-        m_ingressZeroPoolRefCount++;
-    else
-        m_egressZeroPoolRefCount++;
-}
-
-void BufferOrch::unlockZeroBufferPool(bool ingress)
-{
-    sai_object_id_t pool = SAI_NULL_OBJECT_ID;
-    if (ingress)
-    {
-        if (--m_ingressZeroPoolRefCount <= 0)
-        {
-            pool = m_ingressZeroBufferPool;
-            m_ingressZeroBufferPool = SAI_NULL_OBJECT_ID;
-        }
-    }
-    else
-    {
-        if (--m_egressZeroPoolRefCount <= 0)
-        {
-            pool = m_egressZeroBufferPool;
-            m_egressZeroBufferPool = SAI_NULL_OBJECT_ID;
-        }
-    }
-
-    if (pool != SAI_NULL_OBJECT_ID)
-    {
-        auto sai_status = sai_buffer_api->remove_buffer_pool(pool);
-        if (SAI_STATUS_SUCCESS != sai_status)
-        {
-            SWSS_LOG_ERROR("Failed to remove buffer pool, rv:%d", sai_status);
-            task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BUFFER, sai_status);
-            if (handle_status != task_process_status::task_success)
-            {
-                return;
-            }
-        }
-        else
-        {
-            SWSS_LOG_NOTICE("Zero buffer pool has been successfully removed");
-        }
-    }
-}
-
-void BufferOrch::setZeroBufferPool(bool ingress, sai_object_id_t pool)
-{
-    if (ingress)
-    {
-        m_ingressZeroBufferPool = pool;
-    }
-    else
-    {
-        m_egressZeroBufferPool = pool;
-    }
-}
-
 task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple)
 {
     SWSS_LOG_ENTER();
@@ -381,8 +318,6 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple)
     string map_type_name = APP_BUFFER_POOL_TABLE_NAME;
     string object_name = kfvKey(tuple);
     string op = kfvOp(tuple);
-    sai_buffer_pool_type_t pool_direction = SAI_BUFFER_POOL_TYPE_INGRESS;
-    bool creating_zero_pool = false;
 
     SWSS_LOG_DEBUG("object name:%s", object_name.c_str());
     if (m_buffer_type_maps[map_type_name]->find(object_name) != m_buffer_type_maps[map_type_name]->end())
@@ -396,16 +331,6 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple)
         }
     }
     SWSS_LOG_DEBUG("processing command:%s", op.c_str());
-    if (object_name == "ingress_zero_pool")
-    {
-        creating_zero_pool = true;
-        pool_direction = SAI_BUFFER_POOL_TYPE_INGRESS;
-    }
-    else if (object_name == "egress_zero_pool")
-    {
-        creating_zero_pool = true;
-        pool_direction = SAI_BUFFER_POOL_TYPE_EGRESS;
-    }
 
     if (op == SET_COMMAND)
     {
@@ -453,11 +378,6 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple)
                 return task_process_status::task_invalid_entry;
             }
             attr.id = SAI_BUFFER_POOL_ATTR_TYPE;
-            if (creating_zero_pool && pool_direction != static_cast<sai_buffer_pool_type_t>(attr.value.u32))
-            {
-                SWSS_LOG_ERROR("Wrong pool direction for pool %s", object_name.c_str());
-                return task_process_status::task_invalid_entry;
-            }
             attribs.push_back(attr);
         }
         else if (field == buffer_pool_mode_field_name)
@@ -523,54 +443,20 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple)
         }
         else
         {
-            if (creating_zero_pool)
-            {
-                if (pool_direction == SAI_BUFFER_POOL_TYPE_INGRESS)
-                {
-                    sai_object = m_ingressZeroBufferPool;
-                }
-                else if (pool_direction == SAI_BUFFER_POOL_TYPE_EGRESS)
-                {
-                    sai_object = m_egressZeroBufferPool;
-                }
-            }
-
-            if (SAI_NULL_OBJECT_ID == sai_object)
-            {
-                sai_status = sai_buffer_api->create_buffer_pool(&sai_object, gSwitchId, (uint32_t)attribs.size(), attribs.data());
-                if (SAI_STATUS_SUCCESS != sai_status)
-                {
-                    SWSS_LOG_ERROR("Failed to create buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status);
-                    task_process_status handle_status = handleSaiCreateStatus(SAI_API_BUFFER, sai_status);
-                    if (handle_status != task_process_status::task_success)
-                    {
-                        return handle_status;
-                    }
-                }
-
-                SWSS_LOG_NOTICE("Created buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str());
-            }
-            else
-            {
-                SWSS_LOG_NOTICE("No need to create buffer pool %s since it has been created", object_name.c_str());
-            }
-
-            if (creating_zero_pool)
+            sai_status = sai_buffer_api->create_buffer_pool(&sai_object, gSwitchId, (uint32_t)attribs.size(), attribs.data());
+            if (SAI_STATUS_SUCCESS != sai_status)
             {
-                if (pool_direction == SAI_BUFFER_POOL_TYPE_INGRESS)
-                {
-                    m_ingressZeroPoolRefCount++;
-                    m_ingressZeroBufferPool = sai_object;
-                }
-                else if (pool_direction == SAI_BUFFER_POOL_TYPE_EGRESS)
+                SWSS_LOG_ERROR("Failed to create buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status);
+                task_process_status handle_status = handleSaiCreateStatus(SAI_API_BUFFER, sai_status);
+                if (handle_status != task_process_status::task_success)
                 {
-                    m_egressZeroPoolRefCount++;
-                    m_egressZeroBufferPool = sai_object;
+                    return handle_status;
                 }
             }
             (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId = sai_object;
             (*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove = false;
+            SWSS_LOG_NOTICE("Created buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str());
             // Here we take the PFC watchdog approach to update the COUNTERS_DB metadata (e.g., PFC_WD_DETECTION_TIME per queue)
             // at initialization (creation and registration phase)
             // Specifically, we push the buffer pool name to oid mapping upon the creation of the oid
@@ -593,39 +479,17 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple)
         if (SAI_NULL_OBJECT_ID != sai_object)
         {
             clearBufferPoolWatermarkCounterIdList(sai_object);
-            bool remove = true;
-            if (sai_object == m_ingressZeroBufferPool)
-            {
-                if (--m_ingressZeroPoolRefCount > 0)
-                    remove = false;
-                else
-                    m_ingressZeroBufferPool = SAI_NULL_OBJECT_ID;
-            }
-            else if (sai_object == m_egressZeroBufferPool)
-            {
-                if (--m_egressZeroPoolRefCount > 0)
-                    remove = false;
-                else
-                    m_egressZeroBufferPool = SAI_NULL_OBJECT_ID;
-            }
-            if (remove)
+            sai_status = sai_buffer_api->remove_buffer_pool(sai_object);
+            if (SAI_STATUS_SUCCESS != sai_status)
             {
-                sai_status = sai_buffer_api->remove_buffer_pool(sai_object);
-                if (SAI_STATUS_SUCCESS != sai_status)
+                SWSS_LOG_ERROR("Failed to remove buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status);
+                task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BUFFER, sai_status);
+                if (handle_status != task_process_status::task_success)
                 {
SWSS_LOG_ERROR("Failed to remove buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); - task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BUFFER, sai_status); - if (handle_status != task_process_status::task_success) - { - return handle_status; - } + return handle_status; } - SWSS_LOG_NOTICE("Removed buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); - } - else - { - SWSS_LOG_NOTICE("Will not remove buffer pool %s since it is still referenced", object_name.c_str()); } + SWSS_LOG_NOTICE("Removed buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); } auto it_to_delete = (m_buffer_type_maps[map_type_name])->find(object_name); (m_buffer_type_maps[map_type_name])->erase(it_to_delete); @@ -1049,7 +913,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup for (string port_name : port_names) { Port port; - bool portUpdated = false; SWSS_LOG_DEBUG("processing port:%s", port_name.c_str()); if (!gPortsOrch->getPort(port_name, port)) { @@ -1064,12 +927,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup SWSS_LOG_ERROR("Invalid pg index specified:%zd", ind); return task_process_status::task_invalid_entry; } - if (port.m_priority_group_lock[ind]) - { - SWSS_LOG_WARN("Priority group %zd on port %s is locked, pending profile 0x%" PRIx64 " until unlocked", ind, port_name.c_str(), sai_buffer_profile); - portUpdated = true; - port.m_priority_group_pending_profile[ind] = sai_buffer_profile; - } else { if (need_update_sai) @@ -1090,10 +947,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup } } } - if (portUpdated) - { - gPortsOrch->setPort(port_name, port); - } } if (m_ready_list.find(key) != m_ready_list.end()) diff --git a/orchagent/bufferorch.h b/orchagent/bufferorch.h index 24af140b4a28..59428509b507 100644 --- a/orchagent/bufferorch.h +++ b/orchagent/bufferorch.h @@ -37,14 +37,6 @@ class BufferOrch : public Orch static type_map m_buffer_type_maps; void generateBufferPoolWatermarkCounterIdList(void); const object_reference_map &getBufferPoolNameOidMap(void); - sai_object_id_t getZeroBufferPool(bool ingress) - { - return ingress ? 
-        return ingress ? m_ingressZeroBufferPool : m_egressZeroBufferPool;
-    }
-
-    void lockZeroBufferPool(bool ingress);
-    void unlockZeroBufferPool(bool ingress);
-    void setZeroBufferPool(bool direction, sai_object_id_t pool);
 
 private:
     typedef task_process_status (BufferOrch::*buffer_table_handler)(KeyOpFieldsValuesTuple &tuple);
@@ -80,10 +72,6 @@ class BufferOrch : public Orch
 
     bool m_isBufferPoolWatermarkCounterIdListGenerated = false;
 
-    sai_object_id_t m_ingressZeroBufferPool;
-    sai_object_id_t m_egressZeroBufferPool;
-    int m_ingressZeroPoolRefCount;
-    int m_egressZeroPoolRefCount;
 };
 
 #endif /* SWSS_BUFFORCH_H */
diff --git a/orchagent/pfcactionhandler.cpp b/orchagent/pfcactionhandler.cpp
index 6fb497812df9..f7dc20ef265b 100644
--- a/orchagent/pfcactionhandler.cpp
+++ b/orchagent/pfcactionhandler.cpp
@@ -3,7 +3,6 @@
 #include "logger.h"
 #include "sai_serialize.h"
 #include "portsorch.h"
-#include "bufferorch.h"
 #include <vector>
 #include <inttypes.h>
 
@@ -27,7 +26,6 @@
 extern sai_object_id_t gSwitchId;
 extern PortsOrch *gPortsOrch;
 extern AclOrch * gAclOrch;
-extern BufferOrch *gBufferOrch;
 extern sai_port_api_t *sai_port_api;
 extern sai_queue_api_t *sai_queue_api;
 extern sai_buffer_api_t *sai_buffer_api;
@@ -567,7 +565,7 @@ PfcWdZeroBufferHandler::PfcWdZeroBufferHandler(sai_object_id_t port,
         return;
     }
 
-    setPriorityGroupAndQueueLockFlag(portInstance, true);
+    setQueueLockFlag(portInstance, true);
 
     sai_attribute_t attr;
     attr.id = SAI_QUEUE_ATTR_BUFFER_PROFILE_ID;
@@ -583,7 +581,7 @@ PfcWdZeroBufferHandler::PfcWdZeroBufferHandler(sai_object_id_t port,
     sai_object_id_t oldQueueProfileId = attr.value.oid;
 
     attr.id = SAI_QUEUE_ATTR_BUFFER_PROFILE_ID;
-    attr.value.oid = ZeroBufferProfile::getZeroBufferProfile(false);
+    attr.value.oid = ZeroBufferProfile::getZeroBufferProfile();
 
     // Set our zero buffer profile
     status = sai_queue_api->set_queue_attribute(queue, &attr);
@@ -595,35 +593,6 @@ PfcWdZeroBufferHandler::PfcWdZeroBufferHandler(sai_object_id_t port,
 
     // Save original buffer profile
     m_originalQueueBufferProfile = oldQueueProfileId;
-
-    // Get PG
-    sai_object_id_t pg = portInstance.m_priority_group_ids[static_cast<size_t>(queueId)];
-
-    attr.id = SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE;
-
-    // Get PG's buffer profile
-    status = sai_buffer_api->get_ingress_priority_group_attribute(pg, 1, &attr);
-    if (status != SAI_STATUS_SUCCESS)
-    {
-        SWSS_LOG_ERROR("Failed to get buffer profile ID on PG 0x%" PRIx64 ": %d", pg, status);
-        return;
-    }
-
-    // Set zero profile to PG
-    sai_object_id_t oldPgProfileId = attr.value.oid;
-
-    attr.id = SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE;
-    attr.value.oid = ZeroBufferProfile::getZeroBufferProfile(true);
-
-    status = sai_buffer_api->set_ingress_priority_group_attribute(pg, &attr);
-    if (status != SAI_STATUS_SUCCESS)
-    {
-        SWSS_LOG_ERROR("Failed to set buffer profile ID on pg 0x%" PRIx64 ": %d", pg, status);
-        return;
-    }
-
-    // Save original buffer profile
-    m_originalPgBufferProfile = oldPgProfileId;
 }
 
 PfcWdZeroBufferHandler::~PfcWdZeroBufferHandler(void)
@@ -649,41 +618,12 @@ PfcWdZeroBufferHandler::~PfcWdZeroBufferHandler(void)
         return;
     }
 
-    auto idx = size_t(getQueueId());
-    sai_object_id_t pg = portInstance.m_priority_group_ids[idx];
-    sai_object_id_t pending_profile_id = portInstance.m_priority_group_pending_profile[idx];
-
-    attr.id = SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE;
-
-    if (pending_profile_id != SAI_NULL_OBJECT_ID)
-    {
-        attr.value.oid = pending_profile_id;
-        SWSS_LOG_NOTICE("Priority group %zd on port %s has been restored to pending profile 0x%" PRIx64,
-                        idx, portInstance.m_alias.c_str(), pending_profile_id);
-        portInstance.m_priority_group_pending_profile[idx] = SAI_NULL_OBJECT_ID;
-    }
-    else
-    {
-        attr.value.oid = m_originalPgBufferProfile;
-        SWSS_LOG_NOTICE("Priority group %zd on port %s has been restored to original profile 0x%" PRIx64,
-                        idx, portInstance.m_alias.c_str(), m_originalPgBufferProfile);
-    }
-
-    // Set our zero buffer profile
-    status = sai_buffer_api->set_ingress_priority_group_attribute(pg, &attr);
-    if (status != SAI_STATUS_SUCCESS)
-    {
-        SWSS_LOG_ERROR("Failed to set buffer profile ID on queue 0x%" PRIx64 ": %d", getQueue(), status);
-        return;
-    }
-
-    setPriorityGroupAndQueueLockFlag(portInstance, false);
+    setQueueLockFlag(portInstance, false);
 }
 
-void PfcWdZeroBufferHandler::setPriorityGroupAndQueueLockFlag(Port& port, bool isLocked) const
+void PfcWdZeroBufferHandler::setQueueLockFlag(Port& port, bool isLocked) const
 {
-    // set lock bits on PG and queue
-    port.m_priority_group_lock[static_cast<size_t>(getQueueId())] = isLocked;
+    // set lock bits on queue
     for (size_t i = 0; i < port.m_queue_ids.size(); ++i)
     {
         if (port.m_queue_ids[i] == getQueue())
@@ -703,9 +643,8 @@ PfcWdZeroBufferHandler::ZeroBufferProfile::~ZeroBufferProfile(void)
 {
     SWSS_LOG_ENTER();
 
-    // Destroy ingress and egress profiles and pools
-    destroyZeroBufferProfile(true);
-    destroyZeroBufferProfile(false);
+    // Destroy egress profiles and pools
+    destroyZeroBufferProfile();
 }
 
 PfcWdZeroBufferHandler::ZeroBufferProfile &PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance(void)
@@ -717,38 +656,19 @@ PfcWdZeroBufferHandler::ZeroBufferProfile &PfcWdZeroBufferHandler::ZeroBufferPro
     return instance;
 }
 
-sai_object_id_t& PfcWdZeroBufferHandler::ZeroBufferProfile::getPool(bool ingress)
-{
-    // If there is a cached zero buffer pool, just use it
-    // else fetch zero buffer pool from buffer orch
-    // If there is one, use it and increase the reference number.
-    // otherwise, just return NULL OID
-    // PfcWdZeroBufferHandler will create it later and notify buffer orch later
-    auto &poolId = ingress ? m_zeroIngressBufferPool : m_zeroEgressBufferPool;
-    if (poolId == SAI_NULL_OBJECT_ID)
-    {
-        poolId = gBufferOrch->getZeroBufferPool(ingress);
-        if (poolId != SAI_NULL_OBJECT_ID)
-        {
-            gBufferOrch->lockZeroBufferPool(ingress);
-        }
-    }
-    return poolId;
-}
-
-sai_object_id_t PfcWdZeroBufferHandler::ZeroBufferProfile::getZeroBufferProfile(bool ingress)
+sai_object_id_t PfcWdZeroBufferHandler::ZeroBufferProfile::getZeroBufferProfile()
 {
     SWSS_LOG_ENTER();
 
-    if (getInstance().getProfile(ingress) == SAI_NULL_OBJECT_ID)
+    if (getInstance().getProfile() == SAI_NULL_OBJECT_ID)
     {
-        getInstance().createZeroBufferProfile(ingress);
+        getInstance().createZeroBufferProfile();
     }
 
-    return getInstance().getProfile(ingress);
+    return getInstance().getProfile();
 }
 
-void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ingress)
+void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile()
 {
     SWSS_LOG_ENTER();
 
@@ -756,60 +676,51 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing
     sai_attribute_t attr;
     vector<sai_attribute_t> attribs;
     sai_status_t status;
 
-    auto &poolId = getPool(ingress);
-
-    if (SAI_NULL_OBJECT_ID == poolId)
-    {
-        // Create zero pool
-        attr.id = SAI_BUFFER_POOL_ATTR_SIZE;
-        attr.value.u64 = 0;
-        attribs.push_back(attr);
-
-        attr.id = SAI_BUFFER_POOL_ATTR_TYPE;
-        attr.value.u32 = ingress ? SAI_BUFFER_POOL_TYPE_INGRESS : SAI_BUFFER_POOL_TYPE_EGRESS;
-        attribs.push_back(attr);
+    // Create zero pool
+    attr.id = SAI_BUFFER_POOL_ATTR_SIZE;
+    attr.value.u64 = 0;
+    attribs.push_back(attr);
 
-        attr.id = SAI_BUFFER_POOL_ATTR_THRESHOLD_MODE;
-        attr.value.u32 = SAI_BUFFER_POOL_THRESHOLD_MODE_STATIC;
-        attribs.push_back(attr);
+    attr.id = SAI_BUFFER_POOL_ATTR_TYPE;
+    attr.value.u32 = SAI_BUFFER_POOL_TYPE_EGRESS;
+    attribs.push_back(attr);
 
-        status = sai_buffer_api->create_buffer_pool(
-            &poolId,
-            gSwitchId,
-            static_cast<uint32_t>(attribs.size()),
-            attribs.data());
-        if (status != SAI_STATUS_SUCCESS)
-        {
-            SWSS_LOG_ERROR("Failed to create dynamic zero buffer pool for PFC WD: %d", status);
-            return;
-        }
+    attr.id = SAI_BUFFER_POOL_ATTR_THRESHOLD_MODE;
+    attr.value.u32 = SAI_BUFFER_POOL_THRESHOLD_MODE_DYNAMIC;
+    attribs.push_back(attr);
 
-        // Pass the ownership to BufferOrch
-        gBufferOrch->setZeroBufferPool(ingress, poolId);
-        gBufferOrch->lockZeroBufferPool(ingress);
+    status = sai_buffer_api->create_buffer_pool(
+        &getPool(),
+        gSwitchId,
+        static_cast<uint32_t>(attribs.size()),
+        attribs.data());
+    if (status != SAI_STATUS_SUCCESS)
+    {
+        SWSS_LOG_ERROR("Failed to create dynamic zero buffer pool for PFC WD: %d", status);
+        return;
     }
 
     // Create zero profile
     attribs.clear();
 
     attr.id = SAI_BUFFER_PROFILE_ATTR_POOL_ID;
-    attr.value.oid = getPool(ingress);
+    attr.value.oid = getPool();
     attribs.push_back(attr);
 
     attr.id = SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE;
-    attr.value.u32 = SAI_BUFFER_PROFILE_THRESHOLD_MODE_STATIC;
+    attr.value.u32 = SAI_BUFFER_PROFILE_THRESHOLD_MODE_DYNAMIC;
     attribs.push_back(attr);
 
     attr.id = SAI_BUFFER_PROFILE_ATTR_BUFFER_SIZE;
     attr.value.u64 = 0;
     attribs.push_back(attr);
 
-    attr.id = SAI_BUFFER_PROFILE_ATTR_SHARED_STATIC_TH;
-    attr.value.s8 = 0;
+    attr.id = SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH;
+    attr.value.s8 = -8;
     attribs.push_back(attr);
 
     status = sai_buffer_api->create_buffer_profile(
-        &getProfile(ingress),
+        &getProfile(),
         gSwitchId,
         static_cast<uint32_t>(attribs.size()),
         attribs.data());
@@ -820,23 +731,20 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing
     }
 }
 
-void PfcWdZeroBufferHandler::ZeroBufferProfile::destroyZeroBufferProfile(bool ingress)
+void PfcWdZeroBufferHandler::ZeroBufferProfile::destroyZeroBufferProfile()
 {
     SWSS_LOG_ENTER();
 
-    if (getProfile(ingress) != SAI_NULL_OBJECT_ID)
+    sai_status_t status = sai_buffer_api->remove_buffer_profile(getProfile());
+    if (status != SAI_STATUS_SUCCESS)
     {
-        sai_status_t status = sai_buffer_api->remove_buffer_profile(getProfile(ingress));
-        if (status != SAI_STATUS_SUCCESS)
-        {
-            SWSS_LOG_ERROR("Failed to remove static zero buffer profile for PFC WD: %d", status);
-            return;
-        }
+        SWSS_LOG_ERROR("Failed to remove static zero buffer profile for PFC WD: %d", status);
+        return;
     }
 
-    auto &pool = ingress ? m_zeroIngressBufferPool : m_zeroEgressBufferPool;
-    if (pool != SAI_NULL_OBJECT_ID)
+    status = sai_buffer_api->remove_buffer_pool(getPool());
+    if (status != SAI_STATUS_SUCCESS)
     {
-        gBufferOrch->unlockZeroBufferPool(ingress);
+        SWSS_LOG_ERROR("Failed to remove static zero buffer pool for PFC WD: %d", status);
     }
 }
diff --git a/orchagent/pfcactionhandler.h b/orchagent/pfcactionhandler.h
index 22908fbe0850..d32df433f756 100644
--- a/orchagent/pfcactionhandler.h
+++ b/orchagent/pfcactionhandler.h
@@ -125,39 +125,39 @@ class PfcWdZeroBufferHandler: public PfcWdLossyHandler
 
     private:
         /*
-         * Sets lock bits on port's priority group and queue
-         * to protect them from being changed by other Orch's
-         */
-        void setPriorityGroupAndQueueLockFlag(Port& port, bool isLocked) const;
+         * Sets lock bits on port's queue
+         * to protect it from being changed by other Orch's
+         */
+        void setQueueLockFlag(Port& port, bool isLocked) const;
 
         // Singletone class for keeping shared data - zero buffer profiles
         class ZeroBufferProfile
         {
             public:
                 ~ZeroBufferProfile(void);
-                static sai_object_id_t getZeroBufferProfile(bool ingress);
+                static sai_object_id_t getZeroBufferProfile();
 
             private:
                 ZeroBufferProfile(void);
                 static ZeroBufferProfile &getInstance(void);
-                void createZeroBufferProfile(bool ingress);
-                void destroyZeroBufferProfile(bool ingress);
+                void createZeroBufferProfile();
+                void destroyZeroBufferProfile();
 
-                sai_object_id_t& getProfile(bool ingress)
+                sai_object_id_t& getProfile()
                 {
-                    return ingress ? m_zeroIngressBufferProfile : m_zeroEgressBufferProfile;
+                    return m_zeroEgressBufferProfile;
                 }
 
-                sai_object_id_t& getPool(bool ingress);
+                sai_object_id_t& getPool()
+                {
+                    return m_zeroEgressBufferPool;
+                }
 
-                sai_object_id_t m_zeroIngressBufferPool = SAI_NULL_OBJECT_ID;
                 sai_object_id_t m_zeroEgressBufferPool = SAI_NULL_OBJECT_ID;
-                sai_object_id_t m_zeroIngressBufferProfile = SAI_NULL_OBJECT_ID;
                 sai_object_id_t m_zeroEgressBufferProfile = SAI_NULL_OBJECT_ID;
         };
 
         sai_object_id_t m_originalQueueBufferProfile = SAI_NULL_OBJECT_ID;
-        sai_object_id_t m_originalPgBufferProfile = SAI_NULL_OBJECT_ID;
 };
 
 // PFC queue that implements drop action by draining queue via SAI
diff --git a/orchagent/port.h b/orchagent/port.h
index fe366630ac55..2850cfc154e4 100644
--- a/orchagent/port.h
+++ b/orchagent/port.h
@@ -153,15 +153,13 @@ class Port
     bool m_mpls = false;
 
     /*
-     * Following two bit vectors are used to lock
-     * the PG/queue from being changed in BufferOrch.
+     * Following bit vector is used to lock
+     * the queue from being changed in BufferOrch.
      * The use case scenario is when PfcWdZeroBufferHandler
-     * sets zero buffer profile it should protect PG/queue
+     * sets zero buffer profile it should protect queue
      * from being overwritten in BufferOrch.
      */
     std::vector<bool> m_queue_lock;
-    std::vector<bool> m_priority_group_lock;
-    std::vector<sai_object_id_t> m_priority_group_pending_profile;
 
     std::unordered_set<sai_object_id_t> m_ingress_acl_tables_uset;
     std::unordered_set<sai_object_id_t> m_egress_acl_tables_uset;
diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp
index 7b90254287de..2b816d71d221 100755
--- a/orchagent/portsorch.cpp
+++ b/orchagent/portsorch.cpp
@@ -4162,8 +4162,6 @@ void PortsOrch::initializePriorityGroups(Port &port)
     SWSS_LOG_INFO("Get %d priority groups for port %s", attr.value.u32, port.m_alias.c_str());
 
     port.m_priority_group_ids.resize(attr.value.u32);
-    port.m_priority_group_lock.resize(attr.value.u32);
-    port.m_priority_group_pending_profile.resize(attr.value.u32);
 
     if (attr.value.u32 == 0)
     {
diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp
index 28df6610fdf6..78c633d4a1cf 100644
--- a/tests/mock_tests/portsorch_ut.cpp
+++ b/tests/mock_tests/portsorch_ut.cpp
@@ -20,105 +20,6 @@ namespace portsorch_test
 
     using namespace std;
 
-    sai_queue_api_t ut_sai_queue_api;
-    sai_queue_api_t *pold_sai_queue_api;
-    sai_buffer_api_t ut_sai_buffer_api;
-    sai_buffer_api_t *pold_sai_buffer_api;
-
-    string _ut_stub_queue_key;
-    sai_status_t _ut_stub_sai_get_queue_attribute(
-        _In_ sai_object_id_t queue_id,
-        _In_ uint32_t attr_count,
-        _Inout_ sai_attribute_t *attr_list)
-    {
-        if (attr_count == 1 && attr_list[0].id == SAI_QUEUE_ATTR_BUFFER_PROFILE_ID)
-        {
-            auto &typemapQueue = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_QUEUE_TABLE_NAME]);
-            auto &profileName = typemapQueue["Ethernet0:3-4"].m_objsReferencingByMe["profile"];
-            auto profileNameVec = tokenize(profileName, ':');
-            auto &typemapProfile = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PROFILE_TABLE_NAME]);
-            attr_list[0].value.oid = typemapProfile[profileNameVec[1]].m_saiObjectId;
-            return SAI_STATUS_SUCCESS;
-        }
-        else
-        {
-            return pold_sai_queue_api->get_queue_attribute(queue_id, attr_count, attr_list);
-        }
-    }
-
-    sai_status_t _ut_stub_sai_get_ingress_priority_group_attribute(
-        _In_ sai_object_id_t ingress_priority_group_id,
-        _In_ uint32_t attr_count,
-        _Inout_ sai_attribute_t *attr_list)
-    {
-        if (attr_count == 1 && attr_list[0].id == SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE)
-        {
-            auto &typemapPg = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PG_TABLE_NAME]);
-            auto &profileName = typemapPg["Ethernet0:3-4"].m_objsReferencingByMe["profile"];
-            auto profileNameVec = tokenize(profileName, ':');
-            auto &typemapProfile = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PROFILE_TABLE_NAME]);
-            attr_list[0].value.oid = typemapProfile[profileNameVec[1]].m_saiObjectId;
-            return SAI_STATUS_SUCCESS;
-        }
-        else
-        {
-            return pold_sai_buffer_api->get_ingress_priority_group_attribute(ingress_priority_group_id, attr_count, attr_list);
-        }
-    }
-
-    int _sai_create_buffer_pool_count = 0;
-    sai_status_t _ut_stub_sai_create_buffer_pool(
-        _Out_ sai_object_id_t *buffer_pool_id,
-        _In_ sai_object_id_t switch_id,
-        _In_ uint32_t attr_count,
-        _In_ const sai_attribute_t *attr_list)
-    {
-        auto status = pold_sai_buffer_api->create_buffer_pool(buffer_pool_id, switch_id, attr_count, attr_list);
-        if (SAI_STATUS_SUCCESS == status)
-            _sai_create_buffer_pool_count++;
-        return status;
-    }
-
-    int _sai_remove_buffer_pool_count = 0;
-    sai_status_t _ut_stub_sai_remove_buffer_pool(
-        _In_ sai_object_id_t buffer_pool_id)
-    {
-        auto status = pold_sai_buffer_api->remove_buffer_pool(buffer_pool_id);
-        if (SAI_STATUS_SUCCESS == status)
-            _sai_remove_buffer_pool_count++;
-        return status;
-    }
-
-    void _hook_sai_buffer_and_queue_api()
-    {
-        ut_sai_buffer_api = *sai_buffer_api;
-        pold_sai_buffer_api = sai_buffer_api;
-        ut_sai_buffer_api.create_buffer_pool = _ut_stub_sai_create_buffer_pool;
-        ut_sai_buffer_api.remove_buffer_pool = _ut_stub_sai_remove_buffer_pool;
-        ut_sai_buffer_api.get_ingress_priority_group_attribute = _ut_stub_sai_get_ingress_priority_group_attribute;
-        sai_buffer_api = &ut_sai_buffer_api;
-
-        ut_sai_queue_api = *sai_queue_api;
-        pold_sai_queue_api = sai_queue_api;
-        ut_sai_queue_api.get_queue_attribute = _ut_stub_sai_get_queue_attribute;
-        sai_queue_api = &ut_sai_queue_api;
-    }
-
-    void _unhook_sai_buffer_and_queue_api()
-    {
-        sai_buffer_api = pold_sai_buffer_api;
-        sai_queue_api = pold_sai_queue_api;
-    }
-
-    void clear_pfcwd_zero_buffer_handler()
-    {
-        auto &zeroProfile = PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance();
-        zeroProfile.m_zeroIngressBufferPool = SAI_NULL_OBJECT_ID;
-        zeroProfile.m_zeroEgressBufferPool = SAI_NULL_OBJECT_ID;
-        zeroProfile.m_zeroIngressBufferProfile = SAI_NULL_OBJECT_ID;
-        zeroProfile.m_zeroEgressBufferProfile = SAI_NULL_OBJECT_ID;
-    }
-
     struct PortsOrchTest : public ::testing::Test
     {
         shared_ptr<swss::DBConnector> m_app_db;
@@ -460,9 +361,8 @@
         ASSERT_TRUE(ts.empty());
     }
 
-    TEST_F(PortsOrchTest, PfcZeroBufferHandlerLocksPortPgAndQueue)
+    TEST_F(PortsOrchTest, PfcZeroBufferHandler)
     {
-        _hook_sai_buffer_and_queue_api();
         Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME);
         Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME);
         Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME);
@@ -506,28 +406,26 @@
         Port port;
         gPortsOrch->getPort("Ethernet0", port);
 
+        auto countersTable = make_shared<Table>(m_counters_db.get(), COUNTERS_TABLE);
+        auto dropHandler = make_unique<PfcWdZeroBufferHandler>(port.m_port_id, port.m_queue_ids[3], 3, countersTable);
+
         // Create test buffer pool
         poolTable.set(
-            "ingress_pool",
+            "egress_pool",
             {
-                { "type", "ingress" },
+                { "type", "egress" },
                 { "mode", "dynamic" },
                 { "size", "4200000" },
             });
         poolTable.set(
-            "egress_pool",
+            "ingress_pool",
             {
-                { "type", "egress" },
+                { "type", "ingress" },
                 { "mode", "dynamic" },
                 { "size", "4200000" },
             });
 
         // Create test buffer profile
-        profileTable.set("test_profile", { { "pool", "ingress_pool" },
-                                           { "xon", "14832" },
-                                           { "xoff", "14832" },
-                                           { "size", "35000" },
-                                           { "dynamic_th", "0" } });
         profileTable.set("ingress_profile", { { "pool", "ingress_pool" },
                                               { "xon", "14832" },
                                               { "xoff", "14832" },
@@ -537,7 +435,7 @@
                                              { "size", "0" },
                                              { "dynamic_th", "0" } });
 
-        // Apply profile on PGs 3-4 all ports
+        // Apply profile on Queue and PGs 3-4 all ports
         for (const auto &it : ports)
         {
             std::ostringstream oss;
@@ -550,210 +448,28 @@
         gBufferOrch->addExistingData(&profileTable);
         gBufferOrch->addExistingData(&queueTable);
 
-        // process pool, profile and PGs
+        // process pool, profile and Q's
         static_cast<Orch *>(gBufferOrch)->doTask();
 
-        auto countersTable = make_shared<Table>(m_counters_db.get(), COUNTERS_TABLE);
-        auto current_create_buffer_pool_count = _sai_create_buffer_pool_count;
-        auto dropHandler = make_unique<PfcWdZeroBufferHandler>(port.m_port_id, port.m_queue_ids[3], 3, countersTable);
-
-        current_create_buffer_pool_count += 2;
-        ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count);
-        ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(true) == gBufferOrch->m_ingressZeroBufferPool);
-        ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(false) == gBufferOrch->m_egressZeroBufferPool);
-        ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1);
-        ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 1);
+        auto queueConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_QUEUE_TABLE_NAME));
+        queueConsumer->dumpPendingTasks(ts);
+        ASSERT_FALSE(ts.empty()); // Queue is skipped
+        ts.clear();
 
-        std::deque<KeyOpFieldsValuesTuple> entries;
-        entries.push_back({"Ethernet0:3-4", "SET", {{ "profile", "test_profile"}}});
         auto pgConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME));
-        pgConsumer->addToSync(entries);
-        entries.clear();
-        static_cast<Orch *>(gBufferOrch)->doTask();
-
-        // Port should have been updated by BufferOrch->doTask
-        gPortsOrch->getPort("Ethernet0", port);
-        auto profile_id = (*BufferOrch::m_buffer_type_maps["BUFFER_PROFILE_TABLE"])[string("test_profile")].m_saiObjectId;
-        ASSERT_TRUE(profile_id != SAI_NULL_OBJECT_ID);
-        ASSERT_TRUE(port.m_priority_group_pending_profile[3] == profile_id);
-        ASSERT_TRUE(port.m_priority_group_pending_profile[4] == SAI_NULL_OBJECT_ID);
-
-        pgConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME));
         pgConsumer->dumpPendingTasks(ts);
-        ASSERT_TRUE(ts.empty()); // PG is stored in m_priority_group_pending_profile
+        ASSERT_TRUE(ts.empty()); // PG Notification is not skipped
         ts.clear();
 
-        // Create a zero buffer pool after PFC storm
-        entries.push_back({"ingress_zero_pool", "SET", {{ "type", "ingress" },
-                                                        { "mode", "static" },
-                                                        { "size", "0" }}});
-        auto poolConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_POOL_TABLE_NAME));
-        poolConsumer->addToSync(entries);
-        entries.clear();
-        static_cast<Orch *>(gBufferOrch)->doTask();
-        // Reference increased
-        ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 2);
-        // Didn't create buffer pool again
-        ASSERT_TRUE(_sai_create_buffer_pool_count == current_create_buffer_pool_count);
-
-        entries.push_back({"ingress_zero_pool", "DEL", {}});
-        poolConsumer->addToSync(entries);
-        entries.clear();
-        auto current_remove_buffer_pool_count = _sai_remove_buffer_pool_count;
-        static_cast<Orch *>(gBufferOrch)->doTask();
-        ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1);
-        ASSERT_TRUE(_sai_remove_buffer_pool_count == current_remove_buffer_pool_count);
-
         // release zero buffer drop handler
         dropHandler.reset();
 
-        // re-fetch the port
-        gPortsOrch->getPort("Ethernet0", port);
-
-        // pending profile should be cleared
-        ASSERT_TRUE(port.m_priority_group_pending_profile[3] == SAI_NULL_OBJECT_ID);
-        ASSERT_TRUE(port.m_priority_group_pending_profile[4] == SAI_NULL_OBJECT_ID);
-
-        // process PGs
+        // process queue
         static_cast<Orch *>(gBufferOrch)->doTask();
 
-        pgConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME));
-        pgConsumer->dumpPendingTasks(ts);
-        ASSERT_TRUE(ts.empty()); // PG should be processed now
+        queueConsumer->dumpPendingTasks(ts);
+        ASSERT_TRUE(ts.empty()); // queue should be processed now
         ts.clear();
-        clear_pfcwd_zero_buffer_handler();
-        _unhook_sai_buffer_and_queue_api();
     }
 
-    TEST_F(PortsOrchTest, PfcZeroBufferHandlerLocksPortWithZeroPoolCreated)
-    {
-        _hook_sai_buffer_and_queue_api();
-        Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME);
-        Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME);
-        Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME);
-        Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME);
-        Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME);
-
-        // Get SAI default ports to populate DB
-        auto ports = ut_helper::getInitialSaiPorts();
-
-        // Populate port table with SAI ports
-        for (const auto &it : ports)
-        {
-            portTable.set(it.first, it.second);
-        }
-
-        // Set PortConfigDone, PortInitDone
-        portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } });
-        portTable.set("PortInitDone", { { "lanes", "0" } });
-
-        // refill consumer
-        gPortsOrch->addExistingData(&portTable);
-
-        // Apply configuration :
-        //  create ports
-
-        static_cast<Orch *>(gPortsOrch)->doTask();
-
-        // Apply configuration
-        //          ports
-        static_cast<Orch *>(gPortsOrch)->doTask();
-
-        ASSERT_TRUE(gPortsOrch->allPortsReady());
-
-        // No more tasks
-        vector<string> ts;
-        gPortsOrch->dumpPendingTasks(ts);
-        ASSERT_TRUE(ts.empty());
-        ts.clear();
-
-        // Simulate storm drop handler started on Ethernet0 TC 3
-        Port port;
-        gPortsOrch->getPort("Ethernet0", port);
-
-        // Create test buffer pool
-        poolTable.set("ingress_pool",
-                      {
-                          { "type", "ingress" },
-                          { "mode", "dynamic" },
-                          { "size", "4200000" },
-                      });
-        poolTable.set("egress_pool",
-                      {
-                          { "type", "egress" },
-                          { "mode", "dynamic" },
-                          { "size", "4200000" },
-                      });
-        poolTable.set("ingress_zero_pool",
-                      {
-                          { "type", "ingress" },
-                          { "mode", "static" },
-                          { "size", "0" }
-                      });
-        auto poolConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(APP_BUFFER_POOL_TABLE_NAME));
-
-        // Create test buffer profile
-        profileTable.set("ingress_profile", { { "pool", "ingress_pool" },
-                                              { "xon", "14832" },
-                                              { "xoff", "14832" },
-                                              { "size", "35000" },
-                                              { "dynamic_th", "0" } });
-        profileTable.set("egress_profile", { { "pool", "egress_pool" },
-                                             { "size", "0" },
-                                             { "dynamic_th", "0" } });
-
-        // Apply profile on PGs 3-4 all ports
-        for (const auto &it : ports)
-        {
-            std::ostringstream oss;
-            oss << it.first << ":3-4";
-            pgTable.set(oss.str(), { { "profile", "ingress_profile" } });
-            queueTable.set(oss.str(), { {"profile", "egress_profile" } });
-        }
-
-        gBufferOrch->addExistingData(&poolTable);
-        gBufferOrch->addExistingData(&profileTable);
-        gBufferOrch->addExistingData(&pgTable);
-        gBufferOrch->addExistingData(&queueTable);
-
-        auto current_create_buffer_pool_count = _sai_create_buffer_pool_count + 3; // call SAI API create_buffer_pool for each pool
-        ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 0);
-        ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 0);
-        ASSERT_TRUE(gBufferOrch->m_ingressZeroBufferPool == SAI_NULL_OBJECT_ID);
-        ASSERT_TRUE(gBufferOrch->m_egressZeroBufferPool == SAI_NULL_OBJECT_ID);
-
-        // process pool, profile and PGs
-        static_cast<Orch *>(gBufferOrch)->doTask();
-
-        ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count);
-        ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1);
-        ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 0);
-        ASSERT_TRUE(gBufferOrch->m_ingressZeroBufferPool != SAI_NULL_OBJECT_ID);
-        ASSERT_TRUE(gBufferOrch->m_egressZeroBufferPool == SAI_NULL_OBJECT_ID);
-
-        auto countersTable = make_shared<Table>(m_counters_db.get(), COUNTERS_TABLE);
-        auto dropHandler = make_unique<PfcWdZeroBufferHandler>(port.m_port_id, port.m_queue_ids[3], 3, countersTable);
-
-        current_create_buffer_pool_count++; // Increased for egress zero pool
-        ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count);
-        ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(true) == gBufferOrch->m_ingressZeroBufferPool);
-        ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(false) == gBufferOrch->m_egressZeroBufferPool);
-        ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 2);
-        ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 1);
-
-        std::deque<KeyOpFieldsValuesTuple> entries;
-        entries.push_back({"ingress_zero_pool", "DEL", {}});
-        poolConsumer->addToSync(entries);
-        entries.clear();
-        auto current_remove_buffer_pool_count = _sai_remove_buffer_pool_count;
-        static_cast<Orch *>(gBufferOrch)->doTask();
-        ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1);
-        ASSERT_TRUE(_sai_remove_buffer_pool_count == current_remove_buffer_pool_count);
-
-        // release zero buffer drop handler
-        dropHandler.reset();
-        clear_pfcwd_zero_buffer_handler();
-        _unhook_sai_buffer_and_queue_api();
-    }
-
     /* This test checks that a LAG member validation happens on orchagent level