Skip to content

Commit

Permalink
[port/buffer] introduce a sync mechanism to protect port PG/queue fro…
Browse files Browse the repository at this point in the history
…m changes under PFC storm (#1143)

* [port/buffer] introduce a sync mechanism to protect port PG/queue from
changes under PFC storm

Signed-off-by: Stepan Blyschak <stepanb@mellanox.com>

* [pfcactionhandler] fix pg lock bit is not set

Signed-off-by: Stepan Blyschak <stepanb@mellanox.com>

* [portsorch_ut] add PfcZeroBufferHandlerLocksPortPgAndQueue unit test

Signed-off-by: Stepan Blyschak <stepanb@mellanox.com>

* [pfcactionhandler] add method header

Signed-off-by: Stepan Blyschak <stepanb@mellanox.com>

* [port.h] fix typos

Signed-off-by: Stepan Blyschak <stepanb@mellanox.com>

* [pfcactionhandler] fix method name that set lock bits

Signed-off-by: Stepan Blyschak <stepanb@mellanox.com>
  • Loading branch information
stepanblyschak authored and liat-grozovik committed Dec 18, 2019
1 parent 823e426 commit 9f6efa0
Show file tree
Hide file tree
Showing 8 changed files with 181 additions and 10 deletions.
10 changes: 10 additions & 0 deletions orchagent/bufferorch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -562,6 +562,11 @@ task_process_status BufferOrch::processQueue(Consumer &consumer)
SWSS_LOG_ERROR("Invalid queue index specified:%zd", ind);
return task_process_status::task_invalid_entry;
}
if (port.m_queue_lock[ind])
{
SWSS_LOG_WARN("Queue %zd on port %s is locked, will retry", ind, port_name.c_str());
return task_process_status::task_need_retry;
}
queue_id = port.m_queue_ids[ind];
SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to queue index:%zd, queue sai_id:0x%" PRIx64, sai_buffer_profile, ind, queue_id);
sai_status_t sai_status = sai_queue_api->set_queue_attribute(queue_id, &attr);
Expand Down Expand Up @@ -661,6 +666,11 @@ task_process_status BufferOrch::processPriorityGroup(Consumer &consumer)
SWSS_LOG_ERROR("Invalid pg index specified:%zd", ind);
return task_process_status::task_invalid_entry;
}
if (port.m_priority_group_lock[ind])
{
SWSS_LOG_WARN("Priority group %zd on port %s is locked, will retry", ind, port_name.c_str());
return task_process_status::task_need_retry;
}
pg_id = port.m_priority_group_ids[ind];
SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to port:%s pg index:%zd, pg sai_id:0x%" PRIx64, sai_buffer_profile, port_name.c_str(), ind, pg_id);
sai_status_t sai_status = sai_buffer_api->set_ingress_priority_group_attribute(pg_id, &attr);
Expand Down
32 changes: 25 additions & 7 deletions orchagent/pfcactionhandler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -434,6 +434,15 @@ PfcWdZeroBufferHandler::PfcWdZeroBufferHandler(sai_object_id_t port,
{
SWSS_LOG_ENTER();

Port portInstance;
if (!gPortsOrch->getPort(port, portInstance))
{
SWSS_LOG_ERROR("Cannot get port by ID 0x%" PRIx64, port);
return;
}

setPriorityGroupAndQueueLockFlag(portInstance, true);

sai_attribute_t attr;
attr.id = SAI_QUEUE_ATTR_BUFFER_PROFILE_ID;

Expand Down Expand Up @@ -462,13 +471,6 @@ PfcWdZeroBufferHandler::PfcWdZeroBufferHandler(sai_object_id_t port,
m_originalQueueBufferProfile = oldQueueProfileId;

// Get PG
Port portInstance;
if (!gPortsOrch->getPort(port, portInstance))
{
SWSS_LOG_ERROR("Cannot get port by ID 0x%" PRIx64, port);
return;
}

sai_object_id_t pg = portInstance.m_priority_group_ids[static_cast <size_t> (queueId)];

attr.id = SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE;
Expand Down Expand Up @@ -533,6 +535,22 @@ PfcWdZeroBufferHandler::~PfcWdZeroBufferHandler(void)
SWSS_LOG_ERROR("Failed to set buffer profile ID on queue 0x%" PRIx64 ": %d", getQueue(), status);
return;
}

setPriorityGroupAndQueueLockFlag(portInstance, false);
}

void PfcWdZeroBufferHandler::setPriorityGroupAndQueueLockFlag(Port& port, bool isLocked) const
{
// Set (or clear) the lock bits that tell BufferOrch to retry instead of
// re-applying buffer profiles to this port's PG/queue while the PFC storm
// handler owns them.
// NOTE(review): the PG lock is indexed by getQueueId() — this assumes the
// priority-group index equals the storming TC/queue index; confirm that
// mapping holds for all platforms.
// set lock bits on PG and queue
port.m_priority_group_lock[static_cast<size_t>(getQueueId())] = isLocked;
// Find the queue whose SAI object ID matches the handler's queue and
// mirror the lock state onto its per-index lock bit.
for (size_t i = 0; i < port.m_queue_ids.size(); ++i)
{
if (port.m_queue_ids[i] == getQueue())
{
port.m_queue_lock[i] = isLocked;
}
}
// Persist the modified Port copy back into PortsOrch so other orchs
// observe the updated lock bits.
gPortsOrch->setPort(port.m_alias, port);
}

PfcWdZeroBufferHandler::ZeroBufferProfile::ZeroBufferProfile(void)
Expand Down
12 changes: 9 additions & 3 deletions orchagent/pfcactionhandler.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,17 +31,17 @@ class PfcWdActionHandler
uint8_t queueId, shared_ptr<Table> countersTable);
virtual ~PfcWdActionHandler(void);

inline sai_object_id_t getPort(void)
inline sai_object_id_t getPort(void) const
{
return m_port;
}

inline sai_object_id_t getQueue(void)
inline sai_object_id_t getQueue(void) const
{
return m_queue;
}

inline sai_object_id_t getQueueId(void)
inline uint8_t getQueueId(void) const
{
return m_queueId;
}
Expand Down Expand Up @@ -123,6 +123,12 @@ class PfcWdZeroBufferHandler: public PfcWdLossyHandler
virtual ~PfcWdZeroBufferHandler(void);

private:
/*
* Sets lock bits on port's priority group and queue
* to protect them from being changed by other Orchs
*/
void setPriorityGroupAndQueueLockFlag(Port& port, bool isLocked) const;

// Singleton class for keeping shared data - zero buffer profiles
class ZeroBufferProfile
{
Expand Down
10 changes: 10 additions & 0 deletions orchagent/port.h
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,16 @@ class Port
std::vector<sai_object_id_t> m_priority_group_ids;
sai_port_priority_flow_control_mode_t m_pfc_asym = SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED;
uint8_t m_pfc_bitmask = 0;
/*
* Following two bit vectors are used to lock
* the PG/queue from being changed in BufferOrch.
* The use case scenario is when PfcWdZeroBufferHandler
* sets zero buffer profile it should protect PG/queue
* from being overwritten in BufferOrch.
*/
std::vector<bool> m_queue_lock;
std::vector<bool> m_priority_group_lock;

};

}
Expand Down
2 changes: 2 additions & 0 deletions orchagent/portsorch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2608,6 +2608,7 @@ void PortsOrch::initializeQueues(Port &port)
SWSS_LOG_INFO("Get %d queues for port %s", attr.value.u32, port.m_alias.c_str());

port.m_queue_ids.resize(attr.value.u32);
port.m_queue_lock.resize(attr.value.u32);

if (attr.value.u32 == 0)
{
Expand Down Expand Up @@ -2643,6 +2644,7 @@ void PortsOrch::initializePriorityGroups(Port &port)
SWSS_LOG_INFO("Get %d priority groups for port %s", attr.value.u32, port.m_alias.c_str());

port.m_priority_group_ids.resize(attr.value.u32);
port.m_priority_group_lock.resize(attr.value.u32);

if (attr.value.u32 == 0)
{
Expand Down
1 change: 1 addition & 0 deletions tests/mock_tests/mock_orchagent_main.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,3 +54,4 @@ extern sai_tunnel_api_t *sai_tunnel_api;
extern sai_next_hop_api_t *sai_next_hop_api;
extern sai_hostif_api_t *sai_hostif_api;
extern sai_buffer_api_t *sai_buffer_api;
extern sai_queue_api_t *sai_queue_api;
122 changes: 122 additions & 0 deletions tests/mock_tests/portsorch_ut.cpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
#include "ut_helper.h"
#include "mock_orchagent_main.h"
#include "mock_table.h"
#include "pfcactionhandler.h"

#include <sstream>

Expand All @@ -14,12 +15,15 @@ namespace portsorch_test
shared_ptr<swss::DBConnector> m_app_db;
shared_ptr<swss::DBConnector> m_config_db;
shared_ptr<swss::DBConnector> m_state_db;
shared_ptr<swss::DBConnector> m_counters_db;

PortsOrchTest()
{
// FIXME: move out from constructor
m_app_db = make_shared<swss::DBConnector>(
"APPL_DB", 0);
m_counters_db = make_shared<swss::DBConnector>(
"COUNTERS_DB", 0);
m_config_db = make_shared<swss::DBConnector>(
"CONFIG_DB", 0);
m_state_db = make_shared<swss::DBConnector>(
Expand Down Expand Up @@ -310,4 +314,122 @@ namespace portsorch_test
gBufferOrch->dumpPendingTasks(ts);
ASSERT_TRUE(ts.empty());
}

// Verifies that an active PfcWdZeroBufferHandler locks the affected port's
// PG/queue (BufferOrch must defer profile application while the handler is
// alive) and that destroying the handler releases the lock so the pending
// PG configuration is finally processed.
TEST_F(PortsOrchTest, PfcZeroBufferHandlerLocksPortPgAndQueue)
{
Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME);
Table pgTable = Table(m_config_db.get(), CFG_BUFFER_PG_TABLE_NAME);
Table profileTable = Table(m_config_db.get(), CFG_BUFFER_PROFILE_TABLE_NAME);
Table poolTable = Table(m_config_db.get(), CFG_BUFFER_POOL_TABLE_NAME);

// Get SAI default ports to populate DB
auto ports = ut_helper::getInitialSaiPorts();

// Create dependencies ...

const int portsorch_base_pri = 40;

vector<table_name_with_pri_t> ports_tables = {
{ APP_PORT_TABLE_NAME, portsorch_base_pri + 5 },
{ APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 },
{ APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri },
{ APP_LAG_TABLE_NAME, portsorch_base_pri + 4 },
{ APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri }
};

ASSERT_EQ(gPortsOrch, nullptr);
gPortsOrch = new PortsOrch(m_app_db.get(), ports_tables);
vector<string> buffer_tables = { CFG_BUFFER_POOL_TABLE_NAME,
CFG_BUFFER_PROFILE_TABLE_NAME,
CFG_BUFFER_QUEUE_TABLE_NAME,
CFG_BUFFER_PG_TABLE_NAME,
CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME,
CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME };

ASSERT_EQ(gBufferOrch, nullptr);
gBufferOrch = new BufferOrch(m_config_db.get(), buffer_tables);

// Populate port table with SAI ports
for (const auto &it : ports)
{
portTable.set(it.first, it.second);
}

// Set PortConfigDone, PortInitDone
portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } });
portTable.set("PortInitDone", { { "lanes", "0" } });

// refill consumer
gPortsOrch->addExistingData(&portTable);

// Apply configuration :
// create ports

static_cast<Orch *>(gPortsOrch)->doTask();

// Apply configuration
// ports (second pass finishes port initialization)
static_cast<Orch *>(gPortsOrch)->doTask();

ASSERT_TRUE(gPortsOrch->allPortsReady());

// No more tasks
vector<string> ts;
gPortsOrch->dumpPendingTasks(ts);
ASSERT_TRUE(ts.empty());
ts.clear();

// Simulate storm drop handler started on Ethernet0 TC 3:
// constructing the handler sets the PG/queue lock bits on the port
Port port;
gPortsOrch->getPort("Ethernet0", port);

auto countersTable = make_shared<Table>(m_counters_db.get(), COUNTERS_TABLE);
auto dropHandler = make_unique<PfcWdZeroBufferHandler>(port.m_port_id, port.m_queue_ids[3], 3, countersTable);

// Create test buffer pool
poolTable.set(
"test_pool",
{
{ "type", "ingress" },
{ "mode", "dynamic" },
{ "size", "4200000" },
});

// Create test buffer profile
profileTable.set("test_profile", { { "pool", "[BUFFER_POOL|test_pool]" },
{ "xon", "14832" },
{ "xoff", "14832" },
{ "size", "35000" },
{ "dynamic_th", "0" } });

// Apply profile on PGs 3-4 all ports
for (const auto &it : ports)
{
std::ostringstream oss;
oss << it.first << "|3-4";
pgTable.set(oss.str(), { { "profile", "[BUFFER_PROFILE|test_profile]" } });
}
gBufferOrch->addExistingData(&pgTable);
gBufferOrch->addExistingData(&poolTable);
gBufferOrch->addExistingData(&profileTable);

// process pool, profile and PGs
static_cast<Orch *>(gBufferOrch)->doTask();

auto pgConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(CFG_BUFFER_PG_TABLE_NAME));
pgConsumer->dumpPendingTasks(ts);
ASSERT_FALSE(ts.empty()); // PG is skipped (locked by the active drop handler)
ts.clear();

// release zero buffer drop handler — destructor clears the lock bits
dropHandler.reset();

// process PGs
static_cast<Orch *>(gBufferOrch)->doTask();

pgConsumer = static_cast<Consumer*>(gBufferOrch->getExecutor(CFG_BUFFER_PG_TABLE_NAME));
pgConsumer->dumpPendingTasks(ts);
ASSERT_TRUE(ts.empty()); // PG should be processed now
ts.clear();
}
}
2 changes: 2 additions & 0 deletions tests/mock_tests/ut_saihelper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@ namespace ut_helper
sai_api_query(SAI_API_ACL, (void **)&sai_acl_api);
sai_api_query(SAI_API_HOSTIF, (void **)&sai_hostif_api);
sai_api_query(SAI_API_BUFFER, (void **)&sai_buffer_api);
sai_api_query(SAI_API_QUEUE, (void **)&sai_queue_api);

return SAI_STATUS_SUCCESS;
}
Expand All @@ -99,6 +100,7 @@ namespace ut_helper
sai_acl_api = nullptr;
sai_hostif_api = nullptr;
sai_buffer_api = nullptr;
sai_queue_api = nullptr;
}

map<string, vector<FieldValueTuple>> getInitialSaiPorts()
Expand Down

0 comments on commit 9f6efa0

Please sign in to comment.