Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support tunnel traffic QoS remapping #2190

Merged
merged 13 commits into from
Apr 27, 2022
95 changes: 90 additions & 5 deletions orchagent/muxorch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
#include "aclorch.h"
#include "routeorch.h"
#include "fdborch.h"
#include "qosorch.h"

/* Global variables */
extern Directory<Orch*> gDirectory;
Expand All @@ -42,7 +43,6 @@ extern sai_next_hop_api_t* sai_next_hop_api;
extern sai_router_interface_api_t* sai_router_intfs_api;

/* Constants */
#define MUX_TUNNEL "MuxTunnel0"
#define MUX_ACL_TABLE_NAME INGRESS_TABLE_DROP
#define MUX_ACL_RULE_NAME "mux_acl_rule"
#define MUX_HW_STATE_UNKNOWN "unknown"
Expand Down Expand Up @@ -162,7 +162,12 @@ static sai_status_t remove_route(IpPrefix &pfx)
return status;
}

static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress* p_src_ip)
static sai_object_id_t create_tunnel(
const IpAddress* p_dst_ip,
const IpAddress* p_src_ip,
sai_object_id_t tc_to_dscp_map_id,
sai_object_id_t tc_to_queue_map_id,
string dscp_mode_name)
{
sai_status_t status;

Expand Down Expand Up @@ -206,6 +211,19 @@ static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress*
attr.value.s32 = SAI_TUNNEL_TTL_MODE_PIPE_MODEL;
tunnel_attrs.push_back(attr);

sai_tunnel_dscp_mode_t dscp_mode;
if (dscp_mode_name == "uniform")
{
dscp_mode = SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL;
}
else
{
dscp_mode = SAI_TUNNEL_DSCP_MODE_PIPE_MODEL;
}
attr.id = SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE;
attr.value.s32 = dscp_mode;
tunnel_attrs.push_back(attr);

attr.id = SAI_TUNNEL_ATTR_LOOPBACK_PACKET_ACTION;
attr.value.s32 = SAI_PACKET_ACTION_DROP;
tunnel_attrs.push_back(attr);
Expand All @@ -224,6 +242,22 @@ static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress*
tunnel_attrs.push_back(attr);
}

// DSCP rewriting
if (tc_to_dscp_map_id != SAI_NULL_OBJECT_ID)
{
attr.id = SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP;
attr.value.oid = tc_to_dscp_map_id;
tunnel_attrs.push_back(attr);
}

// TC remapping
if (tc_to_queue_map_id != SAI_NULL_OBJECT_ID)
{
attr.id = SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP;
attr.value.oid = tc_to_queue_map_id;
tunnel_attrs.push_back(attr);
}

sai_object_id_t tunnel_id;
status = sai_tunnel_api->create_tunnel(&tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data());
if (status != SAI_STATUS_SUCCESS)
Expand Down Expand Up @@ -1154,7 +1188,9 @@ MuxOrch::MuxOrch(DBConnector *db, const std::vector<std::string> &tables,
Orch2(db, tables, request_),
decap_orch_(decapOrch),
neigh_orch_(neighOrch),
fdb_orch_(fdbOrch)
fdb_orch_(fdbOrch),
cfgTunnelTable_(db, CFG_TUNNEL_TABLE_NAME)
bingwang-ms marked this conversation as resolved.
Show resolved Hide resolved

{
handler_map_.insert(handler_pair(CFG_MUX_CABLE_TABLE_NAME, &MuxOrch::handleMuxCfg));
handler_map_.insert(handler_pair(CFG_PEER_SWITCH_TABLE_NAME, &MuxOrch::handlePeerSwitch));
Expand Down Expand Up @@ -1208,6 +1244,47 @@ bool MuxOrch::handleMuxCfg(const Request& request)
return true;
}

// Retrieve tc_to_queue_map and tc_to_dscp_map from CONFIG_DB, and
// resolve the ids from QosOrch
bool MuxOrch::resolveQosTableIds()
{
std::vector<FieldValueTuple> field_value_tuples;
if (cfgTunnelTable_.get(MUX_TUNNEL, field_value_tuples))
{
KeyOpFieldsValuesTuple tuple{"TUNNEL", MUX_TUNNEL, field_value_tuples};
for (auto it = kfvFieldsValues(tuple).begin(); it != kfvFieldsValues(tuple).end(); it++)
{
if (qos_to_ref_table_map.find(fvField(*it)) != qos_to_ref_table_map.end())
bingwang-ms marked this conversation as resolved.
Show resolved Hide resolved
{
sai_object_id_t id;
string object_name;
string &map_type_name = fvField(*it);
string &map_name = fvValue(*it);
ref_resolve_status status = resolveFieldRefValue(QosOrch::getTypeMap(), map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name);
if (status == ref_resolve_status::success)
{
if (map_type_name == encap_tc_to_queue_field_name)
{
tc_to_queue_map_id_ = id;
}
else if (map_type_name == encap_tc_to_dscp_field_name)
{
tc_to_dscp_map_id_ = id;
}
setObjectReference(QosOrch::getTypeMap(), CFG_TUNNEL_TABLE_NAME, MUX_TUNNEL, map_type_name, object_name);
SWSS_LOG_NOTICE("Resolved QoS map for tunnel %s type %s name %s", MUX_TUNNEL, map_type_name.c_str(), map_name.c_str());
}
}
}
return true;
}
else
{
SWSS_LOG_ERROR("Failed to read config from CONFIG_DB for %s", MUX_TUNNEL);
return false;
}
}

bool MuxOrch::handlePeerSwitch(const Request& request)
{
SWSS_LOG_ENTER();
Expand All @@ -1229,10 +1306,18 @@ bool MuxOrch::handlePeerSwitch(const Request& request)
MUX_TUNNEL, peer_ip.to_string().c_str());
return false;
}

if (!resolveQosTableIds())
{
return false;
}
auto it = dst_ips.getIpAddresses().begin();
const IpAddress& dst_ip = *it;
mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip);

// Read dscp_mode of MuxTunnel0 from config_db
string dscp_mode_name = "pipe";
cfgTunnelTable_.hget(MUX_TUNNEL, "dscp_mode", dscp_mode_name);

mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id_, tc_to_queue_map_id_, dscp_mode_name);
SWSS_LOG_NOTICE("Mux peer ip '%s' was added, peer name '%s'",
peer_ip.to_string().c_str(), peer_name.c_str());
}
Expand Down
5 changes: 5 additions & 0 deletions orchagent/muxorch.h
Original file line number Diff line number Diff line change
Expand Up @@ -196,8 +196,12 @@ class MuxOrch : public Orch2, public Observer, public Subject

bool getMuxPort(const MacAddress&, const string&, string&);

bool resolveQosTableIds();

IpAddress mux_peer_switch_ = 0x0;
sai_object_id_t mux_tunnel_id_ = SAI_NULL_OBJECT_ID;
sai_object_id_t tc_to_queue_map_id_ = SAI_NULL_OBJECT_ID;
sai_object_id_t tc_to_dscp_map_id_ = SAI_NULL_OBJECT_ID;

MuxCableTb mux_cable_tb_;
MuxTunnelNHs mux_tunnel_nh_;
Expand All @@ -210,6 +214,7 @@ class MuxOrch : public Orch2, public Observer, public Subject
FdbOrch *fdb_orch_;

MuxCfgRequest request_;
Table cfgTunnelTable_;
};

const request_description_t mux_cable_request_description = {
Expand Down
3 changes: 2 additions & 1 deletion orchagent/orchdaemon.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -220,7 +220,8 @@ bool OrchDaemon::init()
CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME,
CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME,
CFG_DSCP_TO_FC_MAP_TABLE_NAME,
CFG_EXP_TO_FC_MAP_TABLE_NAME
CFG_EXP_TO_FC_MAP_TABLE_NAME,
CFG_TC_TO_DSCP_MAP_TABLE_NAME
};
gQosOrch = new QosOrch(m_configDb, qos_tables);

Expand Down
92 changes: 91 additions & 1 deletion orchagent/qosorch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,8 @@ type_map QosOrch::m_qos_maps = {
{CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, new object_reference_map()},
{CFG_DSCP_TO_FC_MAP_TABLE_NAME, new object_reference_map()},
{CFG_EXP_TO_FC_MAP_TABLE_NAME, new object_reference_map()},
{CFG_TC_TO_DSCP_MAP_TABLE_NAME, new object_reference_map()},
{CFG_TUNNEL_TABLE_NAME, new object_reference_map()}
};

map<string, string> qos_to_ref_table_map = {
Expand All @@ -92,7 +94,11 @@ map<string, string> qos_to_ref_table_map = {
{scheduler_field_name, CFG_SCHEDULER_TABLE_NAME},
{wred_profile_field_name, CFG_WRED_PROFILE_TABLE_NAME},
{dscp_to_fc_field_name, CFG_DSCP_TO_FC_MAP_TABLE_NAME},
{exp_to_fc_field_name, CFG_EXP_TO_FC_MAP_TABLE_NAME}
{exp_to_fc_field_name, CFG_EXP_TO_FC_MAP_TABLE_NAME},
{decap_dscp_to_tc_field_name, CFG_DSCP_TO_TC_MAP_TABLE_NAME},
{decap_tc_to_pg_field_name, CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME},
{encap_tc_to_dscp_field_name, CFG_TC_TO_DSCP_MAP_TABLE_NAME},
{encap_tc_to_queue_field_name, CFG_TC_TO_QUEUE_MAP_TABLE_NAME}
};

#define DSCP_MAX_VAL 63
Expand Down Expand Up @@ -1063,6 +1069,82 @@ sai_object_id_t ExpToFcMapHandler::addQosItem(const vector<sai_attribute_t> &att
return sai_object;
}

// Convert CONFIG_DB TC_TO_DSCP_MAP field/value pairs (tc -> dscp) into a
// single SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST attribute appended to `attributes`.
// Returns false (and frees the temporary list) on any malformed or
// out-of-range value.
bool TcToDscpMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple,
                                                        vector<sai_attribute_t> &attributes)
{
    SWSS_LOG_ENTER();

    sai_attribute_t list_attr;
    list_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST;
    list_attr.value.qosmap.count = (uint32_t)kfvFieldsValues(tuple).size();
    // Ownership of this array transfers to `attributes` on success; the
    // QosMapHandler framework releases it after the SAI call.
    list_attr.value.qosmap.list = new sai_qos_map_t[list_attr.value.qosmap.count]();
    uint32_t ind = 0;

    for (auto i = kfvFieldsValues(tuple).begin(); i != kfvFieldsValues(tuple).end(); i++, ind++)
    {
        try
        {
            auto value = stoi(fvValue(*i));
            if (value < 0)
            {
                SWSS_LOG_ERROR("DSCP value %d is negative", value);
                delete[] list_attr.value.qosmap.list;
                return false;
            }
            else if (value > DSCP_MAX_VAL)
            {
                SWSS_LOG_ERROR("DSCP value %d is greater than max value %d", value, DSCP_MAX_VAL);
                delete[] list_attr.value.qosmap.list;
                return false;
            }
            list_attr.value.qosmap.list[ind].key.tc = static_cast<sai_uint8_t>(stoi(fvField(*i)));
            list_attr.value.qosmap.list[ind].value.dscp = static_cast<sai_uint8_t>(value);

            SWSS_LOG_DEBUG("key.tc:%d, value.dscp:%d",
                           list_attr.value.qosmap.list[ind].key.tc,
                           list_attr.value.qosmap.list[ind].value.dscp);
        }
        // Catch std::exception rather than only invalid_argument: stoi() also
        // throws std::out_of_range for oversized numeric strings, which would
        // otherwise leak the list and escape this handler.
        catch(const exception& e)
        {
            SWSS_LOG_ERROR("Got exception during conversion: %s", e.what());
            delete[] list_attr.value.qosmap.list;
            return false;
        }
    }
    attributes.push_back(list_attr);
    return true;
}

// Create the SAI QoS map object for a TC -> DSCP mapping.
// `attributes` carries the value list built by convertFieldValuesToAttributes();
// returns the new object id, or SAI_NULL_OBJECT_ID on failure.
sai_object_id_t TcToDscpMapHandler::addQosItem(const vector<sai_attribute_t> &attributes)
{
    SWSS_LOG_ENTER();

    // Assemble the creation attribute list: map type first, then the
    // TC->DSCP value list handed in by the caller.
    vector<sai_attribute_t> attrs;
    sai_attribute_t attr;

    attr.id = SAI_QOS_MAP_ATTR_TYPE;
    attr.value.u32 = SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DSCP;
    attrs.push_back(attr);

    attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST;
    attr.value.qosmap.count = attributes[0].value.qosmap.count;
    attr.value.qosmap.list = attributes[0].value.qosmap.list;
    attrs.push_back(attr);

    sai_object_id_t sai_object;
    sai_status_t sai_status = sai_qos_map_api->create_qos_map(&sai_object,
                                                              gSwitchId,
                                                              (uint32_t)attrs.size(),
                                                              attrs.data());
    if (SAI_STATUS_SUCCESS != sai_status)
    {
        SWSS_LOG_ERROR("Failed to create tc_to_dscp map. status:%d", sai_status);
        return SAI_NULL_OBJECT_ID;
    }
    SWSS_LOG_DEBUG("created QosMap object:%" PRIx64, sai_object);
    return sai_object;
}

task_process_status QosOrch::handleExpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
SWSS_LOG_ENTER();
Expand All @@ -1077,6 +1159,13 @@ task_process_status QosOrch::handlePfcToQueueTable(Consumer& consumer, KeyOpFiel
return pfc_to_queue_handler.processWorkItem(consumer, tuple);
}

// Dispatch entry for the TC_TO_DSCP_MAP table: hand the tuple to the
// dedicated TC->DSCP map handler.
task_process_status QosOrch::handleTcToDscpTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple)
{
    SWSS_LOG_ENTER();
    TcToDscpMapHandler handler;
    return handler.processWorkItem(consumer, tuple);
}

QosOrch::QosOrch(DBConnector *db, vector<string> &tableNames) : Orch(db, tableNames)
{
SWSS_LOG_ENTER();
Expand All @@ -1103,6 +1192,7 @@ void QosOrch::initTableHandlers()
m_qos_handler_map.insert(qos_handler_pair(CFG_WRED_PROFILE_TABLE_NAME, &QosOrch::handleWredProfileTable));
m_qos_handler_map.insert(qos_handler_pair(CFG_DSCP_TO_FC_MAP_TABLE_NAME, &QosOrch::handleDscpToFcTable));
m_qos_handler_map.insert(qos_handler_pair(CFG_EXP_TO_FC_MAP_TABLE_NAME, &QosOrch::handleExpToFcTable));
m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_DSCP_MAP_TABLE_NAME, &QosOrch::handleTcToDscpTable));

m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handleTcToPgTable));
m_qos_handler_map.insert(qos_handler_pair(CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handlePfcPrioToPgTable));
Expand Down
15 changes: 15 additions & 0 deletions orchagent/qosorch.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,10 @@ const string yellow_drop_probability_field_name = "yellow_drop_probability";
const string green_drop_probability_field_name = "green_drop_probability";
const string dscp_to_fc_field_name = "dscp_to_fc_map";
const string exp_to_fc_field_name = "exp_to_fc_map";
const string decap_dscp_to_tc_field_name = "decap_dscp_to_tc_map";
const string decap_tc_to_pg_field_name = "decap_tc_to_pg_map";
const string encap_tc_to_queue_field_name = "encap_tc_to_queue_map";
const string encap_tc_to_dscp_field_name = "encap_tc_to_dscp_map";

const string wred_profile_field_name = "wred_profile";
const string wred_red_enable_field_name = "wred_red_enable";
Expand Down Expand Up @@ -56,6 +60,8 @@ const string ecn_green_red = "ecn_green_red";
const string ecn_green_yellow = "ecn_green_yellow";
const string ecn_all = "ecn_all";

// Declaration for being referenced in muxorch and decaporch
extern std::map<string, string> qos_to_ref_table_map;
class QosMapHandler
{
public:
Expand Down Expand Up @@ -147,6 +153,14 @@ class ExpToFcMapHandler : public QosMapHandler
sai_object_id_t addQosItem(const vector<sai_attribute_t> &attributes) override;
};

// Handler for TC_TO_DSCP_MAP
// QosMapHandler specialization that builds SAI QoS map objects of type
// TC_AND_COLOR_TO_DSCP from CONFIG_DB TC_TO_DSCP_MAP entries.
class TcToDscpMapHandler : public QosMapHandler
{
public:
// Translate the table entry's tc->dscp field/value pairs into a
// SAI qosmap value-list attribute; returns false on malformed values.
bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector<sai_attribute_t> &attributes) override;
// Create the SAI QoS map object from the converted attributes;
// returns SAI_NULL_OBJECT_ID on failure.
sai_object_id_t addQosItem(const vector<sai_attribute_t> &attributes) override;
};

class QosOrch : public Orch
{
public:
Expand Down Expand Up @@ -177,6 +191,7 @@ class QosOrch : public Orch
task_process_status handleWredProfileTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleDscpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleExpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);
task_process_status handleTcToDscpTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple);

sai_object_id_t getSchedulerGroup(const Port &port, const sai_object_id_t queue_id);

Expand Down
Loading