diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 4c94571e03ebadfc8e31758b065a9f8ea4d77b72..742f0c1f60df785eabe83ff5ab18746526100e2a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -498,6 +498,60 @@ static inline bool cgx_event_is_linkevent(u64 event)
 		return false;
 }
 
+static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
+					   struct cgx *cgx)
+{
+	u64 req = 0;
+	u64 resp;
+	int err;
+
+	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
+	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+	if (!err)
+		*prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);
+
+	return err;
+}
+
+static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
+					     struct cgx *cgx)
+{
+	u64 req = 0;
+	u64 resp;
+	int err;
+
+	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
+	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+	if (!err)
+		*prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);
+
+	return err;
+}
+
+int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
+{
+	struct cgx *cgx_dev;
+	int err;
+
+	if (!addr || !size)
+		return -EINVAL;
+
+	cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list);
+	if (!cgx_dev)
+		return -ENXIO;
+
+	err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
+	if (err)
+		return -EIO;
+
+	err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
+	if (err)
+		return -EIO;
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_get_mkex_prfl_info);
+
 static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
 {
 	struct lmac *lmac = data;
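The two firmware helpers added above exchange a single 64-bit word with the CGX firmware: the command ID is packed into the request with FIELD_SET(CMDREG_ID, ...) and the payload is pulled out of bits 63:9 of the response via RESP_MKEX_PRFL_SIZE / RESP_MKEX_PRFL_ADDR (defined in cgx_fw_if.h below). A minimal standalone sketch of that unpacking, using plain shifts instead of the driver's FIELD_GET macro; the example size/address values are made up for illustration only:

#include <stdint.h>
#include <stdio.h>

/* RESP_MKEX_PRFL_SIZE and RESP_MKEX_PRFL_ADDR are GENMASK_ULL(63, 9):
 * the payload occupies bits 63:9 of the 64-bit response word.
 */
#define RESP_PAYLOAD_SHIFT	9

static uint64_t resp_payload(uint64_t resp)
{
	return resp >> RESP_PAYLOAD_SHIFT;
}

int main(void)
{
	/* Pretend the firmware reported a 64KB profile at 0x2400000 */
	uint64_t size_resp = 0x10000ULL << RESP_PAYLOAD_SHIFT;
	uint64_t addr_resp = 0x2400000ULL << RESP_PAYLOAD_SHIFT;

	printf("mkex profile: %llu bytes @ 0x%llx\n",
	       (unsigned long long)resp_payload(size_resp),
	       (unsigned long long)resp_payload(addr_resp));
	return 0;
}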
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 8c2be84933216927dea326442607a9b8385ed172..206dc5dc1df8ea3caf227a8cab3e4b0fb0d66de2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -111,4 +111,5 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
 int cgx_get_link_info(void *cgxd, int lmac_id,
 		      struct cgx_link_user_info *linfo);
 int cgx_lmac_linkup_start(void *cgxd);
+int cgx_get_mkex_prfl_info(u64 *addr, u64 *size);
 #endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index 2d9fe51c66166efb22a612d0f09d8c9a3c25e23a..fb3ba4968a9bdeb61674ca73ca4d3a9d042a2df5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -78,6 +78,8 @@ enum cgx_cmd_id {
 	CGX_CMD_LINK_STATE_CHANGE,
 	CGX_CMD_MODE_CHANGE,		/* hot plug support */
 	CGX_CMD_INTF_SHUTDOWN,
+	CGX_CMD_GET_MKEX_PRFL_SIZE,
+	CGX_CMD_GET_MKEX_PRFL_ADDR
 };
 
 /* async event ids */
@@ -137,6 +139,16 @@ enum cgx_cmd_own {
  */
 #define RESP_MAC_ADDR		GENMASK_ULL(56, 9)
 
+/* Response to cmd ID CGX_CMD_GET_MKEX_PRFL_SIZE when cmd status is
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_SIZE		GENMASK_ULL(63, 9)
+
+/* Response to cmd ID CGX_CMD_GET_MKEX_PRFL_ADDR when cmd status is
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_ADDR		GENMASK_ULL(63, 9)
+
 /* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
  * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
  *
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 9bddb032dd7ecb25e29ebb0d4ebba6157a1c5e6b..ec50a21c5aaf3ddbd3b88e422e42c631aa73f523 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -143,6 +143,9 @@ enum nix_scheduler {
 	NIX_TXSCH_LVL_CNT = 0x5,
 };
 
+#define TXSCH_TL1_DFLT_RR_QTM      ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_PRIO     (0x1ull)
+
 /* Min/Max packet sizes, excluding FCS */
 #define	NIC_HW_MIN_FRS			40
 #define	NIC_HW_MAX_FRS			9212
@@ -193,26 +196,4 @@ enum nix_scheduler {
 #define DEFAULT_RSS_CONTEXT_GROUP	0
 #define MAX_RSS_INDIR_TBL_SIZE		256 /* 1 << Max adder bits */
 
-/* NIX flow tag, key type flags */
-#define FLOW_KEY_TYPE_PORT	BIT(0)
-#define FLOW_KEY_TYPE_IPV4	BIT(1)
-#define FLOW_KEY_TYPE_IPV6	BIT(2)
-#define FLOW_KEY_TYPE_TCP	BIT(3)
-#define FLOW_KEY_TYPE_UDP	BIT(4)
-#define FLOW_KEY_TYPE_SCTP	BIT(5)
-
-/* NIX flow tag algorithm indices, max is 31 */
-enum {
-	FLOW_KEY_ALG_PORT,
-	FLOW_KEY_ALG_IP,
-	FLOW_KEY_ALG_TCP,
-	FLOW_KEY_ALG_UDP,
-	FLOW_KEY_ALG_SCTP,
-	FLOW_KEY_ALG_TCP_UDP,
-	FLOW_KEY_ALG_TCP_SCTP,
-	FLOW_KEY_ALG_UDP_SCTP,
-	FLOW_KEY_ALG_TCP_UDP_SCTP,
-	FLOW_KEY_ALG_MAX,
-};
-
 #endif /* COMMON_H */
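The new TXSCH_TL1_DFLT_* constants are consumed by nix_tl1_default_cfg() in rvu_nix.c: the quantum is written as-is into NIX_AF_TL1X_SCHEDULE and the priority is shifted left by one into NIX_AF_TL1X_TOPOLOGY. A quick standalone sketch of the register values that produces (the constants are restated locally; hardware field widths beyond what the defines imply are not checked here):

#include <stdint.h>
#include <stdio.h>

#define TXSCH_TL1_DFLT_RR_QTM	((1 << 24) - 1)	/* all-ones 24-bit quantum */
#define TXSCH_TL1_DFLT_RR_PRIO	(0x1ull)

int main(void)
{
	uint64_t schedule = TXSCH_TL1_DFLT_RR_QTM;	  /* 0xFFFFFF */
	uint64_t topology = TXSCH_TL1_DFLT_RR_PRIO << 1; /* as written by nix_tl1_default_cfg() */

	printf("TL1 SCHEDULE=0x%llx TOPOLOGY=0x%llx\n",
	       (unsigned long long)schedule, (unsigned long long)topology);
	return 0;
}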
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 292c13a901df5a149029c12ffe793364fcbd2619..76a4575d18ff28779a7d998b5b7d35566fede9ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -193,12 +193,20 @@ M(NIX_TXSCHQ_CFG,	0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp)  \
 M(NIX_STATS_RST,	0x8007, nix_stats_rst, msg_req, msg_rsp)	\
 M(NIX_VTAG_CFG,		0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp)	\
 M(NIX_RSS_FLOWKEY_CFG,  0x8009, nix_rss_flowkey_cfg,			\
-				 nix_rss_flowkey_cfg, msg_rsp)		\
+				 nix_rss_flowkey_cfg,			\
+				 nix_rss_flowkey_cfg_rsp)		\
 M(NIX_SET_MAC_ADDR,	0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \
 M(NIX_SET_RX_MODE,	0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp)	\
 M(NIX_SET_HW_FRS,	0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp)	\
 M(NIX_LF_START_RX,	0x800d, nix_lf_start_rx, msg_req, msg_rsp)	\
 M(NIX_LF_STOP_RX,	0x800e, nix_lf_stop_rx, msg_req, msg_rsp)	\
+M(NIX_MARK_FORMAT_CFG,	0x800f, nix_mark_format_cfg,			\
+				 nix_mark_format_cfg,			\
+				 nix_mark_format_cfg_rsp)		\
+M(NIX_SET_RX_CFG,	0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp)	\
+M(NIX_LSO_FORMAT_CFG,	0x8011, nix_lso_format_cfg,			\
+				 nix_lso_format_cfg,			\
+				 nix_lso_format_cfg_rsp)		\
 M(NIX_RXVLAN_ALLOC,	0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
@@ -413,6 +421,10 @@ enum nix_af_status {
 	NIX_AF_INVAL_TXSCHQ_CFG     = -412,
 	NIX_AF_SMQ_FLUSH_FAILED     = -413,
 	NIX_AF_ERR_LF_RESET         = -414,
+	NIX_AF_ERR_RSS_NOSPC_FIELD  = -415,
+	NIX_AF_ERR_RSS_NOSPC_ALGO   = -416,
+	NIX_AF_ERR_MARK_CFG_FAIL    = -417,
+	NIX_AF_ERR_LSO_CFG_FAIL     = -418,
 	NIX_AF_INVAL_NPA_PF_FUNC    = -419,
 	NIX_AF_INVAL_SSO_PF_FUNC    = -420,
 };
@@ -560,15 +572,40 @@ struct nix_vtag_config {
 struct nix_rss_flowkey_cfg {
 	struct mbox_msghdr hdr;
 	int	mcam_index;  /* MCAM entry index to modify */
+#define NIX_FLOW_KEY_TYPE_PORT	BIT(0)
+#define NIX_FLOW_KEY_TYPE_IPV4	BIT(1)
+#define NIX_FLOW_KEY_TYPE_IPV6	BIT(2)
+#define NIX_FLOW_KEY_TYPE_TCP	BIT(3)
+#define NIX_FLOW_KEY_TYPE_UDP	BIT(4)
+#define NIX_FLOW_KEY_TYPE_SCTP	BIT(5)
 	u32	flowkey_cfg; /* Flowkey types selected */
 	u8	group;       /* RSS context or group */
 };
 
+struct nix_rss_flowkey_cfg_rsp {
+	struct mbox_msghdr hdr;
+	u8	alg_idx; /* Selected algo index */
+};
+
 struct nix_set_mac_addr {
 	struct mbox_msghdr hdr;
 	u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
 };
 
+struct nix_mark_format_cfg {
+	struct mbox_msghdr hdr;
+	u8 offset;
+	u8 y_mask;
+	u8 y_val;
+	u8 r_mask;
+	u8 r_val;
+};
+
+struct nix_mark_format_cfg_rsp {
+	struct mbox_msghdr hdr;
+	u8 mark_format_idx;
+};
+
 struct nix_rx_mode {
 	struct mbox_msghdr hdr;
 #define NIX_RX_MODE_UCAST	BIT(0)
@@ -577,6 +614,15 @@ struct nix_rx_mode {
 	u16	mode;
 };
 
+struct nix_rx_cfg {
+	struct mbox_msghdr hdr;
+#define NIX_RX_OL3_VERIFY   BIT(0)
+#define NIX_RX_OL4_VERIFY   BIT(1)
+	u8 len_verify; /* Outer L3/L4 len check */
+#define NIX_RX_CSUM_OL4_VERIFY  BIT(0)
+	u8 csum_verify; /* Outer L4 checksum verification */
+};
+
 struct nix_frs_cfg {
 	struct mbox_msghdr hdr;
 	u8	update_smq;    /* Update SMQ's min/max lens */
@@ -586,6 +632,18 @@ struct nix_frs_cfg {
 	u16	minlen;
 };
 
+struct nix_lso_format_cfg {
+	struct mbox_msghdr hdr;
+	u64 field_mask;
+#define NIX_LSO_FIELD_MAX	8
+	u64 fields[NIX_LSO_FIELD_MAX];
+};
+
+struct nix_lso_format_cfg_rsp {
+	struct mbox_msghdr hdr;
+	u8 lso_format_idx;
+};
+
 /* NPC mbox message structs */
 
 #define NPC_MCAM_ENTRY_INVALID	0xFFFF
@@ -730,6 +788,8 @@ struct npc_get_kex_cfg_rsp {
 	u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
 	/* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
 	u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+#define MKEX_NAME_LEN 128
+	u8 mkex_pfl_name[MKEX_NAME_LEN];
 };
 
 #endif /* MBOX_H */
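For reference, a PF/VF mailbox client fills struct nix_rss_flowkey_cfg by OR-ing the NIX_FLOW_KEY_TYPE_* bits defined above, and the AF hands back the reserved algorithm index in nix_rss_flowkey_cfg_rsp::alg_idx. A minimal sketch of composing a TCP/UDP 4-tuple configuration; the mailbox send path itself is omitted and the bit values are restated locally:

#include <stdint.h>
#include <stdio.h>

#define NIX_FLOW_KEY_TYPE_IPV4	(1U << 1)
#define NIX_FLOW_KEY_TYPE_IPV6	(1U << 2)
#define NIX_FLOW_KEY_TYPE_TCP	(1U << 3)
#define NIX_FLOW_KEY_TYPE_UDP	(1U << 4)

int main(void)
{
	/* Hash on SIP/DIP plus TCP/UDP source and destination ports */
	uint32_t flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 |
			       NIX_FLOW_KEY_TYPE_IPV6 |
			       NIX_FLOW_KEY_TYPE_TCP |
			       NIX_FLOW_KEY_TYPE_UDP;

	printf("flowkey_cfg = 0x%x\n", flowkey_cfg);
	return 0;
}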
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index a7a20afb3ba0c6077688d727a8ae7b7edf8ca168..8d6d90fdfb739c2f4398d96d4da2693135a19066 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -265,4 +265,22 @@ struct nix_rx_action {
 #define VTAG0_LID_MASK		GENMASK_ULL(10, 8)
 #define VTAG0_RELPTR_MASK	GENMASK_ULL(7, 0)
 
+struct npc_mcam_kex {
+	/* MKEX Profile Header */
+	u64 mkex_sign; /* "mkexprof" (8 ASCII characters, base-36 encoded) */
+	u8 name[MKEX_NAME_LEN];   /* MKEX Profile name */
+	u64 cpu_model;   /* Format as profiled by CPU hardware */
+	u64 kpu_version; /* KPU firmware/profile version */
+	u64 reserved; /* Reserved for extension */
+
+	/* MKEX Profile Data */
+	u64 keyx_cfg[NPC_MAX_INTF]; /* NPC_AF_INTF(0..1)_KEX_CFG */
+	/* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+	u64 kex_ld_flags[NPC_MAX_LD];
+	/* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+	u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+	/* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+	u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+} __packed;
+
 #endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 4d061d9719561017ac05afaa2dd397fdd93282b6..e581091c09c4e328ffca2a4d10b99af391d62c42 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -52,6 +52,10 @@ MODULE_LICENSE("GPL v2");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, rvu_id_table);
 
+static char *mkex_profile; /* MKEX profile name */
+module_param(mkex_profile, charp, 0000);
+MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+
 /* Poll a RVU block's register 'offset', for a 'zero'
  * or 'nonzero' at bits specified by 'mask'
  */
@@ -2359,6 +2363,14 @@ static void rvu_disable_sriov(struct rvu *rvu)
 	pci_disable_sriov(rvu->pdev);
 }
 
+static void rvu_update_module_params(struct rvu *rvu)
+{
+	const char *default_pfl_name = "default";
+
+	strscpy(rvu->mkex_pfl_name,
+		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+}
+
 static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct device *dev = &pdev->dev;
@@ -2412,6 +2424,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_release_regions;
 	}
 
+	/* Store module params in rvu structure */
+	rvu_update_module_params(rvu);
+
 	/* Check which blocks the HW supports */
 	rvu_check_block_implemented(rvu);
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index ae8e2e206c875da23db3f907fa1821333818cdbf..c9d60b0554c0ff5f0e5a9c0087a9b487679ee1b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -156,7 +156,17 @@ struct rvu_pfvf {
 struct nix_txsch {
 	struct rsrc_bmap schq;
 	u8   lvl;
-	u16  *pfvf_map;
+#define NIX_TXSCHQ_TL1_CFG_DONE       BIT_ULL(0)
+#define TXSCH_MAP_FUNC(__pfvf_map)    ((__pfvf_map) & 0xFFFF)
+#define TXSCH_MAP_FLAGS(__pfvf_map)   ((__pfvf_map) >> 16)
+#define TXSCH_MAP(__func, __flags)    (((__func) & 0xFFFF) | ((__flags) << 16))
+	u32  *pfvf_map;
+};
+
+struct nix_mark_format {
+	u8 total;
+	u8 in_use;
+	u32 *cfg;
 };
 
 struct npc_pkind {
@@ -164,9 +174,23 @@ struct npc_pkind {
 	u32	*pfchan_map;
 };
 
+struct nix_flowkey {
+#define NIX_FLOW_KEY_ALG_MAX 32
+	u32 flowkey[NIX_FLOW_KEY_ALG_MAX];
+	int in_use;
+};
+
+struct nix_lso {
+	u8 total;
+	u8 in_use;
+};
+
 struct nix_hw {
 	struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
 	struct nix_mcast mcast;
+	struct nix_flowkey flowkey;
+	struct nix_mark_format mark_format;
+	struct nix_lso lso;
 };
 
 struct rvu_hwinfo {
@@ -237,6 +261,8 @@ struct rvu {
 	struct			workqueue_struct *cgx_evh_wq;
 	spinlock_t		cgx_evq_lock; /* cgx event queue lock */
 	struct list_head	cgx_evq_head; /* cgx event queue head */
+
+	char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
 };
 
 static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -366,6 +392,8 @@ int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
 /* NIX APIs */
 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
 int rvu_nix_init(struct rvu *rvu);
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+				int blkaddr, u32 cfg);
 void rvu_nix_freemem(struct rvu *rvu);
 int rvu_get_nixlf_count(struct rvu *rvu);
 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
@@ -398,7 +426,7 @@ int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
 				      struct msg_rsp *rsp);
 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
 					 struct nix_rss_flowkey_cfg *req,
-					 struct msg_rsp *rsp);
+					 struct nix_rss_flowkey_cfg_rsp *rsp);
 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
 				      struct nix_set_mac_addr *req,
 				      struct msg_rsp *rsp);
@@ -410,6 +438,14 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
 				     struct msg_rsp *rsp);
 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
 				    struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
+					 struct nix_mark_format_cfg  *req,
+					 struct nix_mark_format_cfg_rsp *rsp);
+int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+				    struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
+					struct nix_lso_format_cfg *req,
+					struct nix_lso_format_cfg_rsp *rsp);
 
 /* NPC APIs */
 int rvu_npc_init(struct rvu *rvu);
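The pfvf_map entries change from u16 to u32 above so that the owning pcifunc (low 16 bits) and per-queue flags such as NIX_TXSCHQ_TL1_CFG_DONE (high 16 bits) fit in one word; TXSCH_MAP, TXSCH_MAP_FUNC and TXSCH_MAP_FLAGS do the packing. A standalone round-trip sketch of that encoding, with the macros restated and an arbitrary example pcifunc:

#include <assert.h>
#include <stdint.h>

#define NIX_TXSCHQ_TL1_CFG_DONE	(1U << 0)
#define TXSCH_MAP_FUNC(m)	((m) & 0xFFFF)
#define TXSCH_MAP_FLAGS(m)	((m) >> 16)
#define TXSCH_MAP(func, flags)	(((func) & 0xFFFF) | ((flags) << 16))

int main(void)
{
	uint16_t pcifunc = 0x0400;		/* arbitrary example owner */
	uint32_t map = TXSCH_MAP(pcifunc, 0);

	/* Mark TL1 default config as done, as nix_tl1_default_cfg() does */
	map = TXSCH_MAP(TXSCH_MAP_FUNC(map),
			TXSCH_MAP_FLAGS(map) | NIX_TXSCHQ_TL1_CFG_DONE);

	assert(TXSCH_MAP_FUNC(map) == pcifunc);
	assert(TXSCH_MAP_FLAGS(map) & NIX_TXSCHQ_TL1_CFG_DONE);
	return 0;
}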
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 962a82f7d1412f7b20fb23f3dfe0cddf079ecef4..4a7609fd6dd07bba4180f5b39d860ad717cc215b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -43,6 +43,19 @@ enum mc_buf_cnt {
 	MC_BUF_CNT_2048,
 };
 
+enum nix_mark_fmt_indexes {
+	NIX_MARK_CFG_IP_DSCP_RED,
+	NIX_MARK_CFG_IP_DSCP_YELLOW,
+	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
+	NIX_MARK_CFG_IP_ECN_RED,
+	NIX_MARK_CFG_IP_ECN_YELLOW,
+	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
+	NIX_MARK_CFG_VLAN_DEI_RED,
+	NIX_MARK_CFG_VLAN_DEI_YELLOW,
+	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
+	NIX_MARK_CFG_MAX,
+};
+
 /* For now considering MC resources needed for broadcast
  * pkt replication only. i.e 256 HWVFs + 12 PFs.
  */
@@ -127,6 +140,7 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
 {
 	struct nix_txsch *txsch;
 	struct nix_hw *nix_hw;
+	u16 map_func;
 
 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
 	if (!nix_hw)
@@ -138,11 +152,18 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
 		return false;
 
 	mutex_lock(&rvu->rsrc_lock);
-	if (txsch->pfvf_map[schq] != pcifunc) {
-		mutex_unlock(&rvu->rsrc_lock);
-		return false;
-	}
+	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
 	mutex_unlock(&rvu->rsrc_lock);
+
+	/* For TL1 schq, sharing across VFs of the same PF is OK */
+	if (lvl == NIX_TXSCH_LVL_TL1 &&
+	    rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+		return false;
+
+	if (lvl != NIX_TXSCH_LVL_TL1 &&
+	    map_func != pcifunc)
+		return false;
+
 	return true;
 }
 
@@ -277,17 +298,21 @@ static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
 	/* TCP's flags field */
 	field.layer = NIX_TXLAYER_OL4;
 	field.offset = 12;
-	field.sizem1 = 0; /* not needed */
+	field.sizem1 = 1; /* 2 bytes */
 	field.alg = NIX_LSOALG_TCP_FLAGS;
 	rvu_write64(rvu, blkaddr,
 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
 		    *(u64 *)&field);
 }
 
-static void nix_setup_lso(struct rvu *rvu, int blkaddr)
+static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 {
 	u64 cfg, idx, fidx = 0;
 
+	/* Get max HW supported format indices */
+	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
+	nix_hw->lso.total = cfg;
+
 	/* Enable LSO */
 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
 	/* For TSO, set first and middle segment flags to
@@ -297,7 +322,10 @@ static void nix_setup_lso(struct rvu *rvu, int blkaddr)
 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
 
-	/* Configure format fields for TCPv4 segmentation offload */
+	/* Setup default static LSO formats
+	 *
+	 * Configure format fields for TCPv4 segmentation offload
+	 */
 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
@@ -307,6 +335,7 @@ static void nix_setup_lso(struct rvu *rvu, int blkaddr)
 		rvu_write64(rvu, blkaddr,
 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
 	}
+	nix_hw->lso.in_use++;
 
 	/* Configure format fields for TCPv6 segmentation offload */
 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
@@ -319,6 +348,7 @@ static void nix_setup_lso(struct rvu *rvu, int blkaddr)
 		rvu_write64(rvu, blkaddr,
 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
 	}
+	nix_hw->lso.in_use++;
 }
 
 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
@@ -431,9 +461,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
 	bool ena;
 	u64 cfg;
 
-	pfvf = rvu_get_pfvf(rvu, pcifunc);
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
-	if (!pfvf->nixlf || blkaddr < 0)
+	if (blkaddr < 0)
 		return NIX_AF_ERR_AF_LF_INVALID;
 
 	block = &hw->block[blkaddr];
@@ -443,9 +472,14 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
 		return NIX_AF_ERR_AQ_ENQUEUE;
 	}
 
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
-	if (nixlf < 0)
-		return NIX_AF_ERR_AF_LF_INVALID;
+
+	/* Skip NIXLF check for broadcast MCE entry init */
+	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
+		if (!pfvf->nixlf || nixlf < 0)
+			return NIX_AF_ERR_AF_LF_INVALID;
+	}
 
 	switch (req->ctype) {
 	case NIX_AQ_CTYPE_RQ:
@@ -490,7 +524,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
 
 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
-	    req->op != NIX_AQ_INSTOP_WRITE) {
+	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
+	     (req->op == NIX_AQ_INSTOP_WRITE &&
+	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
 				     pcifunc, req->sq.smq))
 			return NIX_AF_ERR_AQ_ENQUEUE;
@@ -838,6 +874,13 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
 		    (u64)pfvf->nix_qints_ctx->iova);
 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
 
+	/* Setup VLANX TPID's.
+	 * Use VLAN1 for 802.1Q
+	 * and VLAN0 for 802.1AD.
+	 */
+	cfg = (0x8100ULL << 16) | 0x88A8ULL;
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
 	/* Enable LMTST for this NIX LF */
 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
 
@@ -925,6 +968,41 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
 	return 0;
 }
 
+int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
+					 struct nix_mark_format_cfg  *req,
+					 struct nix_mark_format_cfg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	struct nix_hw *nix_hw;
+	struct rvu_pfvf *pfvf;
+	int blkaddr, rc;
+	u32 cfg;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	cfg = (((u32)req->offset & 0x7) << 16) |
+	      (((u32)req->y_mask & 0xF) << 12) |
+	      (((u32)req->y_val & 0xF) << 8) |
+	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
+
+	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
+	if (rc < 0) {
+		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
+			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+		return NIX_AF_ERR_MARK_CFG_FAIL;
+	}
+
+	rsp->mark_format_idx = rc;
+	return 0;
+}
+
 /* Disable shaping of pkts by a scheduler queue
  * at a given scheduler level.
  */
@@ -983,6 +1061,73 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
 }
 
+static int
+rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
+		  u16 *schq_list, u16 *schq_cnt)
+{
+	struct nix_txsch *txsch;
+	struct nix_hw *nix_hw;
+	struct rvu_pfvf *pfvf;
+	u8 cgx_id, lmac_id;
+	u16 schq_base;
+	u32 *pfvf_map;
+	int pf, intf;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -ENODEV;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
+	pfvf_map = txsch->pfvf_map;
+	pf = rvu_get_pf(pcifunc);
+
+	/* static allocation as two TL1's per link */
+	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+
+	switch (intf) {
+	case NIX_INTF_TYPE_CGX:
+		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+		schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
+		break;
+	case NIX_INTF_TYPE_LBK:
+		schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	if (schq_base + 1 > txsch->schq.max)
+		return -ENODEV;
+
+	/* init pfvf_map as we store flags */
+	if (pfvf_map[schq_base] == U32_MAX) {
+		pfvf_map[schq_base] =
+			TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+		pfvf_map[schq_base + 1] =
+			TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+
+		/* Onetime reset for TL1 */
+		nix_reset_tx_linkcfg(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base);
+		nix_reset_tx_shaping(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base);
+
+		nix_reset_tx_linkcfg(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base + 1);
+		nix_reset_tx_shaping(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base + 1);
+	}
+
+	if (schq_list && schq_cnt) {
+		schq_list[0] = schq_base;
+		schq_list[1] = schq_base + 1;
+		*schq_cnt = 2;
+	}
+
+	return 0;
+}
+
 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 				     struct nix_txsch_alloc_req *req,
 				     struct nix_txsch_alloc_rsp *rsp)
@@ -993,6 +1138,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 	struct rvu_pfvf *pfvf;
 	struct nix_hw *nix_hw;
 	int blkaddr, rc = 0;
+	u32 *pfvf_map;
 	u16 schq;
 
 	pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1008,13 +1154,23 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
 		txsch = &nix_hw->txsch[lvl];
 		req_schq = req->schq_contig[lvl] + req->schq[lvl];
+		pfvf_map = txsch->pfvf_map;
+
+		if (!req_schq)
+			continue;
 
 		/* There are only 28 TL1s */
-		if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
-			goto err;
+		if (lvl == NIX_TXSCH_LVL_TL1) {
+			if (req->schq_contig[lvl] ||
+			    req->schq[lvl] > 2 ||
+			    rvu_get_tl1_schqs(rvu, blkaddr,
+					      pcifunc, NULL, NULL))
+				goto err;
+			continue;
+		}
 
 		/* Check if request is valid */
-		if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+		if (req_schq > MAX_TXSCHQ_PER_FUNC)
 			goto err;
 
 		/* If contiguous queues are needed, check for availability */
@@ -1030,16 +1186,32 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
 		txsch = &nix_hw->txsch[lvl];
 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
+		pfvf_map = txsch->pfvf_map;
 		rsp->schq[lvl] = req->schq[lvl];
 
-		schq = 0;
+		if (!req->schq[lvl] && !req->schq_contig[lvl])
+			continue;
+
+		/* Handle TL1 specially as its
+		 * allocation is restricted to
+		 * 2 TL1s per link
+		 */
+
+		if (lvl == NIX_TXSCH_LVL_TL1) {
+			rsp->schq_contig[lvl] = 0;
+			rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
+					  &rsp->schq_list[lvl][0],
+					  &rsp->schq[lvl]);
+			continue;
+		}
+
 		/* Alloc contiguous queues first */
 		if (req->schq_contig[lvl]) {
 			schq = rvu_alloc_rsrc_contig(&txsch->schq,
 						     req->schq_contig[lvl]);
 
 			for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
-				txsch->pfvf_map[schq] = pcifunc;
+				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
 				nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
 				nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
 				rsp->schq_contig_list[lvl][idx] = schq;
@@ -1050,7 +1222,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 		/* Alloc non-contiguous queues */
 		for (idx = 0; idx < req->schq[lvl]; idx++) {
 			schq = rvu_alloc_rsrc(&txsch->schq);
-			txsch->pfvf_map[schq] = pcifunc;
+			pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
 			rsp->schq_list[lvl][idx] = schq;
@@ -1092,7 +1264,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 
 		txsch = &nix_hw->txsch[lvl];
 		for (schq = 0; schq < txsch->schq.max; schq++) {
-			if (txsch->pfvf_map[schq] != pcifunc)
+			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
 				continue;
 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
 		}
@@ -1101,7 +1273,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 	/* Flush SMQs */
 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
 	for (schq = 0; schq < txsch->schq.max; schq++) {
-		if (txsch->pfvf_map[schq] != pcifunc)
+		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
 			continue;
 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
 		/* Do SMQ flush and set enqueue xoff */
@@ -1119,9 +1291,15 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 
 	/* Now free scheduler queues to free pool */
 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+		/* Free all SCHQs except TL1 as
+		 * TL1 is shared across all VFs of an RVU PF
+		 */
+		if (lvl == NIX_TXSCH_LVL_TL1)
+			continue;
+
 		txsch = &nix_hw->txsch[lvl];
 		for (schq = 0; schq < txsch->schq.max; schq++) {
-			if (txsch->pfvf_map[schq] != pcifunc)
+			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
 				continue;
 			rvu_free_rsrc(&txsch->schq, schq);
 			txsch->pfvf_map[schq] = 0;
@@ -1138,11 +1316,81 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 	return 0;
 }
 
+static int nix_txschq_free_one(struct rvu *rvu,
+			       struct nix_txsch_free_req *req)
+{
+	int lvl, schq, nixlf, blkaddr, rc;
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct nix_txsch *txsch;
+	struct nix_hw *nix_hw;
+	u32 *pfvf_map;
+	u64 cfg;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	lvl = req->schq_lvl;
+	schq = req->schq;
+	txsch = &nix_hw->txsch[lvl];
+
+	/* Don't allow freeing TL1 */
+	if (lvl > NIX_TXSCH_LVL_TL2 ||
+	    schq >= txsch->schq.max)
+		goto err;
+
+	pfvf_map = txsch->pfvf_map;
+	mutex_lock(&rvu->rsrc_lock);
+
+	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
+		mutex_unlock(&rvu->rsrc_lock);
+		goto err;
+	}
+
+	/* Flush if it is an SMQ. The onus of disabling
+	 * TL2/TL3 queue links before the SMQ flush is on the user
+	 */
+	if (lvl == NIX_TXSCH_LVL_SMQ) {
+		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+		/* Do SMQ flush and set enqueue xoff */
+		cfg |= BIT_ULL(50) | BIT_ULL(49);
+		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+
+		/* Wait for flush to complete */
+		rc = rvu_poll_reg(rvu, blkaddr,
+				  NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
+		if (rc) {
+			dev_err(rvu->dev,
+				"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
+		}
+	}
+
+	/* Free the resource */
+	rvu_free_rsrc(&txsch->schq, schq);
+	txsch->pfvf_map[schq] = 0;
+	mutex_unlock(&rvu->rsrc_lock);
+	return 0;
+err:
+	return NIX_AF_ERR_TLX_INVALID;
+}
+
 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
 				    struct nix_txsch_free_req *req,
 				    struct msg_rsp *rsp)
 {
-	return nix_txschq_free(rvu, req->hdr.pcifunc);
+	if (req->flags & TXSCHQ_FREE_ALL)
+		return nix_txschq_free(rvu, req->hdr.pcifunc);
+	else
+		return nix_txschq_free_one(rvu, req);
 }
 
 static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
@@ -1183,16 +1431,73 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
 	return true;
 }
 
+static int
+nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
+{
+	u16 schq_list[2], schq_cnt, schq;
+	int blkaddr, idx, err = 0;
+	u16 map_func, map_flags;
+	struct nix_hw *nix_hw;
+	u64 reg, regval;
+	u32 *pfvf_map;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
+
+	mutex_lock(&rvu->rsrc_lock);
+
+	err = rvu_get_tl1_schqs(rvu, blkaddr,
+				pcifunc, schq_list, &schq_cnt);
+	if (err)
+		goto unlock;
+
+	for (idx = 0; idx < schq_cnt; idx++) {
+		schq = schq_list[idx];
+		map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+		map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+		/* check if config is already done or this is pf */
+		if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
+			continue;
+
+		/* default configuration */
+		reg = NIX_AF_TL1X_TOPOLOGY(schq);
+		regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+		rvu_write64(rvu, blkaddr, reg, regval);
+		reg = NIX_AF_TL1X_SCHEDULE(schq);
+		regval = TXSCH_TL1_DFLT_RR_QTM;
+		rvu_write64(rvu, blkaddr, reg, regval);
+		reg = NIX_AF_TL1X_CIR(schq);
+		regval = 0;
+		rvu_write64(rvu, blkaddr, reg, regval);
+
+		map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+		pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+	}
+unlock:
+	mutex_unlock(&rvu->rsrc_lock);
+	return err;
+}
+
 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
 				    struct nix_txschq_config *req,
 				    struct msg_rsp *rsp)
 {
+	u16 schq, pcifunc = req->hdr.pcifunc;
 	struct rvu_hwinfo *hw = rvu->hw;
-	u16 pcifunc = req->hdr.pcifunc;
 	u64 reg, regval, schq_regbase;
 	struct nix_txsch *txsch;
+	u16 map_func, map_flags;
 	struct nix_hw *nix_hw;
 	int blkaddr, idx, err;
+	u32 *pfvf_map;
 	int nixlf;
 
 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
@@ -1212,6 +1517,16 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
 		return NIX_AF_ERR_AF_LF_INVALID;
 
 	txsch = &nix_hw->txsch[req->lvl];
+	pfvf_map = txsch->pfvf_map;
+
+	/* VF is only allowed to trigger
+	 * setting default cfg on TL1
+	 */
+	if (pcifunc & RVU_PFVF_FUNC_MASK &&
+	    req->lvl == NIX_TXSCH_LVL_TL1) {
+		return nix_tl1_default_cfg(rvu, pcifunc);
+	}
+
 	for (idx = 0; idx < req->num_regs; idx++) {
 		reg = req->reg[idx];
 		regval = req->regval[idx];
@@ -1229,6 +1544,21 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
 			regval |= ((u64)nixlf << 24);
 		}
 
+		/* Mark config as done for TL1 by PF */
+		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
+		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
+			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+
+			mutex_lock(&rvu->rsrc_lock);
+
+			map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+			map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+			map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+			pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+			mutex_unlock(&rvu->rsrc_lock);
+		}
+
 		rvu_write64(rvu, blkaddr, reg, regval);
 
 		/* Check for SMQ flush, if so, poll for its completion */
@@ -1295,7 +1625,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
 	struct nix_aq_enq_req aq_req;
 	int err;
 
-	aq_req.hdr.pcifunc = pcifunc;
+	aq_req.hdr.pcifunc = 0;
 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
 	aq_req.op = op;
 	aq_req.qidx = mce;
@@ -1555,10 +1885,62 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 		 * PF/VF pcifunc mapping info.
 		 */
 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
-					       sizeof(u16), GFP_KERNEL);
+					       sizeof(u32), GFP_KERNEL);
 		if (!txsch->pfvf_map)
 			return -ENOMEM;
+		memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
+	}
+	return 0;
+}
+
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+				int blkaddr, u32 cfg)
+{
+	int fmt_idx;
+
+	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
+		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
+			return fmt_idx;
 	}
+	if (fmt_idx >= nix_hw->mark_format.total)
+		return -ERANGE;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
+	nix_hw->mark_format.cfg[fmt_idx] = cfg;
+	nix_hw->mark_format.in_use++;
+	return fmt_idx;
+}
+
+static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
+				    int blkaddr)
+{
+	u64 cfgs[] = {
+		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
+		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
+		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
+		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
+		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
+		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
+		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
+		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
+		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
+	};
+	int i, rc;
+	u64 total;
+
+	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
+	nix_hw->mark_format.total = (u8)total;
+	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
+					       GFP_KERNEL);
+	if (!nix_hw->mark_format.cfg)
+		return -ENOMEM;
+	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
+		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
+		if (rc < 0)
+			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
+				rc, i);
+	}
+
 	return 0;
 }
 
@@ -1593,187 +1975,284 @@ int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
 }
 
 /* Returns the ALG index to be set into NPC_RX_ACTION */
-static int get_flowkey_alg_idx(u32 flow_cfg)
-{
-	u32 ip_cfg;
-
-	flow_cfg &= ~FLOW_KEY_TYPE_PORT;
-	ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
-	if (flow_cfg == ip_cfg)
-		return FLOW_KEY_ALG_IP;
-	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP))
-		return FLOW_KEY_ALG_TCP;
-	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP))
-		return FLOW_KEY_ALG_UDP;
-	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP))
-		return FLOW_KEY_ALG_SCTP;
-	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP))
-		return FLOW_KEY_ALG_TCP_UDP;
-	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP))
-		return FLOW_KEY_ALG_TCP_SCTP;
-	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
-		return FLOW_KEY_ALG_UDP_SCTP;
-	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP |
-			      FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
-		return FLOW_KEY_ALG_TCP_UDP_SCTP;
-
-	return FLOW_KEY_ALG_PORT;
-}
-
-int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
-					 struct nix_rss_flowkey_cfg *req,
-					 struct msg_rsp *rsp)
+static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
 {
-	struct rvu_hwinfo *hw = rvu->hw;
-	u16 pcifunc = req->hdr.pcifunc;
-	int alg_idx, nixlf, blkaddr;
-
-	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
-	if (blkaddr < 0)
-		return NIX_AF_ERR_AF_LF_INVALID;
-
-	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
-	if (nixlf < 0)
-		return NIX_AF_ERR_AF_LF_INVALID;
+	int i;
 
-	alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
+	/* Scan over existing algo entries to find a match */
+	for (i = 0; i < nix_hw->flowkey.in_use; i++)
+		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
+			return i;
 
-	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
-				       alg_idx, req->mcam_index);
-	return 0;
+	return -ERANGE;
 }
 
-static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
 {
-	struct nix_rx_flowkey_alg *field = NULL;
-	int idx, key_type;
+	int idx, nr_field, key_off, field_marker, keyoff_marker;
+	int max_key_off, max_bit_pos, group_member;
+	struct nix_rx_flowkey_alg *field;
+	struct nix_rx_flowkey_alg tmp;
+	u32 key_type, valid_key;
 
 	if (!alg)
-		return;
+		return -EINVAL;
 
-	/* FIELD0: IPv4
-	 * FIELD1: IPv6
-	 * FIELD2: TCP/UDP/SCTP/ALL
-	 * FIELD3: Unused
-	 * FIELD4: Unused
-	 *
-	 * Each of the 32 possible flow key algorithm definitions should
+#define FIELDS_PER_ALG  5
+#define MAX_KEY_OFF	40
+	/* Clear all fields */
+	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
+
+	/* Each of the 32 possible flow key algorithm definitions should
 	 * fall into above incremental config (except ALG0). Otherwise a
 	 * single NPC MCAM entry is not sufficient for supporting RSS.
 	 *
 	 * If a different definition or combination needed then NPC MCAM
 	 * has to be programmed to filter such pkts and it's action should
 	 * point to this definition to calculate flowtag or hash.
+	 *
+	 * The `for loop` goes over _all_ protocol fields and the following
+	 * variables depict the state machine's forward-progress logic.
+	 *
+	 * keyoff_marker - Enabled when hash byte length needs to be accounted
+	 * in field->key_offset update.
+	 * field_marker - Enabled when a new field needs to be selected.
+	 * group_member - Enabled when protocol is part of a group.
 	 */
-	for (idx = 0; idx < 32; idx++) {
-		key_type = flow_cfg & BIT_ULL(idx);
-		if (!key_type)
-			continue;
+
+	keyoff_marker = 0; max_key_off = 0; group_member = 0;
+	nr_field = 0; key_off = 0; field_marker = 1;
+	field = &tmp; max_bit_pos = fls(flow_cfg);
+	for (idx = 0;
+	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
+	     key_off < MAX_KEY_OFF; idx++) {
+		key_type = BIT(idx);
+		valid_key = flow_cfg & key_type;
+		/* Found a field marker, reset the field values */
+		if (field_marker)
+			memset(&tmp, 0, sizeof(tmp));
+
 		switch (key_type) {
-		case FLOW_KEY_TYPE_PORT:
-			field = &alg[0];
+		case NIX_FLOW_KEY_TYPE_PORT:
 			field->sel_chan = true;
 			/* This should be set to 1, when SEL_CHAN is set */
 			field->bytesm1 = 1;
+			field_marker = true;
+			keyoff_marker = true;
 			break;
-		case FLOW_KEY_TYPE_IPV4:
-			field = &alg[0];
+		case NIX_FLOW_KEY_TYPE_IPV4:
 			field->lid = NPC_LID_LC;
 			field->ltype_match = NPC_LT_LC_IP;
 			field->hdr_offset = 12; /* SIP offset */
 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
 			field->ltype_mask = 0xF; /* Match only IPv4 */
+			field_marker = true;
+			keyoff_marker = false;
 			break;
-		case FLOW_KEY_TYPE_IPV6:
-			field = &alg[1];
+		case NIX_FLOW_KEY_TYPE_IPV6:
 			field->lid = NPC_LID_LC;
 			field->ltype_match = NPC_LT_LC_IP6;
 			field->hdr_offset = 8; /* SIP offset */
 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
 			field->ltype_mask = 0xF; /* Match only IPv6 */
+			field_marker = true;
+			keyoff_marker = true;
 			break;
-		case FLOW_KEY_TYPE_TCP:
-		case FLOW_KEY_TYPE_UDP:
-		case FLOW_KEY_TYPE_SCTP:
-			field = &alg[2];
+		case NIX_FLOW_KEY_TYPE_TCP:
+		case NIX_FLOW_KEY_TYPE_UDP:
+		case NIX_FLOW_KEY_TYPE_SCTP:
 			field->lid = NPC_LID_LD;
 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
-			if (key_type == FLOW_KEY_TYPE_TCP)
+			if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
 				field->ltype_match |= NPC_LT_LD_TCP;
-			else if (key_type == FLOW_KEY_TYPE_UDP)
+				group_member = true;
+			} else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
+				   valid_key) {
 				field->ltype_match |= NPC_LT_LD_UDP;
-			else if (key_type == FLOW_KEY_TYPE_SCTP)
+				group_member = true;
+			} else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
+				   valid_key) {
 				field->ltype_match |= NPC_LT_LD_SCTP;
-			field->key_offset = 32; /* After IPv4/v6 SIP, DIP */
+				group_member = true;
+			}
 			field->ltype_mask = ~field->ltype_match;
+			if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
+				/* Handle the case where any of the group item
+				 * is enabled in the group but not the final one
+				 */
+				if (group_member) {
+					valid_key = true;
+					group_member = false;
+				}
+				field_marker = true;
+				keyoff_marker = true;
+			} else {
+				field_marker = false;
+				keyoff_marker = false;
+			}
 			break;
 		}
-		if (field)
-			field->ena = 1;
-		field = NULL;
+		field->ena = 1;
+
+		/* Found a valid flow key type */
+		if (valid_key) {
+			field->key_offset = key_off;
+			memcpy(&alg[nr_field], field, sizeof(*field));
+			max_key_off = max(max_key_off, field->bytesm1 + 1);
+
+			/* Found a field marker, get the next field */
+			if (field_marker)
+				nr_field++;
+		}
+
+		/* Found a keyoff marker, update the new key_off */
+		if (keyoff_marker) {
+			key_off += max_key_off;
+			max_key_off = 0;
+		}
 	}
+	/* Processed all the flow key types */
+	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
+		return 0;
+	else
+		return NIX_AF_ERR_RSS_NOSPC_FIELD;
 }
 
-static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
 {
-#define FIELDS_PER_ALG	5
-	u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
-	u32 flowkey_cfg, minkey_cfg;
-	int alg, fid;
+	u64 field[FIELDS_PER_ALG];
+	struct nix_hw *hw;
+	int fid, rc;
 
-	memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
+	hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!hw)
+		return -EINVAL;
 
-	/* Only incoming channel number */
-	flowkey_cfg = FLOW_KEY_TYPE_PORT;
-	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg);
+	/* No room to add a new flow hash algorithm */
+	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
+		return NIX_AF_ERR_RSS_NOSPC_ALGO;
 
-	/* For a incoming pkt if none of the fields match then flowkey
-	 * will be zero, hence tag generated will also be zero.
-	 * RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will
-	 * be used to queue the packet.
-	 */
+	/* Generate algo fields for the given flow_cfg */
+	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
+	if (rc)
+		return rc;
+
+	/* Update ALGX_FIELDX register with generated fields */
+	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
+							   fid), field[fid]);
+
+	/* Store the flow_cfg for further lookup */
+	rc = hw->flowkey.in_use;
+	hw->flowkey.flowkey[rc] = flow_cfg;
+	hw->flowkey.in_use++;
+
+	return rc;
+}
+
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
+					 struct nix_rss_flowkey_cfg *req,
+					 struct nix_rss_flowkey_cfg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	int alg_idx, nixlf, blkaddr;
+	struct nix_hw *nix_hw;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
+	/* No matching algo index in the existing list, reserve a new one */
+	if (alg_idx < 0) {
+		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
+						  req->flowkey_cfg);
+		if (alg_idx < 0)
+			return alg_idx;
+	}
+	rsp->alg_idx = alg_idx;
+	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
+				       alg_idx, req->mcam_index);
+	return 0;
+}
+
+static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+{
+	u32 flowkey_cfg, minkey_cfg;
+	int alg, fid, rc;
+
+	/* Disable all flow key algx fieldx */
+	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
+		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+			rvu_write64(rvu, blkaddr,
+				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
+				    0);
+	}
 
 	/* IPv4/IPv6 SIP/DIPs */
-	flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
-	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg);
+	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
 	minkey_cfg = flowkey_cfg;
-	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP;
-	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg);
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
-	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP;
-	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg);
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
-	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP;
-	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], flowkey_cfg);
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
-	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP;
-	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg);
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+			NIX_FLOW_KEY_TYPE_UDP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
-	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP;
-	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg);
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+			NIX_FLOW_KEY_TYPE_SCTP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
-	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
-	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg);
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
+			NIX_FLOW_KEY_TYPE_SCTP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
-	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP |
-		      FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
-	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP],
-			   flowkey_cfg);
+	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
-	for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) {
-		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
-			rvu_write64(rvu, blkaddr,
-				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
-				    field[alg][fid]);
-	}
+	return 0;
 }
 
 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
@@ -1919,7 +2398,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
 	mutex_lock(&rvu->rsrc_lock);
 	for (schq = 0; schq < txsch->schq.max; schq++) {
-		if (txsch->pfvf_map[schq] != pcifunc)
+		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
 			continue;
 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
@@ -2034,6 +2513,48 @@ int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
 	return err;
 }
 
+int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+				    struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_block *block;
+	struct rvu_pfvf *pfvf;
+	int nixlf, blkaddr;
+	u64 cfg;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	block = &hw->block[blkaddr];
+	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
+	/* Set the interface configuration */
+	if (req->len_verify & BIT(0))
+		cfg |= BIT_ULL(41);
+	else
+		cfg &= ~BIT_ULL(41);
+
+	if (req->len_verify & BIT(1))
+		cfg |= BIT_ULL(40);
+	else
+		cfg &= ~BIT_ULL(40);
+
+	if (req->csum_verify & BIT(0))
+		cfg |= BIT_ULL(37);
+	else
+		cfg &= ~BIT_ULL(37);
+
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
+
+	return 0;
+}
+
 static void nix_link_config(struct rvu *rvu, int blkaddr)
 {
 	struct rvu_hwinfo *hw = rvu->hw;
@@ -2212,9 +2733,6 @@ int rvu_nix_init(struct rvu *rvu)
 	/* Restore CINT timer delay to HW reset values */
 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
 
-	/* Configure segmentation offload formats */
-	nix_setup_lso(rvu, blkaddr);
-
 	if (blkaddr == BLKADDR_NIX0) {
 		hw->nix0 = devm_kzalloc(rvu->dev,
 					sizeof(struct nix_hw), GFP_KERNEL);
@@ -2225,24 +2743,48 @@ int rvu_nix_init(struct rvu *rvu)
 		if (err)
 			return err;
 
+		err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
+		if (err)
+			return err;
+
 		err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
 		if (err)
 			return err;
 
-		/* Config Outer L2, IP, TCP and UDP's NPC layer info.
+		/* Configure segmentation offload formats */
+		nix_setup_lso(rvu, hw->nix0, blkaddr);
+
+		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
 		 * This helps HW protocol checker to identify headers
 		 * and validate length and checksums.
 		 */
 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
 			    (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
-		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
-			    (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
-		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
-			    (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
 			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
-
-		nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
+			    (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
+			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
+			    (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
+			    (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
+			    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
+			    (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
+			    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
+			    (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
+			    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) |
+			    0x0F);
+
+		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+		if (err)
+			return err;
 
 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
 		nix_link_config(rvu, blkaddr);
@@ -2364,3 +2906,54 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
 
 	nix_ctx_free(rvu, pfvf);
 }
+
+int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
+					struct nix_lso_format_cfg *req,
+					struct nix_lso_format_cfg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	struct nix_hw *nix_hw;
+	struct rvu_pfvf *pfvf;
+	int blkaddr, idx, f;
+	u64 reg;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (!pfvf->nixlf || blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	/* Find existing matching LSO format, if any */
+	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
+		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
+			reg = rvu_read64(rvu, blkaddr,
+					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
+			if (req->fields[f] != (reg & req->field_mask))
+				break;
+		}
+
+		if (f == NIX_LSO_FIELD_MAX)
+			break;
+	}
+
+	if (idx < nix_hw->lso.in_use) {
+		/* Match found */
+		rsp->lso_format_idx = idx;
+		return 0;
+	}
+
+	if (nix_hw->lso.in_use == nix_hw->lso.total)
+		return NIX_AF_ERR_LSO_CFG_FAIL;
+
+	rsp->lso_format_idx = nix_hw->lso.in_use++;
+
+	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
+			    req->fields[f]);
+
+	return 0;
+}
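rvu_mbox_handler_nix_mark_format_cfg() above compresses the request into a single 19-bit word (offset in bits 18:16, y_mask/y_val in bits 15:8, r_mask/r_val in bits 7:0) before reserving a NIX_AF_MARK_FORMATX_CTL slot; the static defaults programmed by nix_af_mark_format_setup() are raw words in that same register format. A small sketch of the packing, with request values chosen to reproduce the NIX_MARK_CFG_IP_DSCP_RED default (0x10003); the marking semantics of the individual fields are not modeled here:

#include <assert.h>
#include <stdint.h>

struct mark_format_req {		/* mirrors struct nix_mark_format_cfg */
	uint8_t offset, y_mask, y_val, r_mask, r_val;
};

static uint32_t pack_mark_format(const struct mark_format_req *req)
{
	return ((uint32_t)(req->offset & 0x7) << 16) |
	       ((uint32_t)(req->y_mask & 0xF) << 12) |
	       ((uint32_t)(req->y_val & 0xF) << 8) |
	       ((uint32_t)(req->r_mask & 0xF) << 4) |
	       ((uint32_t)(req->r_val & 0xF));
}

int main(void)
{
	/* offset 1 with r_val 3 packs to 0x10003, the IP_DSCP_RED default */
	struct mark_format_req req = { .offset = 1, .r_val = 3 };

	assert(pack_mark_format(&req) == 0x10003);
	return 0;
}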
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 6ea2b0e2df426dc63f3960b798a298e8880ed27a..15f70273e29c75789ff36d8f6da085b2468cad51 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -16,6 +16,7 @@
 #include "rvu_reg.h"
 #include "rvu.h"
 #include "npc.h"
+#include "cgx.h"
 #include "npc_profile.h"
 
 #define RSVD_MCAM_ENTRIES_PER_PF	2 /* Bcast & Promisc */
@@ -368,9 +369,9 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
 				   int nixlf, u64 chan, bool allmulti)
 {
 	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, ucast_idx, index, kwi;
 	struct mcam_entry entry = { {0} };
 	struct nix_rx_action action = { };
-	int blkaddr, index, kwi;
 
 	/* Only PF or AF VF can add a promiscuous entry */
 	if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
@@ -392,9 +393,21 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
 		entry.kw_mask[kwi] = BIT_ULL(40);
 	}
 
-	*(u64 *)&action = 0x00;
-	action.op = NIX_RX_ACTIONOP_UCAST;
-	action.pf_func = pcifunc;
+	ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+					     nixlf, NIXLF_UCAST_ENTRY);
+
+	/* If the corresponding PF's ucast action is RSS,
+	 * use the same action for promisc also
+	 */
+	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+							blkaddr, ucast_idx);
+
+	if (action.op != NIX_RX_ACTIONOP_RSS) {
+		*(u64 *)&action = 0x00;
+		action.op = NIX_RX_ACTIONOP_UCAST;
+		action.pf_func = pcifunc;
+	}
 
 	entry.action = *(u64 *)&action;
 	npc_config_mcam_entry(rvu, mcam, blkaddr, index,
@@ -476,7 +489,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
 	 *
 	 */
 	entry.kw[0] = BIT_ULL(13) | chan;
-	entry.kw_mask[0] = ~entry.kw[0] & (BIT_ULL(13) | 0xFFFULL);
+	entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL;
 
 	*(u64 *)&action = 0x00;
 #ifdef MCAST_MCE
@@ -539,6 +552,21 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
 	rvu_write64(rvu, blkaddr,
 		    NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
 
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+					 nixlf, NIXLF_PROMISC_ENTRY);
+
+	/* If PF's promiscuous entry is enabled,
+	 * set the RSS action for that entry as well
+	 */
+	if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+		bank = npc_get_bank(mcam, index);
+		index &= (mcam->banksize - 1);
+
+		rvu_write64(rvu, blkaddr,
+			    NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
+			    *(u64 *)&action);
+	}
+
 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
 }
 
@@ -704,6 +732,111 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
 	SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
 }
 
+static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
+				     struct npc_mcam_kex *mkex)
+{
+	int lid, lt, ld, fl;
+
+	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
+		    mkex->keyx_cfg[NIX_INTF_RX]);
+	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
+		    mkex->keyx_cfg[NIX_INTF_TX]);
+
+	for (ld = 0; ld < NPC_MAX_LD; ld++)
+		rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
+			    mkex->kex_ld_flags[ld]);
+
+	for (lid = 0; lid < NPC_MAX_LID; lid++) {
+		for (lt = 0; lt < NPC_MAX_LT; lt++) {
+			for (ld = 0; ld < NPC_MAX_LD; ld++) {
+				SET_KEX_LD(NIX_INTF_RX, lid, lt, ld,
+					   mkex->intf_lid_lt_ld[NIX_INTF_RX]
+					   [lid][lt][ld]);
+
+				SET_KEX_LD(NIX_INTF_TX, lid, lt, ld,
+					   mkex->intf_lid_lt_ld[NIX_INTF_TX]
+					   [lid][lt][ld]);
+			}
+		}
+	}
+
+	for (ld = 0; ld < NPC_MAX_LD; ld++) {
+		for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+			SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
+					mkex->intf_ld_flags[NIX_INTF_RX]
+					[ld][fl]);
+
+			SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
+					mkex->intf_ld_flags[NIX_INTF_TX]
+					[ld][fl]);
+		}
+	}
+}
+
+/* strtoull of "mkexprof" with base 36 */
+#define MKEX_SIGN      0x19bbfdbd15f
+#define MKEX_END_SIGN  0xdeadbeef
+
+static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
+{
+	const char *mkex_profile = rvu->mkex_pfl_name;
+	struct device *dev = &rvu->pdev->dev;
+	void __iomem *mkex_prfl_addr = NULL;
+	struct npc_mcam_kex *mcam_kex;
+	u64 prfl_addr;
+	u64 prfl_sz;
+
+	/* If the user has not selected an mkex profile */
+	if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
+		goto load_default;
+
+	if (cgx_get_mkex_prfl_info(&prfl_addr, &prfl_sz))
+		goto load_default;
+
+	if (!prfl_addr || !prfl_sz)
+		goto load_default;
+
+	mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz);
+	if (!mkex_prfl_addr)
+		goto load_default;
+
+	mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
+
+	while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
+		/* Compare with mkex mod_param name string */
+		if (mcam_kex->mkex_sign == MKEX_SIGN &&
+		    !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
+			/* Due to an errata (35786) in A0 pass silicon,
+			 * parse nibble enable configuration has to be
+			 * identical for both Rx and Tx interfaces.
+			 */
+			if (is_rvu_9xxx_A0(rvu) &&
+			    mcam_kex->keyx_cfg[NIX_INTF_RX] !=
+			    mcam_kex->keyx_cfg[NIX_INTF_TX])
+				goto load_default;
+
+			/* Program selected mkex profile */
+			npc_program_mkex_profile(rvu, blkaddr, mcam_kex);
+
+			goto unmap;
+		}
+
+		mcam_kex++;
+		prfl_sz -= sizeof(struct npc_mcam_kex);
+	}
+	dev_warn(dev, "Failed to load requested profile: %s\n",
+		 rvu->mkex_pfl_name);
+
+load_default:
+	dev_info(rvu->dev, "Using default mkex profile\n");
+	/* Config packet data and flags extraction into PARSE result */
+	npc_config_ldata_extract(rvu, blkaddr);
+
+unmap:
+	if (mkex_prfl_addr)
+		iounmap(mkex_prfl_addr);
+}
+
 static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
 				 struct npc_kpu_profile_action *kpuaction,
 				 int kpu, int entry, bool pkind)
@@ -1008,13 +1141,20 @@ int rvu_npc_init(struct rvu *rvu)
 	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
 		    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
 
+	/* Config Inner IPV4 NPC layer info */
+	rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
+		    (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+
 	/* Enable below for Rx pkts.
 	 * - Outer IPv4 header checksum validation.
 	 * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
+	 * - Inner IPv4 header checksum validation.
+	 * - Set non zero checksum error code value
 	 */
 	rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
 		    rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
-		    BIT_ULL(6) | BIT_ULL(2));
+		    BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
+		    BIT_ULL(2) | BIT_ULL(1));
 
 	/* Set RX and TX side MCAM search key size.
 	 * LA..LD (ltype only) + Channel
@@ -1034,8 +1174,8 @@ int rvu_npc_init(struct rvu *rvu)
 	if (err)
 		return err;
 
-	/* Config packet data and flags extraction into PARSE result */
-	npc_config_ldata_extract(rvu, blkaddr);
+	/* Configure MKEX profile */
+	npc_load_mkex_profile(rvu, blkaddr);
 
 	/* Set TX miss action to UCAST_DEFAULT i.e
 	 * transmit the packet on NIX LF SQ's default channel.
@@ -2043,6 +2183,7 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
 					GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
 		}
 	}
+	memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN);
 	return 0;
 }
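npc_load_mkex_profile() above walks the firmware-provided region looking for struct npc_mcam_kex entries whose mkex_sign matches MKEX_SIGN, the base-36 encoding of the 8-character string "mkexprof" (MKEX_END_SIGN terminates the list). A standalone check of that constant:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define MKEX_SIGN	0x19bbfdbd15fULL

int main(void)
{
	/* "mkexprof" interpreted as a base-36 number */
	unsigned long long sign = strtoull("mkexprof", NULL, 36);

	assert(sign == MKEX_SIGN);
	printf("\"mkexprof\" in base 36 = 0x%llx\n", sign);
	return 0;
}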