Message-Id: <1539689240-11526-6-git-send-email-sunil.kovvuri@gmail.com>
Date:   Tue, 16 Oct 2018 16:57:09 +0530
From:   sunil.kovvuri@...il.com
To:     netdev@...r.kernel.org, davem@...emloft.net
Cc:     arnd@...db.de, linux-soc@...r.kernel.org,
        Linu Cherian <lcherian@...vell.com>,
        Sunil Goutham <sgoutham@...vell.com>
Subject: [PATCH 05/16] octeontx2-af: Forward CGX link notifications to PFs

From: Linu Cherian <lcherian@...vell.com>

Upon receiving a notification from firmware, the CGX event handler in
the AF driver gets the current link info (status, speed, duplex, etc.)
from the CGX driver and forwards it to the PFs that have registered to
receive such notifications.

To support the above:
 - Added mbox messaging support for sending messages from AF to PF.
 - Added mbox messages so that PFs can register/unregister for link events.
 - Link notifications are sent to a PF in two scenarios (a PF-side usage
   sketch follows this list):
  1. When an asynchronous link change notification is received from
     firmware with the notification flag turned on for that PF.
  2. Upon a notification turn-on request, the current link status is
     sent to the PF.
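
A minimal PF-side sketch of consuming these notifications (for
illustration only; the PF/VF driver is not part of this patch, so the
otx2_nic structure and the otx2_* mbox helpers below are assumptions;
only the message IDs and struct cgx_link_info_msg come from this
series):

	/* Ask AF to start forwarding link change notifications to this PF.
	 * The allocator is assumed to be generated for the PF mbox in the
	 * same way the AF generates its up-message allocators from
	 * MBOX_UP_CGX_MESSAGES.
	 */
	static int otx2_cgx_start_linkevents(struct otx2_nic *pf)
	{
		struct msg_req *req;

		req = otx2_mbox_alloc_msg_CGX_START_LINKEVENTS(&pf->mbox);
		if (!req)
			return -ENOMEM;
		return otx2_sync_mbox_msg(&pf->mbox);	/* assumed helper */
	}

	/* Handler for the CGX_LINK_EVENT up-message sent by the AF */
	static void otx2_handle_link_event(struct otx2_nic *pf,
					   struct cgx_link_info_msg *msg)
	{
		struct cgx_link_user_info *linfo = &msg->link_info;

		netdev_info(pf->netdev, "Link is %s %d Mbps %s duplex\n",
			    linfo->link_up ? "UP" : "DOWN",
			    (int)linfo->speed,
			    linfo->full_duplex ? "Full" : "Half");
	}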

Also added a new mailbox message with which an RVU PF/VF can retrieve
its mapped CGX LMAC's current link info. Link info includes status,
speed, duplex and LMAC type.
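
A hedged sketch of the retrieval path (again, the PF-side structure and
otx2_* helper names are assumptions; only CGX_GET_LINKINFO and the
message structures come from this patch):

	/* Query the AF for the mapped CGX LMAC's current link info */
	static int otx2_cgx_get_linkinfo(struct otx2_nic *pf,
					 struct cgx_link_user_info *linfo)
	{
		struct cgx_link_info_msg *rsp;
		struct msg_req *req;
		int err;

		req = otx2_mbox_alloc_msg_CGX_GET_LINKINFO(&pf->mbox);
		if (!req)
			return -ENOMEM;

		err = otx2_sync_mbox_msg(&pf->mbox);	/* assumed helper */
		if (err)
			return err;

		/* The response is the cgx_link_info_msg filled by the AF's
		 * CGX_GET_LINKINFO handler; copy out the user-visible info.
		 */
		rsp = (struct cgx_link_info_msg *)
		      otx2_mbox_get_rsp(&pf->mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			return PTR_ERR(rsp);

		*linfo = rsp->link_info;
		return 0;
	}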

Signed-off-by: Linu Cherian <lcherian@...vell.com>
Signed-off-by: Sunil Goutham <sgoutham@...vell.com>
---
 drivers/net/ethernet/marvell/octeontx2/af/cgx.c    |  99 ++++++++++++--
 drivers/net/ethernet/marvell/octeontx2/af/cgx.h    |  21 ++-
 drivers/net/ethernet/marvell/octeontx2/af/mbox.h   |  22 +++
 drivers/net/ethernet/marvell/octeontx2/af/rvu.c    |  82 ++++++++++++
 drivers/net/ethernet/marvell/octeontx2/af/rvu.h    |   9 ++
 .../net/ethernet/marvell/octeontx2/af/rvu_cgx.c    | 147 ++++++++++++++++++++-
 6 files changed, 368 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index e7ae9e0..077f83f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -29,6 +29,7 @@
  * @wq_cmd_cmplt:	waitq to keep the process blocked until cmd completion
  * @cmd_lock:		Lock to serialize the command interface
  * @resp:		command response
+ * @link_info:		link related information
  * @event_cb:		callback for linkchange events
  * @cmd_pend:		flag set before new command is started
  *			flag cleared after command response is received
@@ -40,6 +41,7 @@ struct lmac {
 	wait_queue_head_t wq_cmd_cmplt;
 	struct mutex cmd_lock;
 	u64 resp;
+	struct cgx_link_user_info link_info;
 	struct cgx_event_cb event_cb;
 	bool cmd_pend;
 	struct cgx *cgx;
@@ -58,6 +60,12 @@ struct cgx {
 
 static LIST_HEAD(cgx_list);
 
+/* Convert firmware speed encoding to user format(Mbps) */
+static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
+
+/* Convert firmware lmac type encoding to string */
+static char *cgx_lmactype_string[LMAC_MODE_MAX];
+
 /* Supported devices */
 static const struct pci_device_id cgx_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
@@ -119,6 +127,24 @@ void *cgx_get_pdata(int cgx_id)
 }
 EXPORT_SYMBOL(cgx_get_pdata);
 
+/* Ensure the required lock for event queue(where asynchronous events are
+ * posted) is acquired before calling this API. Else an asynchronous event(with
+ * latest link status) can reach the destination before this function returns
+ * and could make the link status appear wrong.
+ */
+int cgx_get_link_info(void *cgxd, int lmac_id,
+		      struct cgx_link_user_info *linfo)
+{
+	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
+
+	if (!lmac)
+		return -ENODEV;
+
+	*linfo = lmac->link_info;
+	return 0;
+}
+EXPORT_SYMBOL(cgx_get_link_info);
+
 static u64 mac2u64 (u8 *mac_addr)
 {
 	u64 mac = 0;
@@ -160,6 +186,14 @@ u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
 }
 EXPORT_SYMBOL(cgx_lmac_addr_get);
 
+static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
+{
+	u64 cfg;
+
+	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
+}
+
 void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
 {
 	struct cgx *cgx = cgx_get_pdata(cgx_id);
@@ -306,36 +340,79 @@ static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
 	return err;
 }
 
+static inline void cgx_link_usertable_init(void)
+{
+	cgx_speed_mbps[CGX_LINK_NONE] = 0;
+	cgx_speed_mbps[CGX_LINK_10M] = 10;
+	cgx_speed_mbps[CGX_LINK_100M] = 100;
+	cgx_speed_mbps[CGX_LINK_1G] = 1000;
+	cgx_speed_mbps[CGX_LINK_2HG] = 2500;
+	cgx_speed_mbps[CGX_LINK_5G] = 5000;
+	cgx_speed_mbps[CGX_LINK_10G] = 10000;
+	cgx_speed_mbps[CGX_LINK_20G] = 20000;
+	cgx_speed_mbps[CGX_LINK_25G] = 25000;
+	cgx_speed_mbps[CGX_LINK_40G] = 40000;
+	cgx_speed_mbps[CGX_LINK_50G] = 50000;
+	cgx_speed_mbps[CGX_LINK_100G] = 100000;
+
+	cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
+	cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
+	cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
+	cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
+	cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
+	cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
+	cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
+	cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
+	cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
+	cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
+}
+
+static inline void link_status_user_format(u64 lstat,
+					   struct cgx_link_user_info *linfo,
+					   struct cgx *cgx, u8 lmac_id)
+{
+	char *lmac_string;
+
+	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
+	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
+	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
+	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
+	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
+	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+}
+
 /* Hardware event handlers */
 static inline void cgx_link_change_handler(u64 lstat,
 					   struct lmac *lmac)
 {
+	struct cgx_link_user_info *linfo;
 	struct cgx *cgx = lmac->cgx;
 	struct cgx_link_event event;
 	struct device *dev;
+	int err_type;
 
 	dev = &cgx->pdev->dev;
 
-	event.lstat.link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
-	event.lstat.full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
-	event.lstat.speed = FIELD_GET(RESP_LINKSTAT_SPEED, lstat);
-	event.lstat.err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
+	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
+	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
 
 	event.cgx_id = cgx->cgx_id;
 	event.lmac_id = lmac->lmac_id;
 
+	/* update the local copy of link status */
+	lmac->link_info = event.link_uinfo;
+	linfo = &lmac->link_info;
+
 	if (!lmac->event_cb.notify_link_chg) {
 		dev_dbg(dev, "cgx port %d:%d Link change handler null",
 			cgx->cgx_id, lmac->lmac_id);
-		if (event.lstat.err_type != CGX_ERR_NONE) {
+		if (err_type != CGX_ERR_NONE) {
 			dev_err(dev, "cgx port %d:%d Link error %d\n",
-				cgx->cgx_id, lmac->lmac_id,
-				event.lstat.err_type);
+				cgx->cgx_id, lmac->lmac_id, err_type);
 		}
-		dev_info(dev, "cgx port %d:%d Link status %s, speed %x\n",
+		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
 			 cgx->cgx_id, lmac->lmac_id,
-			event.lstat.link_up ? "UP" : "DOWN",
-			event.lstat.speed);
+			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
 		return;
 	}
 
@@ -563,6 +640,8 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	list_add(&cgx->cgx_list, &cgx_list);
 	cgx->cgx_id = cgx_get_cgx_cnt() - 1;
 
+	cgx_link_usertable_init();
+
 	err = cgx_lmac_init(cgx);
 	if (err)
 		goto err_release_lmac;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 3ae426b..c89edfa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -11,6 +11,7 @@
 #ifndef CGX_H
 #define CGX_H
 
+#include "mbox.h"
 #include "cgx_fw_if.h"
 
  /* PCI device IDs */
@@ -28,6 +29,8 @@
 #define  CMR_EN					BIT_ULL(55)
 #define  DATA_PKT_TX_EN				BIT_ULL(53)
 #define  DATA_PKT_RX_EN				BIT_ULL(54)
+#define  CGX_LMAC_TYPE_SHIFT			40
+#define  CGX_LMAC_TYPE_MASK			0xF
 #define CGXX_CMRX_INT			0x040
 #define  FW_CGX_INT				BIT_ULL(1)
 #define CGXX_CMRX_INT_ENA_W1S		0x058
@@ -55,8 +58,22 @@
 #define CGX_NVEC			37
 #define CGX_LMAC_FWI			0
 
+enum LMAC_TYPE {
+	LMAC_MODE_SGMII		= 0,
+	LMAC_MODE_XAUI		= 1,
+	LMAC_MODE_RXAUI		= 2,
+	LMAC_MODE_10G_R		= 3,
+	LMAC_MODE_40G_R		= 4,
+	LMAC_MODE_QSGMII	= 6,
+	LMAC_MODE_25G_R		= 7,
+	LMAC_MODE_50G_R		= 8,
+	LMAC_MODE_100G_R	= 9,
+	LMAC_MODE_USXGMII	= 10,
+	LMAC_MODE_MAX,
+};
+
 struct cgx_link_event {
-	struct cgx_lnk_sts lstat;
+	struct cgx_link_user_info link_uinfo;
 	u8 cgx_id;
 	u8 lmac_id;
 };
@@ -83,4 +100,6 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
 int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
 u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
 void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
+int cgx_get_link_info(void *cgxd, int lmac_id,
+		      struct cgx_link_user_info *linfo);
 #endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 53946e8..9f3790b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -133,6 +133,9 @@ M(CGX_MAC_ADDR_GET,	0x204, cgx_mac_addr_set_or_get,			\
 				cgx_mac_addr_set_or_get)		\
 M(CGX_PROMISC_ENABLE,	0x205, msg_req, msg_rsp)			\
 M(CGX_PROMISC_DISABLE,	0x206, msg_req, msg_rsp)			\
+M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp)			\
+M(CGX_STOP_LINKEVENTS,	0x208, msg_req, msg_rsp)			\
+M(CGX_GET_LINKINFO,	0x209, msg_req, cgx_link_info_msg)		\
 /* NPA mbox IDs (range 0x400 - 0x5FF) */				\
 /* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */				\
 /* TIM mbox IDs (range 0x800 - 0x9FF) */				\
@@ -140,9 +143,14 @@ M(CGX_PROMISC_DISABLE,	0x206, msg_req, msg_rsp)			\
 /* NPC mbox IDs (range 0x6000 - 0x7FFF) */				\
 /* NIX mbox IDs (range 0x8000 - 0xFFFF) */				\
 
+/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+#define MBOX_UP_CGX_MESSAGES						\
+M(CGX_LINK_EVENT,		0xC00, cgx_link_info_msg, msg_rsp)
+
 enum {
 #define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
 MBOX_MESSAGES
+MBOX_UP_CGX_MESSAGES
 #undef M
 };
 
@@ -234,4 +242,18 @@ struct cgx_mac_addr_set_or_get {
 	struct mbox_msghdr hdr;
 	u8 mac_addr[ETH_ALEN];
 };
+
+struct cgx_link_user_info {
+	uint64_t link_up:1;
+	uint64_t full_duplex:1;
+	uint64_t lmac_type_id:4;
+	uint64_t speed:20; /* speed in Mbps */
+#define LMACTYPE_STR_LEN 16
+	char lmac_type[LMACTYPE_STR_LEN];
+};
+
+struct cgx_link_info_msg {
+	struct mbox_msghdr hdr;
+	struct cgx_link_user_info link_info;
+};
 #endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 7cf5865..85994ab 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1316,6 +1316,63 @@ static void rvu_mbox_handler(struct work_struct *work)
 	otx2_mbox_msg_send(mbox, pf);
 }
 
+static void rvu_mbox_up_handler(struct work_struct *work)
+{
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+	struct rvu *rvu = mwork->rvu;
+	struct otx2_mbox_dev *mdev;
+	struct mbox_hdr *rsp_hdr;
+	struct mbox_msghdr *msg;
+	struct otx2_mbox *mbox;
+	int offset, id;
+	u16 pf;
+
+	mbox = &rvu->mbox_up;
+	pf = mwork - rvu->mbox_wrk_up;
+	mdev = &mbox->dev[pf];
+
+	rsp_hdr = mdev->mbase + mbox->rx_start;
+	if (rsp_hdr->num_msgs == 0) {
+		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
+		return;
+	}
+
+	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+	for (id = 0; id < rsp_hdr->num_msgs; id++) {
+		msg = mdev->mbase + offset;
+
+		if (msg->id >= MBOX_MSG_MAX) {
+			dev_err(rvu->dev,
+				"Mbox msg with unknown ID 0x%x\n", msg->id);
+			goto end;
+		}
+
+		if (msg->sig != OTX2_MBOX_RSP_SIG) {
+			dev_err(rvu->dev,
+				"Mbox msg with wrong signature %x, ID 0x%x\n",
+				msg->sig, msg->id);
+			goto end;
+		}
+
+		switch (msg->id) {
+		case MBOX_MSG_CGX_LINK_EVENT:
+			break;
+		default:
+			if (msg->rc)
+				dev_err(rvu->dev,
+					"Mbox msg response has err %d, ID 0x%x\n",
+					msg->rc, msg->id);
+			break;
+		}
+end:
+		offset = mbox->rx_start + msg->next_msgoff;
+		mdev->msgs_acked++;
+	}
+
+	otx2_mbox_reset(mbox, 0);
+}
+
 static int rvu_mbox_init(struct rvu *rvu)
 {
 	struct rvu_hwinfo *hw = rvu->hw;
@@ -1337,6 +1394,13 @@ static int rvu_mbox_init(struct rvu *rvu)
 		goto exit;
 	}
 
+	rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
+					sizeof(struct rvu_work), GFP_KERNEL);
+	if (!rvu->mbox_wrk_up) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
 	/* Map mbox region shared with PFs */
 	bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
 	/* Mailbox is a reserved memory (in RAM) region shared between
@@ -1355,12 +1419,23 @@ static int rvu_mbox_init(struct rvu *rvu)
 	if (err)
 		goto exit;
 
+	err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
+			     MBOX_DIR_AFPF_UP, hw->total_pfs);
+	if (err)
+		goto exit;
+
 	for (pf = 0; pf < hw->total_pfs; pf++) {
 		mwork = &rvu->mbox_wrk[pf];
 		mwork->rvu = rvu;
 		INIT_WORK(&mwork->work, rvu_mbox_handler);
 	}
 
+	for (pf = 0; pf < hw->total_pfs; pf++) {
+		mwork = &rvu->mbox_wrk_up[pf];
+		mwork->rvu = rvu;
+		INIT_WORK(&mwork->work, rvu_mbox_up_handler);
+	}
+
 	return 0;
 exit:
 	if (hwbase)
@@ -1381,6 +1456,7 @@ static void rvu_mbox_destroy(struct rvu *rvu)
 		iounmap((void __iomem *)rvu->mbox.hwbase);
 
 	otx2_mbox_destroy(&rvu->mbox);
+	otx2_mbox_destroy(&rvu->mbox_up);
 }
 
 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
@@ -1407,6 +1483,12 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
 			if (hdr->num_msgs)
 				queue_work(rvu->mbox_wq,
 					   &rvu->mbox_wrk[pf].work);
+			mbox = &rvu->mbox_up;
+			mdev = &mbox->dev[pf];
+			hdr = mdev->mbase + mbox->rx_start;
+			if (hdr->num_msgs)
+				queue_work(rvu->mbox_wq,
+					   &rvu->mbox_wrk_up[pf].work);
 		}
 	}
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 331b8b3..8347808 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -93,6 +93,8 @@ struct rvu {
 	/* Mbox */
 	struct otx2_mbox	mbox;
 	struct rvu_work		*mbox_wrk;
+	struct otx2_mbox        mbox_up;
+	struct rvu_work		*mbox_wrk_up;
 	struct workqueue_struct *mbox_wq;
 
 	/* MSI-X */
@@ -109,6 +111,7 @@ struct rvu {
 	u16			*cgxlmac2pf_map; /* bitmap of mapped pfs for
 						  * every cgx lmac port
 						  */
+	unsigned long		pf_notify_bmap; /* Flags for PF notification */
 	void			**cgx_idmap; /* cgx id to cgx data map table */
 	struct			work_struct cgx_evh_work;
 	struct			workqueue_struct *cgx_evh_wq;
@@ -183,4 +186,10 @@ int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
 					struct msg_rsp *rsp);
 int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
 					 struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+					  struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+					 struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+				      struct cgx_link_info_msg *rsp);
 #endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 71e795e..e917f28 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -20,6 +20,31 @@ struct cgx_evq_entry {
 	struct cgx_link_event link_event;
 };
 
+#define M(_name, _id, _req_type, _rsp_type)				\
+static struct _req_type __maybe_unused					\
+*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid)		\
+{									\
+	struct _req_type *req;						\
+									\
+	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
+		&rvu->mbox_up, devid, sizeof(struct _req_type),		\
+		sizeof(struct _rsp_type));				\
+	if (!req)							\
+		return NULL;						\
+	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
+	req->hdr.id = _id;						\
+	return req;							\
+}
+
+MBOX_UP_CGX_MESSAGES
+#undef M
+
+/* Returns bitmap of mapped PFs */
+static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+{
+	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
+}
+
 static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
 {
 	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
@@ -77,6 +102,34 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 	return 0;
 }
 
+static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
+{
+	struct cgx_evq_entry *qentry;
+	unsigned long flags;
+	int err;
+
+	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
+	if (!qentry)
+		return -ENOMEM;
+
+	/* Lock the event queue before we read the local link status */
+	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+				&qentry->link_event.link_uinfo);
+	qentry->link_event.cgx_id = cgx_id;
+	qentry->link_event.lmac_id = lmac_id;
+	if (err)
+		goto skip_add;
+	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+skip_add:
+	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+
+	/* start worker to process the events */
+	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+	return 0;
+}
+
 /* This is called from interrupt context and is expected to be atomic */
 static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
 {
@@ -98,6 +151,41 @@ static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
 	return 0;
 }
 
+static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+{
+	struct cgx_link_user_info *linfo;
+	struct cgx_link_info_msg *msg;
+	unsigned long pfmap;
+	int err, pfid;
+
+	linfo = &event->link_uinfo;
+	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+
+	do {
+		pfid = find_first_bit(&pfmap, 16);
+		clear_bit(pfid, &pfmap);
+
+		/* check if notification is enabled */
+		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
+			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
+				 event->cgx_id, event->lmac_id,
+				 linfo->link_up ? "UP" : "DOWN");
+			continue;
+		}
+
+		/* Send mbox message to PF */
+		msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
+		if (!msg)
+			continue;
+		msg->link_info = *linfo;
+		otx2_mbox_msg_send(&rvu->mbox_up, pfid);
+		err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
+		if (err)
+			dev_warn(rvu->dev, "notification to pf %d failed\n",
+				 pfid);
+	} while (pfmap);
+}
+
 static void cgx_evhandler_task(struct work_struct *work)
 {
 	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
@@ -119,7 +207,8 @@ static void cgx_evhandler_task(struct work_struct *work)
 
 		event = &qentry->link_event;
 
-		/* Do nothing for now */
+		/* process event */
+		cgx_notify_pfs(event, rvu);
 		kfree(qentry);
 	} while (1);
 }
@@ -334,3 +423,59 @@ int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
 	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
 	return 0;
 }
+
+static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
+{
+	int pf = rvu_get_pf(pcifunc);
+	u8 cgx_id, lmac_id;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs,
+	 * if received from other PF/VF simply ACK, nothing to do.
+	 */
+	if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+		return -ENODEV;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+	if (en) {
+		set_bit(pf, &rvu->pf_notify_bmap);
+		/* Send the current link status to PF */
+		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
+	} else {
+		clear_bit(pf, &rvu->pf_notify_bmap);
+	}
+
+	return 0;
+}
+
+int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+					  struct msg_rsp *rsp)
+{
+	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
+	return 0;
+}
+
+int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+					 struct msg_rsp *rsp)
+{
+	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
+	return 0;
+}
+
+int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+				      struct cgx_link_info_msg *rsp)
+{
+	u8 cgx_id, lmac_id;
+	int pf, err;
+
+	pf = rvu_get_pf(req->hdr.pcifunc);
+
+	if (!is_pf_cgxmapped(rvu, pf))
+		return -ENODEV;
+
+	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+				&rsp->link_info);
+	return err;
+}
-- 
2.7.4
