Message-Id: <20230407203143.2189681-13-fenghua.yu@intel.com>
Date:   Fri,  7 Apr 2023 13:31:39 -0700
From:   Fenghua Yu <fenghua.yu@...el.com>
To:     "Vinod Koul" <vkoul@...nel.org>,
        "Dave Jiang" <dave.jiang@...el.com>
Cc:     dmaengine@...r.kernel.org,
        "linux-kernel" <linux-kernel@...r.kernel.org>,
        Tony Zhu <tony.zhu@...el.com>,
        Fenghua Yu <fenghua.yu@...el.com>
Subject: [PATCH v4 12/16] dmaengine: idxd: add per file user counters for completion record faults

From: Dave Jiang <dave.jiang@...el.com>

Add per-file counters for each opened char device file in order to keep
track of how many completion record faults occurred and how many of those
faults failed the writeback by the driver after attempting to fault in the
page. The counters are managed by an xarray that associates the PASID with
struct idxd_user_context.
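
Below is a minimal, standalone sketch (not part of this patch and not the
driver's actual code) of the pattern described above: an xarray keyed by
PASID maps to a per-file context, and a lock keeps the lookup-and-increment
consistent with context removal on release. The demo_* names are
illustrative only.

#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct demo_ctx {
	u64 counters[2];	/* e.g. faults and failed writebacks */
};

struct demo_wq {
	struct xarray pasid_xa;	/* PASID -> struct demo_ctx * */
	struct mutex lock;	/* serializes lookups against removal */
};

/* Assumes xa_init(&wq->pasid_xa) and mutex_init(&wq->lock) were done. */

/* On open: associate the file's PASID with its per-file context. */
static int demo_ctx_register(struct demo_wq *wq, u32 pasid,
			     struct demo_ctx *ctx)
{
	int ret;

	mutex_lock(&wq->lock);
	ret = xa_insert(&wq->pasid_xa, pasid, ctx, GFP_KERNEL);
	mutex_unlock(&wq->lock);

	return ret;
}

/* From fault handling: bump a counter only if the PASID is still present. */
static void demo_counter_inc(struct demo_wq *wq, u32 pasid, int index)
{
	struct demo_ctx *ctx;

	mutex_lock(&wq->lock);
	ctx = xa_load(&wq->pasid_xa, pasid);
	if (ctx)
		ctx->counters[index]++;
	mutex_unlock(&wq->lock);
}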

Tested-by: Tony Zhu <tony.zhu@...el.com>
Signed-off-by: Dave Jiang <dave.jiang@...el.com>
Co-developed-by: Fenghua Yu <fenghua.yu@...el.com>
Signed-off-by: Fenghua Yu <fenghua.yu@...el.com>
---
v3:
- Move the majority of the xarray code to patch 8, which implements the new
  function idxd_copy_cr(), since that function needs the xarray to maintain
  and find the mm by wq and PASID. Only keep the xarray code related to the
  user counters here.
 drivers/dma/idxd/cdev.c | 18 ++++++++++++++++++
 drivers/dma/idxd/idxd.h |  7 +++++++
 drivers/dma/idxd/irq.c  |  4 ++++
 3 files changed, 29 insertions(+)

diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 0a51c33198f6..5c8e964e671b 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -39,6 +39,7 @@ struct idxd_user_context {
 	struct mm_struct *mm;
 	unsigned int flags;
 	struct iommu_sva *sva;
+	u64 counters[COUNTER_MAX];
 };
 
 static void idxd_cdev_dev_release(struct device *dev)
@@ -84,6 +85,23 @@ static void idxd_xa_pasid_remove(struct idxd_user_context *ctx)
 	mutex_unlock(&wq->uc_lock);
 }
 
+void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index)
+{
+	struct idxd_user_context *ctx;
+
+	if (index >= COUNTER_MAX)
+		return;
+
+	mutex_lock(&wq->uc_lock);
+	ctx = xa_load(&wq->upasid_xa, pasid);
+	if (!ctx) {
+		mutex_unlock(&wq->uc_lock);
+		return;
+	}
+	ctx->counters[index]++;
+	mutex_unlock(&wq->uc_lock);
+}
+
 static int idxd_cdev_open(struct inode *inode, struct file *filp)
 {
 	struct idxd_user_context *ctx;
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 4c4baa80c731..9fb26d017285 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -127,6 +127,12 @@ struct idxd_pmu {
 
 #define IDXD_MAX_PRIORITY	0xf
 
+enum {
+	COUNTER_FAULTS = 0,
+	COUNTER_FAULT_FAILS,
+	COUNTER_MAX
+};
+
 enum idxd_wq_state {
 	IDXD_WQ_DISABLED = 0,
 	IDXD_WQ_ENABLED,
@@ -713,6 +719,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq);
 void idxd_wq_del_cdev(struct idxd_wq *wq);
 int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
 		 void *buf, int len);
+void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);
 
 /* perfmon */
 #if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index c660d63a3eb8..f4b0f59c95ba 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -240,6 +240,7 @@ static void idxd_evl_fault_work(struct work_struct *work)
 			evl->batch_fail[entry_head->batch_id] = false;
 
 		copy_size = cr_size;
+		idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS);
 		break;
 	case DSA_COMP_BATCH_EVL_ERR:
 		bf = &evl->batch_fail[entry_head->batch_id];
@@ -251,6 +252,7 @@ static void idxd_evl_fault_work(struct work_struct *work)
 			*result = 1;
 			*bf = false;
 		}
+		idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS);
 		break;
 	case DSA_COMP_DRAIN_EVL:
 		copy_size = cr_size;
@@ -282,6 +284,7 @@ static void idxd_evl_fault_work(struct work_struct *work)
 	switch (fault->status) {
 	case DSA_COMP_CRA_XLAT:
 		if (copied != copy_size) {
+			idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS);
 			dev_dbg_ratelimited(dev, "Failed to write to completion record: (%d:%d)\n",
 					    copy_size, copied);
 			if (entry_head->batch)
@@ -290,6 +293,7 @@ static void idxd_evl_fault_work(struct work_struct *work)
 		break;
 	case DSA_COMP_BATCH_EVL_ERR:
 		if (copied != copy_size) {
+			idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS);
 			dev_dbg_ratelimited(dev, "Failed to write to batch completion record: (%d:%d)\n",
 					    copy_size, copied);
 		}
-- 
2.37.1
