Message-Id: <1448023677-22296-4-git-send-email-m@bjorling.me>
Date:	Fri, 20 Nov 2015 13:47:55 +0100
From:	Matias Bjørling <m@...rling.me>
To:	linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
	axboe@...com
Cc:	Wenwei Tao <ww.tao0320@...il.com>,
	Matias Bjørling <m@...rling.me>
Subject: [PATCH 3/5] nvme: lightnvm: use admin queues for admin cmds

From: Wenwei Tao <ww.tao0320@...il.com>

According to the Open-Channel SSD Specification, the NVMe-NVM admin
commands use NVMe vendor-specific opcodes, so dispatch these commands
on the NVMe admin queue.
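
As an illustration, each of the converted commands follows the same
pattern (a minimal sketch; the helper below is hypothetical, but the
fields and the nvme_submit_sync_cmd() call are the ones visible in the
diff):

	static int nvme_nvm_submit_admin(struct request_queue *q,
					 struct nvme_nvm_command *c,
					 void *buf, unsigned int len)
	{
		struct nvme_ns *ns = q->queuedata;	/* per-namespace I/O queue */
		struct nvme_dev *dev = ns->dev;		/* owning controller */

		/* dispatch on the controller-wide admin queue instead of q */
		return nvme_submit_sync_cmd(dev->admin_q,
					(struct nvme_command *)c, buf, len);
	}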

Signed-off-by: Wenwei Tao <ww.tao0320@...il.com>
Updated by me to also convert the set bad block table command and to
use the admin queue for the L2P transfer length calculation.
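
(Since the L2P data now moves over the admin queue, the per-command
transfer length must be bounded by that queue's limits rather than the
namespace queue's. Roughly, as in the hunk below:

	u32 len = queue_max_hw_sectors(dev->admin_q) << 9;	/* max bytes per command */
	u32 nlb_pr_rq = len / sizeof(u64);			/* L2P entries per request */
)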

Signed-off-by: Matias Bjørling <m@...rling.me>
---
 drivers/nvme/host/lightnvm.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 7e82fe3..9202d1a 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -276,6 +276,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
 {
 	struct nvme_ns *ns = q->queuedata;
+	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_id *nvme_nvm_id;
 	struct nvme_nvm_command c = {};
 	int ret;
@@ -288,8 +289,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
 	if (!nvme_nvm_id)
 		return -ENOMEM;
 
-	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id,
-						sizeof(struct nvme_nvm_id));
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+				nvme_nvm_id, sizeof(struct nvme_nvm_id));
 	if (ret) {
 		ret = -EIO;
 		goto out;
@@ -315,7 +316,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_command c = {};
-	u32 len = queue_max_hw_sectors(q) << 9;
+	u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
 	u32 nlb_pr_rq = len / sizeof(u64);
 	u64 cmd_slba = slba;
 	void *entries;
@@ -333,8 +334,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
 		c.l2p.slba = cpu_to_le64(cmd_slba);
 		c.l2p.nlb = cpu_to_le32(cmd_nlb);
 
-		ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c,
-								entries, len);
+		ret = nvme_submit_sync_cmd(dev->admin_q,
+				(struct nvme_command *)&c, entries, len);
 		if (ret) {
 			dev_err(dev->dev, "L2P table transfer failed (%d)\n",
 									ret);
@@ -375,7 +376,8 @@ static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa,
 	if (!bb_tbl)
 		return -ENOMEM;
 
-	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_tbl, tblsz);
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+								bb_tbl, tblsz);
 	if (ret) {
 		dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
 		ret = -EIO;
@@ -427,7 +429,8 @@ static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
 	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
 	c.set_bb.value = type;
 
-	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+								NULL, 0);
 	if (ret)
 		dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
 	return ret;
-- 
2.1.4
