Message-Id: <2756c572e066ed91a41e35568a4d9346e47d6b42.1472462539.git.luto@kernel.org>
Date: Mon, 29 Aug 2016 02:25:45 -0700
From: Andy Lutomirski <luto@...nel.org>
To: Keith Busch <keith.busch@...el.com>, Jens Axboe <axboe@...com>
Cc: linux-nvme@...ts.infradead.org, Christoph Hellwig <hch@....de>,
linux-kernel@...r.kernel.org, Andy Lutomirski <luto@...nel.org>
Subject: [PATCH 2/3] nvme: Pass pointers, not dma addresses, to nvme_get/set_features()
Any user I can imagine that needs a buffer at all will want to pass
a pointer directly. There are currently no callers that use
buffers, so this change is painless, and it will make it much easier
to start using features that need buffers (e.g. APST).
Signed-off-by: Andy Lutomirski <luto@...nel.org>
---
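Note, not for the changelog: just to illustrate the intended use of the new
interface, a future caller (e.g. the APST work mentioned above) could read a
feature payload straight into an ordinary kernel buffer.  The function below
is a hypothetical sketch, not part of this series; it assumes the
NVME_FEAT_AUTO_PST constant from include/linux/nvme.h and the 256-byte APST
table size from the NVMe spec.

	/*
	 * Illustrative only: fetch the Autonomous Power State Transition
	 * table (32 entries * 8 bytes) into a kmalloc'd buffer using the
	 * pointer-based nvme_get_features().
	 */
	static int example_read_apst(struct nvme_ctrl *ctrl)
	{
		void *buf;
		u32 result;
		int ret;

		buf = kzalloc(256, GFP_KERNEL);	/* fits in one page, no WARN */
		if (!buf)
			return -ENOMEM;

		ret = nvme_get_features(ctrl, NVME_FEAT_AUTO_PST, 0,
					buf, 256, &result);
		kfree(buf);
		return ret;
	}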
drivers/nvme/host/core.c | 32 ++++++++++++++++++++++++--------
drivers/nvme/host/nvme.h | 4 ++--
drivers/nvme/host/scsi.c | 6 +++---
3 files changed, 29 insertions(+), 13 deletions(-)
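A quick note on the WARN_ONCE added below: it takes the buffer's offset
within its page plus the requested length and complains if the sum exceeds
PAGE_SIZE, i.e. if the payload would straddle a Linux page boundary.  For
example, with 4096-byte pages, a buffer at page offset 0xf80 with buflen
0x100 gives 0xf80 + 0x100 = 0x1080 > 0x1000 and would trigger the warning;
the same buffer at offset 0xe00 would not.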
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2feacc70bf61..3f7561ab54dc 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -597,19 +597,25 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
}
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
- dma_addr_t dma_addr, u32 *result)
+ void *buffer, size_t buflen, u32 *result)
{
struct nvme_command c;
struct nvme_completion cqe;
int ret;
+ /*
+ * A controller "page" may be bigger than a Linux page, but we can
+ * be conservative here.
+ */
+ WARN_ONCE(((unsigned long)buffer & (PAGE_SIZE-1)) + buflen > PAGE_SIZE,
+ "NVME feature crosses a page boundary\n");
+
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_get_features;
c.features.nsid = cpu_to_le32(nsid);
- c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
@@ -617,20 +623,30 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
}
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
- dma_addr_t dma_addr, u32 *result)
+ const void *buffer, size_t buflen, u32 *result)
{
struct nvme_command c;
struct nvme_completion cqe;
int ret;
+ /*
+ * A controller "page" may be bigger than a Linux page, but we can
+ * be conservative here.
+ */
+ WARN_ONCE(((unsigned long)buffer & (PAGE_SIZE-1)) + buflen > PAGE_SIZE,
+ "NVME feature crosses a page boundary\n");
+
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
- c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
- NVME_QID_ANY, 0, 0);
+ /*
+ * Casting buffer to void* is safe here: __nvme_submit_sync_cmd knows
+ * that we're writing because it decodes the opcode.
+ */
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe,
+ (void *)buffer, buflen, 0, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
return ret;
@@ -664,7 +680,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
u32 result;
int status, nr_io_queues;
- status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
+ status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
&result);
if (status < 0)
return status;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ab18b78102bf..383ae22e169e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -292,9 +292,9 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
- dma_addr_t dma_addr, u32 *result);
+ void *buffer, size_t buflen, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
- dma_addr_t dma_addr, u32 *result);
+ const void *buffer, size_t buflen, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index 44009105f8c8..c2a0a1c7d05d 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -906,7 +906,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
kfree(smart_log);
/* Get Features for Temp Threshold */
- res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, 0,
+ res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, NULL, 0,
&feature_resp);
if (res != NVME_SC_SUCCESS)
temp_c_thresh = LOG_TEMP_UNKNOWN;
@@ -1039,7 +1039,7 @@ static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
if (len < MODE_PAGE_CACHING_LEN)
return -EINVAL;
- nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, 0,
+ nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, NULL, 0,
&feature_resp);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
@@ -1328,7 +1328,7 @@ static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
case MODE_PAGE_CACHING:
dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC,
- dword11, 0, NULL);
+ dword11, NULL, 0, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
break;
case MODE_PAGE_CONTROL:
--
2.7.4