Message-Id: <20170523200909.953043654@linuxfoundation.org>
Date: Tue, 23 May 2017 22:08:01 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
"Bryant G. Ly" <bryantly@...ux.vnet.ibm.com>,
Michael Cyr <mikecyr@...ux.vnet.ibm.com>,
Nicholas Bellinger <nab@...ux-iscsi.org>
Subject: [PATCH 4.9 064/164] ibmvscsis: Do not send aborted task response
4.9-stable review patch. If anyone has any objections, please let me know.
------------------
From: Bryant G. Ly <bryantly@...ux.vnet.ibm.com>
commit 25e78531268e9240fc594ce76587601b873d37c9 upstream.
The driver is sending a response to the actual SCSI op that was
aborted by an abort task TM, while LIO is sending a response to
the abort task TM itself.
ibmvscsis_tgt does not send the response to the client until
release_cmd time. The reason is that if we sent it at
queue_status time, the client would be free to reuse the
tag for that command, but we are still using the tag until the
command is released at release_cmd time, so we chose to delay
sending the response until then. That in turn caused this issue,
because release_cmd is always called, even if queue_status is not.
The SCSI spec says that the initiator that sends the abort task
TM NEVER gets a response to the aborted op, yet with the current
code it will get one. Thus this fix removes that response
when CMD_T_ABORTED is set and CMD_T_TAS is not.
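To make that condition concrete, here is a minimal user-space sketch
of the check the fix adds in the send path. This is not the driver
code: the helper name must_suppress_response() and the bit values are
illustrative stand-ins, the real CMD_T_* bits live in the LIO core.

#include <stdbool.h>

/* Illustrative stand-ins; the real bit values live in the LIO core. */
#define CMD_T_ABORTED (1 << 0)
#define CMD_T_TAS     (1 << 1)

/*
 * True when the op was aborted and Task Aborted Status was not
 * requested: per the SCSI spec the initiator that issued the abort
 * task TM must not see a response for the aborted op, so the driver
 * frees the command's resources instead of sending one.
 */
static bool must_suppress_response(unsigned int transport_state)
{
	return (transport_state & CMD_T_ABORTED) &&
	       !(transport_state & CMD_T_TAS);
}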
Another case with a small timing window is when LIO sends a
TMR_DOES_NOT_EXIST and the release_cmd callback is called for the TMR
Abort cmd before the release_cmd for the (attempted) aborted cmd; then
we need to ensure that we send the response for the (attempted) aborted
cmd to the client before we send the response for the TMR Abort cmd.
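That ordering can be illustrated with a small stand-alone sketch. It
is a hypothetical user-space model, not the driver: struct cmd,
send_messages() and the queue array are simplified stand-ins for
ibmvscsis_cmd, ibmvscsis_send_messages() and the waiting_rsp list. The
aborted cmd carries a pointer to the TMR cmd, the TMR cmd is flagged
DELAY_SEND, and the send loop retries once the flag is cleared, so the
aborted op's response always goes out first.

#include <stdbool.h>
#include <stdio.h>

#define DELAY_SEND (1 << 1)	/* mirrors the new flag bit below */

struct cmd {
	unsigned int flags;
	struct cmd *abort_cmd;	/* TMR whose response must wait for ours */
	const char *name;
	bool sent;
};

/*
 * One pass skips any cmd flagged DELAY_SEND; when the aborted cmd
 * goes out it clears the flag on the TMR it points at and forces a
 * retry pass, so the TMR response is always sent second.
 */
static void send_messages(struct cmd **q, int n)
{
	bool retry;
	int i;

	do {
		retry = false;
		for (i = 0; i < n; i++) {
			struct cmd *c = q[i];

			if (c->sent || (c->flags & DELAY_SEND))
				continue;
			if (c->abort_cmd) {
				retry = true;
				c->abort_cmd->flags &= ~DELAY_SEND;
			}
			printf("response for %s\n", c->name);
			c->sent = true;
		}
	} while (retry);
}

int main(void)
{
	struct cmd tmr = { .flags = DELAY_SEND, .name = "TMR abort" };
	struct cmd aborted = { .abort_cmd = &tmr, .name = "aborted op" };
	struct cmd *q[] = { &tmr, &aborted };

	send_messages(q, 2);	/* aborted op first, then TMR abort */
	return 0;
}

Running the sketch prints the aborted op's response before the TMR
abort response, which is the ordering the patch enforces.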
Signed-off-by: Bryant G. Ly <bryantly@...ux.vnet.ibm.com>
Signed-off-by: Michael Cyr <mikecyr@...ux.vnet.ibm.com>
Signed-off-by: Nicholas Bellinger <nab@...ux-iscsi.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 120 +++++++++++++++++++++++--------
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h | 2 ++
2 files changed, 94 insertions(+), 28 deletions(-)
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1169,6 +1169,7 @@ static struct ibmvscsis_cmd *ibmvscsis_g
cmd = list_first_entry_or_null(&vscsi->free_cmd,
struct ibmvscsis_cmd, list);
if (cmd) {
+ cmd->flags &= ~(DELAY_SEND);
list_del(&cmd->list);
cmd->iue = iue;
cmd->type = UNSET_TYPE;
@@ -1748,45 +1749,79 @@ static void srp_snd_msg_failed(struct sc
static void ibmvscsis_send_messages(struct scsi_info *vscsi)
{
u64 msg_hi = 0;
- /* note do not attmempt to access the IU_data_ptr with this pointer
+ /* note do not attempt to access the IU_data_ptr with this pointer
* it is not valid
*/
struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
struct ibmvscsis_cmd *cmd, *nxt;
struct iu_entry *iue;
long rc = ADAPT_SUCCESS;
+ bool retry = false;
if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
- list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
- iue = cmd->iue;
-
- crq->valid = VALID_CMD_RESP_EL;
- crq->format = cmd->rsp.format;
-
- if (cmd->flags & CMD_FAST_FAIL)
- crq->status = VIOSRP_ADAPTER_FAIL;
-
- crq->IU_length = cpu_to_be16(cmd->rsp.len);
+ do {
+ retry = false;
+ list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
+ list) {
+ /*
+ * Check to make sure abort cmd gets processed
+ * prior to the abort tmr cmd
+ */
+ if (cmd->flags & DELAY_SEND)
+ continue;
- rc = h_send_crq(vscsi->dma_dev->unit_address,
- be64_to_cpu(msg_hi),
- be64_to_cpu(cmd->rsp.tag));
+ if (cmd->abort_cmd) {
+ retry = true;
+ cmd->abort_cmd->flags &= ~(DELAY_SEND);
+ }
- pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
- cmd, be64_to_cpu(cmd->rsp.tag), rc);
+ /*
+ * In the CMD_T_ABORTED w/o CMD_T_TAS scenario, and in
+ * the case where LIO issued an
+ * ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST,
+ * we don't send a response, since it
+ * was already done.
+ */
+ if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
+ !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
+ list_del(&cmd->list);
+ ibmvscsis_free_cmd_resources(vscsi,
+ cmd);
+ } else {
+ iue = cmd->iue;
+
+ crq->valid = VALID_CMD_RESP_EL;
+ crq->format = cmd->rsp.format;
+
+ if (cmd->flags & CMD_FAST_FAIL)
+ crq->status = VIOSRP_ADAPTER_FAIL;
+
+ crq->IU_length = cpu_to_be16(cmd->rsp.len);
+
+ rc = h_send_crq(vscsi->dma_dev->unit_address,
+ be64_to_cpu(msg_hi),
+ be64_to_cpu(cmd->rsp.tag));
- /* if all ok free up the command element resources */
- if (rc == H_SUCCESS) {
- /* some movement has occurred */
- vscsi->rsp_q_timer.timer_pops = 0;
- list_del(&cmd->list);
+ pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+ cmd, be64_to_cpu(cmd->rsp.tag), rc);
- ibmvscsis_free_cmd_resources(vscsi, cmd);
- } else {
- srp_snd_msg_failed(vscsi, rc);
- break;
+ /* if all ok free up the command
+ * element resources
+ */
+ if (rc == H_SUCCESS) {
+ /* some movement has occurred */
+ vscsi->rsp_q_timer.timer_pops = 0;
+ list_del(&cmd->list);
+
+ ibmvscsis_free_cmd_resources(vscsi,
+ cmd);
+ } else {
+ srp_snd_msg_failed(vscsi, rc);
+ break;
+ }
+ }
}
- }
+ } while (retry);
if (!rc) {
/*
@@ -2707,6 +2742,7 @@ static int ibmvscsis_alloc_cmds(struct s
for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
i++, cmd++) {
+ cmd->abort_cmd = NULL;
cmd->adapter = vscsi;
INIT_WORK(&cmd->work, ibmvscsis_scheduler);
list_add_tail(&cmd->list, &vscsi->free_cmd);
@@ -3578,9 +3614,20 @@ static int ibmvscsis_write_pending(struc
{
struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
struct iu_entry *iue = cmd->iue;
int rc;
+ /*
+ * If CLIENT_FAILED OR RESPONSE_Q_DOWN, then just return success
+ * since LIO can't do anything about it, and we don't want to
+ * attempt an srp_transfer_data.
+ */
+ if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
+ pr_err("write_pending failed since: %d\n", vscsi->flags);
+ return 0;
+ }
+
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
1, 1);
if (rc) {
@@ -3659,11 +3706,28 @@ static void ibmvscsis_queue_tm_rsp(struc
struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
se_cmd);
struct scsi_info *vscsi = cmd->adapter;
+ struct ibmvscsis_cmd *cmd_itr;
+ struct iu_entry *iue = cmd->iue;
+ struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
+ u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
uint len;
pr_debug("queue_tm_rsp %p, status %d\n",
se_cmd, (int)se_cmd->se_tmr_req->response);
+ if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
+ cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
+ spin_lock_bh(&vscsi->intr_lock);
+ list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
+ if (tag_to_abort == cmd_itr->se_cmd.tag) {
+ cmd_itr->abort_cmd = cmd;
+ cmd->flags |= DELAY_SEND;
+ break;
+ }
+ }
+ spin_unlock_bh(&vscsi->intr_lock);
+ }
+
srp_build_response(vscsi, cmd, &len);
cmd->rsp.format = SRP_FORMAT;
cmd->rsp.len = len;
@@ -3671,8 +3735,8 @@ static void ibmvscsis_queue_tm_rsp(struc
static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
{
- /* TBD: What (if anything) should we do here? */
- pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
+ pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
+ se_cmd, se_cmd->tag);
}
static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -167,10 +167,12 @@ struct ibmvscsis_cmd {
struct iu_rsp rsp;
struct work_struct work;
struct scsi_info *adapter;
+ struct ibmvscsis_cmd *abort_cmd;
/* Sense buffer that will be mapped into outgoing status */
unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
u64 init_time;
#define CMD_FAST_FAIL BIT(0)
+#define DELAY_SEND BIT(1)
u32 flags;
char type;
};