Message-ID: <D42E8924.3413A%manish.rangankar@cavium.com>
Date:   Thu, 20 Oct 2016 09:24:13 +0000
From:   "Rangankar, Manish" <Manish.Rangankar@...ium.com>
To:     Hannes Reinecke <hare@...e.de>,
        "lduncan@...e.com" <lduncan@...e.com>,
        "cleech@...hat.com" <cleech@...hat.com>
CC:     "martin.petersen@...cle.com" <martin.petersen@...cle.com>,
        "jejb@...ux.vnet.ibm.com" <jejb@...ux.vnet.ibm.com>,
        "linux-scsi@...r.kernel.org" <linux-scsi@...r.kernel.org>,
        "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
        "Mintz, Yuval" <Yuval.Mintz@...ium.com>,
        Dept-Eng QLogic Storage Upstream 
        <QLogic-Storage-Upstream@...ium.com>,
        "Javali, Nilesh" <Nilesh.Javali@...ium.com>,
        Adheer Chandravanshi <adheer.chandravanshi@...gic.com>,
        "Dupuis, Chad" <Chad.Dupuis@...ium.com>,
        "Kashyap, Saurav" <Saurav.Kashyap@...ium.com>,
        "Easi, Arun" <Arun.Easi@...ium.com>
Subject: Re: [RFC 6/6] qedi: Add support for data path.



On 19/10/16 3:54 PM, "Hannes Reinecke" <hare@...e.de> wrote:

>On 10/19/2016 07:01 AM, manish.rangankar@...ium.com wrote:
>> From: Manish Rangankar <manish.rangankar@...ium.com>
>> 
>> This patch adds support for data path and TMF handling.
>> 
>> Signed-off-by: Nilesh Javali <nilesh.javali@...ium.com>
>> Signed-off-by: Adheer Chandravanshi <adheer.chandravanshi@...gic.com>
>> Signed-off-by: Chad Dupuis <chad.dupuis@...ium.com>
>> Signed-off-by: Saurav Kashyap <saurav.kashyap@...ium.com>
>> Signed-off-by: Arun Easi <arun.easi@...ium.com>
>> Signed-off-by: Manish Rangankar <manish.rangankar@...ium.com>
>> ---
>>  drivers/scsi/qedi/qedi_fw.c    | 1282 ++++++++++++++++++++++++++++++++++++++++
>>  drivers/scsi/qedi/qedi_gbl.h   |    6 +
>>  drivers/scsi/qedi/qedi_iscsi.c |    6 +
>>  drivers/scsi/qedi/qedi_main.c  |    4 +
>>  4 files changed, 1298 insertions(+)
>> 
>> diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
>> index a820785..af1e14d 100644
>> --- a/drivers/scsi/qedi/qedi_fw.c
>> +++ b/drivers/scsi/qedi/qedi_fw.c
>> @@ -147,6 +147,114 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
>>  	spin_unlock(&session->back_lock);
>>  }

--snipped--
>> +void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
>> +		   u16 tid, int8_t direction)
>> +{
>> +	struct qedi_io_log *io_log;
>> +	struct iscsi_conn *conn = task->conn;
>> +	struct qedi_conn *qedi_conn = conn->dd_data;
>> +	struct scsi_cmnd *sc_cmd = task->sc;
>> +	unsigned long flags;
>> +	u8 op;
>> +
>> +	spin_lock_irqsave(&qedi->io_trace_lock, flags);
>> +
>> +	io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
>> +	io_log->direction = direction;
>> +	io_log->task_id = tid;
>> +	io_log->cid = qedi_conn->iscsi_conn_id;
>> +	io_log->lun = sc_cmd->device->lun;
>> +	io_log->op = sc_cmd->cmnd[0];
>> +	op = sc_cmd->cmnd[0];
>> +
>> +	if (op == READ_10 || op == WRITE_10) {
>> +		io_log->lba[0] = sc_cmd->cmnd[2];
>> +		io_log->lba[1] = sc_cmd->cmnd[3];
>> +		io_log->lba[2] = sc_cmd->cmnd[4];
>> +		io_log->lba[3] = sc_cmd->cmnd[5];
>> +	} else {
>> +		io_log->lba[0] = 0;
>> +		io_log->lba[1] = 0;
>> +		io_log->lba[2] = 0;
>> +		io_log->lba[3] = 0;
>> +	}
>Only for READ_10 and WRITE_10? What about the other read or write
>commands?

We will add support for the other SCSI read and write commands in the
next revision; a rough sketch of what that could look like is below.
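As an untested sketch (assuming the io_log->lba field stays four bytes,
so only the low 32 bits of a 16-byte CDB's 64-bit LBA would be traced),
the extraction could become:

	switch (op) {
	case READ_6:
	case WRITE_6:
		/* 6-byte CDB: 21-bit LBA in bytes 1-3 */
		io_log->lba[0] = 0;
		io_log->lba[1] = sc_cmd->cmnd[1] & 0x1f;
		io_log->lba[2] = sc_cmd->cmnd[2];
		io_log->lba[3] = sc_cmd->cmnd[3];
		break;
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
		/* 10- and 12-byte CDBs: 32-bit LBA in bytes 2-5 */
		io_log->lba[0] = sc_cmd->cmnd[2];
		io_log->lba[1] = sc_cmd->cmnd[3];
		io_log->lba[2] = sc_cmd->cmnd[4];
		io_log->lba[3] = sc_cmd->cmnd[5];
		break;
	case READ_16:
	case WRITE_16:
		/* 16-byte CDB: 64-bit LBA in bytes 2-9, keep low 32 bits */
		io_log->lba[0] = sc_cmd->cmnd[6];
		io_log->lba[1] = sc_cmd->cmnd[7];
		io_log->lba[2] = sc_cmd->cmnd[8];
		io_log->lba[3] = sc_cmd->cmnd[9];
		break;
	default:
		io_log->lba[0] = 0;
		io_log->lba[1] = 0;
		io_log->lba[2] = 0;
		io_log->lba[3] = 0;
		break;
	}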

>
>> +	io_log->bufflen = scsi_bufflen(sc_cmd);
>> +	io_log->sg_count = scsi_sg_count(sc_cmd);
>> +	io_log->fast_sgs = qedi->fast_sgls;
>> +	io_log->cached_sgs = qedi->cached_sgls;
>> +	io_log->slow_sgs = qedi->slow_sgls;
>> +	io_log->cached_sge = qedi->use_cached_sge;
>> +	io_log->slow_sge = qedi->use_slow_sge;
>> +	io_log->fast_sge = qedi->use_fast_sge;
>> +	io_log->result = sc_cmd->result;
>> +	io_log->jiffies = jiffies;
>> +	io_log->blk_req_cpu = smp_processor_id();
>> +
>> +	if (direction == QEDI_IO_TRACE_REQ) {
>> +		/* For requests we only care about the submission CPU */
>> +		io_log->req_cpu = smp_processor_id() % qedi->num_queues;
>> +		io_log->intr_cpu = 0;
>> +		io_log->blk_rsp_cpu = 0;
>> +	} else if (direction == QEDI_IO_TRACE_RSP) {
>> +		io_log->req_cpu = smp_processor_id() % qedi->num_queues;
>> +		io_log->intr_cpu = qedi->intr_cpu;
>> +		io_log->blk_rsp_cpu = smp_processor_id();
>> +	}
>> +
>> +	qedi->io_trace_idx++;
>> +	if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
>> +		qedi->io_trace_idx = 0;
>> +
>> +	qedi->use_cached_sge = false;
>> +	qedi->use_slow_sge = false;
>> +	qedi->use_fast_sge = false;
>> +
>> +	spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
>> +}
>> +
>> +int qedi_iscsi_send_ioreq(struct iscsi_task *task)
>> +{
>> +	struct iscsi_conn *conn = task->conn;
>> +	struct iscsi_session *session = conn->session;
>> +	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
>> +	struct qedi_ctx *qedi = iscsi_host_priv(shost);
>> +	struct qedi_conn *qedi_conn = conn->dd_data;
>> +	struct qedi_cmd *cmd = task->dd_data;
>> +	struct scsi_cmnd *sc = task->sc;
>> +	struct iscsi_task_context *fw_task_ctx;
>> +	struct iscsi_cached_sge_ctx *cached_sge;
>> +	struct iscsi_phys_sgl_ctx *phys_sgl;
>> +	struct iscsi_virt_sgl_ctx *virt_sgl;
>> +	struct ystorm_iscsi_task_st_ctx *yst_cxt;
>> +	struct mstorm_iscsi_task_st_ctx *mst_cxt;
>> +	struct iscsi_sgl *sgl_struct;
>> +	struct iscsi_sge *single_sge;
>> +	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
>> +	struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
>> +	enum iscsi_task_type task_type;
>> +	struct iscsi_cmd_hdr *fw_cmd;
>> +	u32 scsi_lun[2];
>> +	u16 cq_idx = smp_processor_id() % qedi->num_queues;
>> +	s16 ptu_invalidate = 0;
>> +	s16 tid = 0;
>> +	u8 num_fast_sgs;
>> +
>> +	tid = qedi_get_task_idx(qedi);
>> +	if (tid == -1)
>> +		return -ENOMEM;
>> +
>> +	qedi_iscsi_map_sg_list(cmd);
>> +
>> +	int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
>> +	fw_task_ctx =
>> +	      (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
>> +
>> +	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
>> +	cmd->task_id = tid;
>> +
>> +	/* Ystrom context */
>Ystrom or Ystorm?

Noted, it should be Ystorm. We will fix the comment in the next revision.

>
>> +	fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
>> +	SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
>> +
>> +	if (sc->sc_data_direction == DMA_TO_DEVICE) {
>> +		if (conn->session->initial_r2t_en) {
>> +			fw_task_ctx->ustorm_ag_context.exp_data_acked =
>> +				min((conn->session->imm_data_en *
>> +				    conn->max_xmit_dlength),
>> +				    conn->session->first_burst);
>> +			fw_task_ctx->ustorm_ag_context.exp_data_acked =
>> +			      min(fw_task_ctx->ustorm_ag_context.exp_data_acked,
>> +				  scsi_bufflen(sc));
>> +		} else {
>> +			fw_task_ctx->ustorm_ag_context.exp_data_acked =
>> +			      min(conn->session->first_burst, scsi_bufflen(sc));
>> +		}
>> +
>> +		SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
>> +		task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
>> +	} else {
>> +		if (scsi_bufflen(sc))
>> +			SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
>> +		task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
>> +	}
>> +
>> +	fw_cmd->lun.lo = be32_to_cpu(scsi_lun[0]);
>> +	fw_cmd->lun.hi = be32_to_cpu(scsi_lun[1]);
>> +
>> +	qedi_update_itt_map(qedi, tid, task->itt);
>> +	fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
>> +	fw_cmd->expected_transfer_length = scsi_bufflen(sc);
>> +	fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
>> +	fw_cmd->opcode = hdr->opcode;
>> +	qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
>> +
>> +	/* Mstorm context */
>> +	fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
>> +	fw_task_ctx->mstorm_st_context.sense_db.hi =
>> +					(u32)((u64)cmd->sense_buffer_dma >> 32);
>> +	fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
>> +	fw_task_ctx->mstorm_st_context.task_type = task_type;
>> +
>> +	if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
>> +		ptu_invalidate = 1;
>> +		qedi->tid_reuse_count[tid] = 0;
>> +	}
>> +	fw_task_ctx->ystorm_st_context.state.reuse_count =
>> +						     qedi->tid_reuse_count[tid];
>> +	fw_task_ctx->mstorm_st_context.reuse_count =
>> +						   qedi->tid_reuse_count[tid]++;
>> +
>> +	/* Ustrorm context */
>Ustrorm?

Noted, it should be Ustorm. We will fix the comment in the next revision.

Thanks,
Manish R.
