Message-ID: <b2305a5aaefdd64630a6b99c7b46397ccb029fd9.camel@perches.com>
Date: Thu, 25 Jun 2020 19:57:31 -0700
From: Joe Perches <joe@...ches.com>
To: Jeff Kirsher <jeffrey.t.kirsher@...el.com>, davem@...emloft.net
Cc: Alice Michael <alice.michael@...el.com>, netdev@...r.kernel.org,
nhorman@...hat.com, sassmann@...hat.com,
Alan Brady <alan.brady@...el.com>,
Phani Burra <phani.r.burra@...el.com>,
Joshua Hay <joshua.a.hay@...el.com>,
Madhu Chittim <madhu.chittim@...el.com>,
Pavan Kumar Linga <pavan.kumar.linga@...el.com>,
Donald Skidmore <donald.c.skidmore@...el.com>,
Jesse Brandeburg <jesse.brandeburg@...el.com>,
Sridhar Samudrala <sridhar.samudrala@...el.com>
Subject: Re: [net-next v3 06/15] iecm: Implement mailbox functionality
On Thu, 2020-06-25 at 19:07 -0700, Jeff Kirsher wrote:
> From: Alice Michael <alice.michael@...el.com>
>
> Implement mailbox setup, take down, and commands.
[]
> diff --git a/drivers/net/ethernet/intel/iecm/iecm_controlq.c b/drivers/net/ethernet/intel/iecm/iecm_controlq.c
[]
> @@ -73,7 +142,74 @@ enum iecm_status iecm_ctlq_add(struct iecm_hw *hw,
> struct iecm_ctlq_create_info *qinfo,
> struct iecm_ctlq_info **cq)
Multiple functions using **cq and *cq can be error prone.
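Perhaps return the queue instead and use ERR_PTR, e.g. (sketch only,
untested):

	struct iecm_ctlq_info *iecm_ctlq_add(struct iecm_hw *hw,
					     struct iecm_ctlq_create_info *qinfo)
	{
		struct iecm_ctlq_info *cq;

		if (!qinfo->len || !qinfo->buf_size ||
		    qinfo->len > IECM_CTLQ_MAX_RING_SIZE ||
		    qinfo->buf_size > IECM_CTLQ_MAX_BUF_LEN)
			return ERR_PTR(-EINVAL);

		cq = kzalloc(sizeof(*cq), GFP_KERNEL);
		if (!cq)
			return ERR_PTR(-ENOMEM);
		...
		return cq;
	}

with callers testing IS_ERR().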
> {
> - /* stub */
> + enum iecm_status status = 0;
> + bool is_rxq = false;
> +
> + if (!qinfo->len || !qinfo->buf_size ||
> + qinfo->len > IECM_CTLQ_MAX_RING_SIZE ||
> + qinfo->buf_size > IECM_CTLQ_MAX_BUF_LEN)
> + return IECM_ERR_CFG;
> +
> + *cq = kcalloc(1, sizeof(struct iecm_ctlq_info), GFP_KERNEL);
> + if (!(*cq))
> + return IECM_ERR_NO_MEMORY;
Pity there is an iecm_status and not just a generic -ENOMEM.
Is there much value in the separate enum?
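With generic errnos (and kzalloc rather than kcalloc of 1) this
could simply be:

	*cq = kzalloc(sizeof(**cq), GFP_KERNEL);
	if (!*cq)
		return -ENOMEM;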
[]
> @@ -152,7 +388,58 @@ enum iecm_status iecm_ctlq_clean_sq(struct iecm_ctlq_info *cq,
> u16 *clean_count,
> struct iecm_ctlq_msg *msg_status[])
> {
> - /* stub */
> + enum iecm_status ret = 0;
> + struct iecm_ctlq_desc *desc;
> + u16 i = 0, num_to_clean;
> + u16 ntc, desc_err;
> +
> + if (!cq || !cq->ring_size)
> + return IECM_ERR_CTLQ_EMPTY;
> +
> + if (*clean_count == 0)
> + return 0;
> + else if (*clean_count > cq->ring_size)
> + return IECM_ERR_PARAM;
unnecessary else after return
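i.e.:

	if (*clean_count == 0)
		return 0;
	if (*clean_count > cq->ring_size)
		return IECM_ERR_PARAM;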
> +
> + mutex_lock(&cq->cq_lock);
> +
> + ntc = cq->next_to_clean;
> +
> + num_to_clean = *clean_count;
> +
> + for (i = 0; i < num_to_clean; i++) {
> + /* Fetch next descriptor and check if marked as done */
> + desc = IECM_CTLQ_DESC(cq, ntc);
> + if (!(le16_to_cpu(desc->flags) & IECM_CTLQ_FLAG_DD))
> + break;
> +
> + desc_err = le16_to_cpu(desc->ret_val);
> + if (desc_err) {
> + /* strip off FW internal code */
> + desc_err &= 0xff;
> + }
> +
> + msg_status[i] = cq->bi.tx_msg[ntc];
> + msg_status[i]->status = desc_err;
> +
> + cq->bi.tx_msg[ntc] = NULL;
> +
> + /* Zero out any stale data */
> + memset(desc, 0, sizeof(*desc));
> +
> + ntc++;
> + if (ntc == cq->ring_size)
> + ntc = 0;
> + }
> +
> + cq->next_to_clean = ntc;
> +
> + mutex_unlock(&cq->cq_lock);
> +
> + /* Return number of descriptors actually cleaned */
> + *clean_count = i;
> +
> + return ret;
> }
>
> /**
> @@ -175,7 +462,111 @@ enum iecm_status iecm_ctlq_post_rx_buffs(struct iecm_hw *hw,
> u16 *buff_count,
> struct iecm_dma_mem **buffs)
> {
> - /* stub */
> + enum iecm_status status = 0;
> + struct iecm_ctlq_desc *desc;
> + u16 ntp = cq->next_to_post;
> + bool buffs_avail = false;
> + u16 tbp = ntp + 1;
> + int i = 0;
> +
> + if (*buff_count > cq->ring_size)
> + return IECM_ERR_PARAM;
> +
> + if (*buff_count > 0)
> + buffs_avail = true;
> +
> + mutex_lock(&cq->cq_lock);
> +
> + if (tbp >= cq->ring_size)
> + tbp = 0;
> +
> + if (tbp == cq->next_to_clean)
> + /* Nothing to do */
> + goto post_buffs_out;
> +
> + /* Post buffers for as many as provided or up until the last one used */
> + while (ntp != cq->next_to_clean) {
> + desc = IECM_CTLQ_DESC(cq, ntp);
> +
> + if (!cq->bi.rx_buff[ntp]) {
	if (cq->bi.rx_buff[ntp])
		continue;
and unindent? Though a bare continue here would skip the descriptor
posting below and never advance ntp, so it'd need a bit of
restructuring.
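Maybe something like this to lose a level of indentation without
changing behaviour (sketch only, ring-search body elided):

	if (!cq->bi.rx_buff[ntp] && buffs_avail) {
		/* Give back pointer to DMA buffer */
		cq->bi.rx_buff[ntp] = buffs[i];
		i++;
		if (i >= *buff_count)
			buffs_avail = false;
	} else if (!cq->bi.rx_buff[ntp]) {
		/* search the ring for a spare buffer to move here,
		 * as in the existing inner while loop
		 */
		...
	}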
> + if (!buffs_avail) {
> + /* If the caller hasn't given us any buffers or
> + * there are none left, search the ring itself
> + * for an available buffer to move to this
> + * entry starting at the next entry in the ring
> + */
> + tbp = ntp + 1;
> +
> + /* Wrap ring if necessary */
> + if (tbp >= cq->ring_size)
> + tbp = 0;
> +
> + while (tbp != cq->next_to_clean) {
> + if (cq->bi.rx_buff[tbp]) {
> + cq->bi.rx_buff[ntp] =
> + cq->bi.rx_buff[tbp];
> + cq->bi.rx_buff[tbp] = NULL;
> +
> + /* Found a buffer, no need to
> + * search anymore
> + */
> + break;
> + }
> +
> + /* Wrap ring if necessary */
> + tbp++;
> + if (tbp >= cq->ring_size)
> + tbp = 0;
> + }
> +
> + if (tbp == cq->next_to_clean)
> + goto post_buffs_out;
> + } else {
> + /* Give back pointer to DMA buffer */
> + cq->bi.rx_buff[ntp] = buffs[i];
> + i++;
> +
> + if (i >= *buff_count)
> + buffs_avail = false;
> + }
> + }
> +
> + desc->flags =
> + cpu_to_le16(IECM_CTLQ_FLAG_BUF | IECM_CTLQ_FLAG_RD);
> +
> + /* Post buffers to descriptor */
> + desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);
> + desc->params.indirect.addr_high =
> + cpu_to_le32(IECM_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
> + desc->params.indirect.addr_low =
> + cpu_to_le32(IECM_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
> +
> + ntp++;
> + if (ntp == cq->ring_size)
> + ntp = 0;
> + }
[]
> @@ -186,7 +555,27 @@ iecm_wait_for_event(struct iecm_adapter *adapter,
> enum iecm_vport_vc_state state,
> enum iecm_vport_vc_state err_check)
> {
> - /* stub */
> + enum iecm_status status;
> + int event;
> +
> + event = wait_event_timeout(adapter->vchnl_wq,
> + test_and_clear_bit(state, adapter->vc_state),
> + msecs_to_jiffies(500));
> + if (event) {
> + if (test_and_clear_bit(err_check, adapter->vc_state)) {
> + dev_err(&adapter->pdev->dev,
> + "VC response error %d", err_check);
missing terminating newline in the format string
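i.e.:

			dev_err(&adapter->pdev->dev,
				"VC response error %d\n", err_check);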
> + status = IECM_ERR_CFG;
> + } else {
> + status = 0;
> + }
> + } else {
> + /* Timeout occurred */
> + status = IECM_ERR_NOT_READY;
> + dev_err(&adapter->pdev->dev, "VC timeout, state = %u", state);
here too