Message-ID: <767d0af3-82cc-8e16-a4c6-b0854a518cd5@linaro.org>
Date: Tue, 15 Feb 2022 16:40:08 -0600
From: Alex Elder <elder@...aro.org>
To: Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>,
mhi@...ts.linux.dev
Cc: quic_hemantk@...cinc.com, quic_bbhatt@...cinc.com,
quic_jhugo@...cinc.com, vinod.koul@...aro.org,
bjorn.andersson@...aro.org, dmitry.baryshkov@...aro.org,
quic_vbadigan@...cinc.com, quic_cang@...cinc.com,
quic_skananth@...cinc.com, linux-arm-msm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 21/25] bus: mhi: ep: Add support for reading from the
host
On 2/12/22 12:21 PM, Manivannan Sadhasivam wrote:
> Data transfer between the host and the EP device happens over the
> transfer ring associated with each bi-directional channel pair. The
> host defines the transfer ring by allocating memory for it. The read
> and write pointer addresses of the transfer ring are stored in the
> channel context.
>
> Once the host places elements in the transfer ring, it increments the
> write pointer and rings the channel doorbell. The device receives the
> doorbell interrupt and processes the transfer ring elements.
>
> This commit adds support for reading the transfer ring elements up to
> the write pointer, incrementing the read pointer, and finally sending
> a completion event to the host through the corresponding event ring.
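
For anyone following along, the flow described above amounts to roughly
this on the device side (a sketch only; apart from the ring fields and
mhi_ep_ring_inc_index(), the names here are illustrative rather than
taken from this series):

	/* On receiving the channel doorbell interrupt */
	while (ring->rd_offset != ring->wr_offset) {
		el = &ring->ring_cache[ring->rd_offset];

		/* Copy the data the TRE describes out of host memory */
		process_tre(ring, el);

		/* Tell the host the element was consumed, if requested */
		send_completion_event(ring, el);

		/* Advance the local read pointer */
		mhi_ep_ring_inc_index(ring);
	}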
>
> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>
A couple of indentation nits mentioned below.
Reviewed-by: Alex Elder <elder@...aro.org>
> ---
> drivers/bus/mhi/ep/main.c | 103 ++++++++++++++++++++++++++++++++++++++
> include/linux/mhi_ep.h | 9 ++++
> 2 files changed, 112 insertions(+)
>
> diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
> index 4c2ee517832c..b937c6cda9ba 100644
> --- a/drivers/bus/mhi/ep/main.c
> +++ b/drivers/bus/mhi/ep/main.c
> @@ -336,6 +336,109 @@ int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ep_ring_element
> return ret;
> }
>
> +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
> +{
> + struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
> + mhi_dev->ul_chan;
> + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
> + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
> +
> + return !!(ring->rd_offset == ring->wr_offset);
> +}
> +EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
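
This looks fine.  Just to check my understanding, a client driver would
presumably call this before queueing work, something like the following
(a sketch only; "dir" is whichever of DMA_TO_DEVICE/DMA_FROM_DEVICE
matches the channel, and the surrounding context is made up):

	/* Nothing to process until the host has posted ring elements */
	if (mhi_ep_queue_is_empty(mhi_dev, dir))
		return -EAGAIN;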
> +
> +static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
> + struct mhi_ep_ring *ring,
> + struct mhi_result *result,
> + u32 len)
> +{
> + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
> + size_t bytes_to_read, read_offset, write_offset;
> + struct device *dev = &mhi_cntrl->mhi_dev->dev;
> + struct mhi_ep_ring_element *el;
> + bool td_done = false;
> + void *write_to_loc;
> + u64 read_from_loc;
> + u32 buf_remaining;
> + int ret;
> +
> + buf_remaining = len;
> +
> + do {
> + /* Don't process the transfer ring if the channel is not in RUNNING state */
> + if (mhi_chan->state != MHI_CH_STATE_RUNNING)
> + return -ENODEV;
> +
> + el = &ring->ring_cache[ring->rd_offset];
> +
> + /* Check if there is data pending to be read from previous read operation */
> + if (mhi_chan->tre_bytes_left) {
> + dev_dbg(dev, "TRE bytes remaining: %d\n", mhi_chan->tre_bytes_left);
> + bytes_to_read = min(buf_remaining, mhi_chan->tre_bytes_left);
> + } else {
> + mhi_chan->tre_loc = MHI_EP_TRE_GET_PTR(el);
> + mhi_chan->tre_size = MHI_EP_TRE_GET_LEN(el);
> + mhi_chan->tre_bytes_left = mhi_chan->tre_size;
> +
> + bytes_to_read = min(buf_remaining, mhi_chan->tre_size);
> + }
> +
> + read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
> + write_offset = len - buf_remaining;
> + read_from_loc = mhi_chan->tre_loc + read_offset;
> + write_to_loc = result->buf_addr + write_offset;
> +
> + dev_dbg(dev, "Reading %zd bytes from channel (%d)\n", bytes_to_read, ring->ch_id);
> + ret = mhi_cntrl->read_from_host(mhi_cntrl, read_from_loc, write_to_loc,
> + bytes_to_read);
> + if (ret < 0)
> + return ret;
> +
> + buf_remaining -= bytes_to_read;
> + mhi_chan->tre_bytes_left -= bytes_to_read;
> +
> + /*
> + * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
> + * read completely:
> + *
> + * 1. Send completion event to the host based on the flags set in TRE.
> + * 2. Increment the local read offset of the transfer ring.
Your comments in this section explain some things that
I did not completely understand for a *very* long time.
The same flags are used in IPA, but are not as well
documented as they are for MHI.
> + */
> + if (!mhi_chan->tre_bytes_left) {
> + /*
> + * The host will split the data packet into multiple TREs if it can't fit
> + * the packet in a single TRE. In that case, CHAIN flag will be set by the
> + * host for all TREs except the last one.
> + */
> + if (MHI_EP_TRE_GET_CHAIN(el)) {
> + /*
> + * IEOB (Interrupt on End of Block) flag will be set by the host if
> + * it expects the completion event for all TREs of a TD.
> + */
> + if (MHI_EP_TRE_GET_IEOB(el))
> + mhi_ep_send_completion_event(mhi_cntrl,
> + ring, MHI_EP_TRE_GET_LEN(el), MHI_EV_CC_EOB);
Check your indentation above.
> + } else {
> + /*
> + * IEOT (Interrupt on End of Transfer) flag will be set by the host
> + * for the last TRE of the TD and expects the completion event for
> + * the same.
> + */
> + if (MHI_EP_TRE_GET_IEOT(el))
> + mhi_ep_send_completion_event(mhi_cntrl,
> + ring, MHI_EP_TRE_GET_LEN(el), MHI_EV_CC_EOT);
Indentation here too.
> + td_done = true;
> + }
> +
> + mhi_ep_ring_inc_index(ring);
> + }
> +
> + result->bytes_xferd += bytes_to_read;
> + } while (buf_remaining && !td_done);
> +
> + return 0;
> +}
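
And presumably the caller (added in a later patch) hands this a
struct mhi_result with a destination buffer, roughly like so (my
reading of the interface, not code from this series):

	struct mhi_result result = {};
	int ret;

	result.buf_addr = buf;	/* device-local destination buffer */
	ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
	if (ret)
		return ret;

	/* result.bytes_xferd now holds how much was copied from the host */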
> +
> static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
> {
> struct device *dev = &mhi_cntrl->mhi_dev->dev;
> diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
> index 276d29fef465..aaf4b6942037 100644
> --- a/include/linux/mhi_ep.h
> +++ b/include/linux/mhi_ep.h
> @@ -268,4 +268,13 @@ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
> */
> void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
>
> +/**
> + * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty
> + * @mhi_dev: Device associated with the channels
> + * @dir: DMA direction for the channel
> + *
> + * Return: true if the queue is empty, false otherwise.
> + */
> +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
> +
> #endif