Message-ID: <1532023084-28083-22-git-send-email-pawell@cadence.com>
Date:   Thu, 19 Jul 2018 18:57:54 +0100
From:   Pawel Laszczak <pawell@...ence.com>
To:     unlisted-recipients:; (no To-header on input)
CC:     Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        <linux-usb@...r.kernel.org>, Felipe Balbi <balbi@...nel.org>,
        <linux-kernel@...r.kernel.org>, <ltyrala@...ence.com>,
        <adouglas@...ence.com>, <pawell@...ence.com>
Subject: [PATCH 21/31] usb: usbssp: add queuing procedure for BULK and INT transfers

This patch adds the usbssp_queue_bulk_tx and usbssp_queue_int_tx
functions, which prepare a TD, add it to the transfer ring, and arm
the transfer.
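
As an illustration of the TRB bookkeeping: count_trbs() splits a
buffer on TRB_MAX_BUFF_SIZE boundaries, so the TRB count depends on
the buffer's offset within its first boundary region. A sketch of the
arithmetic, assuming a 64 KiB TRB_MAX_BUFF_SIZE (the request size and
offset below are made-up example values, not taken from the driver):

	/* 100 KiB request starting 512 bytes past a 64 KiB boundary */
	num_trbs = DIV_ROUND_UP(102400 + (addr & (SZ_64K - 1)), SZ_64K);
	/* = DIV_ROUND_UP(102912, 65536) = 2 TRBs */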

Signed-off-by: Pawel Laszczak <pawell@...ence.com>
---
 drivers/usb/usbssp/gadget-ring.c | 286 ++++++++++++++++++++++++++++++-
 1 file changed, 285 insertions(+), 1 deletion(-)

diff --git a/drivers/usb/usbssp/gadget-ring.c b/drivers/usb/usbssp/gadget-ring.c
index a0cfce0dc49d..4bb13f9e311a 100644
--- a/drivers/usb/usbssp/gadget-ring.c
+++ b/drivers/usb/usbssp/gadget-ring.c
@@ -1573,6 +1573,74 @@ static int prepare_transfer(struct usbssp_udc *usbssp_data,
 	return 0;
 }
 
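+/*
+ * Count the TRBs needed for a buffer of the given length: the buffer is
+ * split on TRB_MAX_BUFF_SIZE boundaries, starting from addr. A
+ * zero-length buffer still needs one TRB.
+ */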
+unsigned int count_trbs(u64 addr, u64 len)
+{
+	unsigned int num_trbs;
+
+	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+				TRB_MAX_BUFF_SIZE);
+	if (num_trbs == 0)
+		num_trbs++;
+
+	return num_trbs;
+}
+
+static inline unsigned int count_trbs_needed(struct usbssp_request *req_priv)
+{
+	return count_trbs(req_priv->request.dma, req_priv->request.length);
+}
+
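+/* Sum the TRBs needed for every DMA-mapped sg entry of the request. */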
+static unsigned int count_sg_trbs_needed(struct usbssp_request *req_priv)
+{
+	struct scatterlist *sg;
+	unsigned int i, len, full_len, num_trbs = 0;
+
+	full_len = req_priv->request.length;
+
+	for_each_sg(req_priv->sg, sg, req_priv->num_pending_sgs, i) {
+		len = sg_dma_len(sg);
+		num_trbs += count_trbs(sg_dma_address(sg), len);
+		len = min_t(unsigned int, len, full_len);
+		full_len -= len;
+		if (full_len == 0)
+			break;
+	}
+
+	return num_trbs;
+}
+
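+/* Warn if the queued TRB lengths don't add up to the request length. */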
+static void check_trb_math(struct usbssp_request *req_priv, int running_total)
+{
+	if (unlikely(running_total != req_priv->request.length))
+		dev_err(req_priv->dep->usbssp_data->dev,
+			"%s - ep %#x - Miscalculated tx length, queued %#x (%d), asked for %#x (%d)\n",
+			__func__,
+			req_priv->dep->endpoint.desc->bEndpointAddress,
+			running_total, running_total,
+			req_priv->request.length,
+			req_priv->request.length);
+}
+
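+/*
+ * Give the first TRB of the TD back to the controller by writing its
+ * saved cycle bit, then ring the endpoint doorbell.
+ */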
+static void giveback_first_trb(struct usbssp_udc *usbssp_data,
+			       unsigned int ep_index,
+			       unsigned int stream_id,
+			       int start_cycle,
+			       struct usbssp_generic_trb *start_trb)
+{
+	/*
+	 * Pass all the TRBs to the hardware at once and make sure this write
+	 * isn't reordered.
+	 */
+	wmb();
+	if (start_cycle)
+		start_trb->field[3] |= cpu_to_le32(start_cycle);
+	else
+		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+	usbssp_ring_ep_doorbell(usbssp_data, ep_index, stream_id);
+}
+
 /*
  * USBSSP uses normal TRBs for both bulk and interrupt. When the interrupt
  * endpoint is to be serviced, the DC will consume (at most) one TD. A TD
@@ -1628,12 +1696,228 @@ static u32 usbssp_td_remainder(struct usbssp_udc *usbssp_data,
 	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
 }
 
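+/*
+ * Make sure the last normal TRB on a ring segment ends on a
+ * wMaxPacketSize boundary: shorten it if it can be split, otherwise
+ * redirect the tail through a per-segment bounce buffer. Returns 1 when
+ * the bounce buffer is used, 0 otherwise.
+ */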
+static int usbssp_align_td(struct usbssp_udc *usbssp_data,
+			   struct usbssp_request *req_priv, u32 enqd_len,
+			   u32 *trb_buff_len, struct usbssp_segment *seg)
+{
+	struct device *dev = usbssp_data->dev;
+	unsigned int unalign;
+	unsigned int max_pkt;
+	u32 new_buff_len;
+
+	max_pkt = GET_MAX_PACKET(
+			usb_endpoint_maxp(req_priv->dep->endpoint.desc));
+	unalign = (enqd_len + *trb_buff_len) % max_pkt;
+
+	/* we got lucky, last normal TRB data on segment is packet aligned */
+	if (unalign == 0)
+		return 0;
+
+	dev_dbg(usbssp_data->dev, "Unaligned %d bytes, buff len %d\n",
+		unalign, *trb_buff_len);
+
+	/* is the last normal TRB alignable by splitting it? */
+	if (*trb_buff_len > unalign) {
+		*trb_buff_len -= unalign;
+		dev_dbg(usbssp_data->dev, "split align, new buff len %d\n",
+			*trb_buff_len);
+		return 0;
+	}
+
+	/*
+	 * We want enqd_len + trb_buff_len to sum up to a number which is
+	 * divisible by the endpoint's wMaxPacketSize. IOW:
+	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
+	 */
+	new_buff_len = max_pkt - (enqd_len % max_pkt);
+
+	if (new_buff_len > (req_priv->request.length - enqd_len))
+		new_buff_len = (req_priv->request.length - enqd_len);
+
+	/* create a max_pkt-sized bounce buffer pointed to by the last TRB */
+	if (req_priv->direction) {
+		sg_pcopy_to_buffer(req_priv->request.sg,
+				req_priv->request.num_mapped_sgs,
+				seg->bounce_buf, new_buff_len, enqd_len);
+		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+						max_pkt, DMA_TO_DEVICE);
+	} else {
+		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+						max_pkt, DMA_FROM_DEVICE);
+	}
+
+	if (dma_mapping_error(dev, seg->bounce_dma)) {
+		/* try without aligning. */
+		dev_warn(usbssp_data->dev,
+			"Failed mapping bounce buffer, not aligning\n");
+		return 0;
+	}
+	*trb_buff_len = new_buff_len;
+	seg->bounce_len = new_buff_len;
+	seg->bounce_offs = enqd_len;
+
+	dev_dbg(usbssp_data->dev, "Bounce align, new buff len %d\n",
+			*trb_buff_len);
+
+	return 1;
+}
+
 int usbssp_queue_bulk_tx(struct usbssp_udc *usbssp_data,
 			 gfp_t mem_flags,
 			 struct usbssp_request *req_priv,
 			 unsigned int ep_index)
 {
-	/*TODO: function musb be implemented*/
+	struct usbssp_ring *ring;
+	struct usbssp_td *td;
+	struct usbssp_generic_trb *start_trb;
+	struct scatterlist *sg = NULL;
+	bool more_trbs_coming = true;
+	bool need_zero_pkt = false;
+	bool first_trb = true;
+	unsigned int num_trbs;
+	unsigned int start_cycle, num_sgs = 0;
+	unsigned int enqd_len, block_len, trb_buff_len, full_len;
+	int sent_len, ret;
+	u32 field, length_field, remainder;
+	u64 addr, send_addr;
+
+	ring = usbssp_request_to_transfer_ring(usbssp_data, req_priv);
+	if (!ring)
+		return -EINVAL;
+
+	full_len = req_priv->request.length;
+	/* If we have a scatter/gather list, use it. */
+	if (req_priv->request.num_sgs) {
+		num_sgs = req_priv->num_pending_sgs;
+		sg = req_priv->sg;
+		addr = (u64) sg_dma_address(sg);
+		block_len = sg_dma_len(sg);
+		num_trbs = count_sg_trbs_needed(req_priv);
+	} else {
+		num_trbs = count_trbs_needed(req_priv);
+		addr = (u64) req_priv->request.dma;
+		block_len = full_len;
+	}
+
+	ret = prepare_transfer(usbssp_data, &usbssp_data->devs,
+			ep_index, req_priv->request.stream_id,
+			num_trbs, req_priv, 0, mem_flags);
+	if (unlikely(ret < 0))
+		return ret;
+
+	/* Deal with request.zero - need one more td/trb */
+	if (req_priv->request.zero && req_priv->num_tds_done > 1)
+		need_zero_pkt = true;
+
+	td = &req_priv->td[0];
+
+	dev_dbg(usbssp_data->dev,
+		"Queue Bulk transfer to %s - ep_index: %d, num trb: %d, block len %d, nzp: %d\n",
+		req_priv->dep->name, ep_index, num_trbs, block_len,
+		need_zero_pkt);
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs. The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ring->enqueue->generic;
+	start_cycle = ring->cycle_state;
+	send_addr = addr;
+
+	/* Queue the TRBs, even if they are zero-length */
+	for (enqd_len = 0; first_trb || enqd_len < full_len;
+	     enqd_len += trb_buff_len) {
+		field = TRB_TYPE(TRB_NORMAL);
+
+		/* TRB buffer should not cross 64KB boundaries */
+		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
+
+		if (enqd_len + trb_buff_len > full_len)
+			trb_buff_len = full_len - enqd_len;
+
+		/* Don't change the cycle bit of the first TRB until later */
+		if (first_trb) {
+			first_trb = false;
+			if (start_cycle == 0)
+				field |= TRB_CYCLE;
+		} else {
+			field |= ring->cycle_state;
+		}
+
+		/* Chain all the TRBs together; clear the chain bit in the last
+		 * TRB to indicate it's the last TRB in the chain.
+		 */
+		if (enqd_len + trb_buff_len < full_len) {
+			field |= TRB_CHAIN;
+			if (trb_is_link(ring->enqueue + 1)) {
+				if (usbssp_align_td(usbssp_data, req_priv,
+					enqd_len, &trb_buff_len,
+					ring->enq_seg)) {
+					send_addr = ring->enq_seg->bounce_dma;
+					/* assuming TD won't span 2 segs */
+					td->bounce_seg = ring->enq_seg;
+				}
+			}
+		}
+		if (enqd_len + trb_buff_len >= full_len) {
+			field &= ~TRB_CHAIN;
+			field |= TRB_IOC;
+			more_trbs_coming = false;
+			td->last_trb = ring->enqueue;
+		}
+
+		/* Only set interrupt on short packet for OUT endpoints */
+		if (!req_priv->direction)
+			field |= TRB_ISP;
+
+		/* Set the TRB length, TD size, and interrupter fields. */
+		remainder = usbssp_td_remainder(usbssp_data, enqd_len,
+						trb_buff_len, full_len, req_priv,
+						more_trbs_coming);
+
+		length_field = TRB_LEN(trb_buff_len) |
+			TRB_TD_SIZE(remainder) |
+			TRB_INTR_TARGET(0);
+
+		queue_trb(usbssp_data, ring, more_trbs_coming | need_zero_pkt,
+			lower_32_bits(send_addr),
+			upper_32_bits(send_addr),
+			length_field,
+			field);
+
+		addr += trb_buff_len;
+		sent_len = trb_buff_len;
+
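+		/*
+		 * If this TRB consumed the rest of the current sg entry,
+		 * advance to the sg entry holding the next byte to queue.
+		 */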
+		while (sg && sent_len >= block_len) {
+			/* New sg entry */
+			--num_sgs;
+			sent_len -= block_len;
+			if (num_sgs != 0) {
+				sg = sg_next(sg);
+				block_len = sg_dma_len(sg);
+				addr = (u64) sg_dma_address(sg);
+				addr += sent_len;
+			}
+		}
+		block_len -= sent_len;
+		send_addr = addr;
+	}
+
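+	/* Queue one more zero-length TD if usb_request.zero asked for it. */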
+	if (need_zero_pkt) {
+		ret = prepare_transfer(usbssp_data, &usbssp_data->devs,
+				ep_index, req_priv->request.stream_id,
+				1, req_priv, 1, mem_flags);
+		req_priv->td[1].last_trb = ring->enqueue;
+		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
+		queue_trb(usbssp_data, ring, 0, 0, 0,
+			TRB_INTR_TARGET(0), field);
+	}
+
+	check_trb_math(req_priv, enqd_len);
+	giveback_first_trb(usbssp_data, ep_index, req_priv->request.stream_id,
+			start_cycle, start_trb);
 	return 0;
 }
 
-- 
2.17.1
