Message-ID: <1532023084-28083-21-git-send-email-pawell@cadence.com>
Date:   Thu, 19 Jul 2018 18:57:53 +0100
From:   Pawel Laszczak <pawell@...ence.com>
To:     unlisted-recipients:; (no To-header on input)
CC:     Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        <linux-usb@...r.kernel.org>, Felipe Balbi <balbi@...nel.org>,
        <linux-kernel@...r.kernel.org>, <ltyrala@...ence.com>,
        <adouglas@...ence.com>, <pawell@...ence.com>
Subject: [PATCH 20/31] usb: usbssp: Add queuing procedure for control transfers

This patch implements the generic usbssp_enqueue function in the
driver. All request queuing in the driver must go through it.

It also adds the control-transfer-specific usbssp_queue_ctrl_tx
function, which prepares the TRBs, adds them to the EP0 transfer ring
and rings the doorbell.

Signed-off-by: Pawel Laszczak <pawell@...ence.com>
---
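Not part of the patch, for illustration only: the TD-size rule documented
above usbssp_td_remainder() and the zero-length-packet condition checked in
usbssp_enqueue() can be exercised with a small standalone userspace sketch.
The helper names, the local DIV_ROUND_UP stand-in and the sample numbers
below are made up for the example and are not taken from the driver.

/* Standalone userspace sketch (not driver code) of the TD-size math
 * described above usbssp_td_remainder(), plus the "needs a zero-length
 * packet" check used in usbssp_enqueue().
 */
#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* TD size = packets remaining in the TD, not counting the current TRB */
static unsigned int td_remainder(unsigned int transferred,
				 unsigned int trb_buff_len,
				 unsigned int td_total_len,
				 unsigned int maxp,
				 bool more_trbs_coming)
{
	unsigned int total_packet_count;

	/* Last TRB of the TD, a zero-length TD, or a single-TRB TD */
	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
	    trb_buff_len == td_total_len)
		return 0;

	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
	return total_packet_count - (transferred + trb_buff_len) / maxp;
}

/* A trailing zero-length TD is needed when the gadget asked for one
 * (request.zero), the transfer is non-empty, and its length is an exact
 * multiple of the endpoint's wMaxPacketSize.
 */
static bool needs_zlp(bool zero, unsigned int length, unsigned int maxp)
{
	return zero && length && (length % maxp == 0);
}

int main(void)
{
	/* 3000-byte TD on a 512-byte endpoint, first TRB carries 1024 bytes:
	 * 6 packets total, 2 accounted for so far -> TD size field is 4.
	 */
	printf("TD size: %u\n", td_remainder(0, 1024, 3000, 512, true));

	/* 1024 bytes with request.zero set is an exact multiple of 512,
	 * so one extra zero-length TD is queued.
	 */
	printf("needs ZLP: %d\n", needs_zlp(true, 1024, 512));
	return 0;
}

With the sample numbers above, the sketch prints a TD size of 4 (six
512-byte packets in a 3000-byte TD, two accounted for by the first
1024-byte TRB) and reports that a trailing zero-length TD is needed for a
1024-byte request with request.zero set.
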
 drivers/usb/usbssp/gadget-ring.c | 272 +++++++++++++++++++++++++++++++
 drivers/usb/usbssp/gadget.c      | 115 ++++++++++++-
 drivers/usb/usbssp/gadget.h      |  29 ++++
 3 files changed, 414 insertions(+), 2 deletions(-)

diff --git a/drivers/usb/usbssp/gadget-ring.c b/drivers/usb/usbssp/gadget-ring.c
index 1b57f5180115..a0cfce0dc49d 100644
--- a/drivers/usb/usbssp/gadget-ring.c
+++ b/drivers/usb/usbssp/gadget-ring.c
@@ -310,6 +310,43 @@ static void ring_doorbell_for_active_rings(struct usbssp_udc *usbssp_data,
 	}
 }
 
+/* Get the right ring for the given ep_index and stream_id.
+ * If the endpoint supports streams, boundary check the USB request's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+struct usbssp_ring *usbssp_triad_to_transfer_ring(struct usbssp_udc *usbssp_data,
+						  unsigned int ep_index,
+						  unsigned int stream_id)
+{
+	struct usbssp_ep *ep;
+
+	ep = &usbssp_data->devs.eps[ep_index];
+
+	/* Common case: no streams */
+	if (!(ep->ep_state & EP_HAS_STREAMS))
+		return ep->ring;
+
+	if (stream_id == 0) {
+		dev_warn(usbssp_data->dev,
+			"WARN: ep index %u has streams, "
+			"but USB Request has no stream ID.\n",
+			 ep_index);
+		return NULL;
+	}
+
+	if (stream_id < ep->stream_info->num_streams)
+		return ep->stream_info->stream_rings[stream_id];
+
+	dev_warn(usbssp_data->dev,
+		"WARN: ep index %u has "
+		"stream IDs 1 to %u allocated, "
+		"but stream ID %u is requested.\n",
+		ep_index,
+		ep->stream_info->num_streams - 1,
+		stream_id);
+	return NULL;
+}
+
 /* Must be called with usbssp_data->lock held in interrupt context
  * or usbssp_data->irq_thread_lock from thread context (deferred interrupt)
  */
@@ -1494,6 +1531,232 @@ static int prepare_ring(struct usbssp_udc *usbssp_data,
 	return 0;
 }
 
+static int prepare_transfer(struct usbssp_udc *usbssp_data,
+			    struct usbssp_device *dev_priv,
+			    unsigned int ep_index,
+			    unsigned int stream_id,
+			    unsigned int num_trbs,
+			    struct usbssp_request *req_priv,
+			    unsigned int td_index,
+			    gfp_t mem_flags)
+{
+	int ret;
+	struct usbssp_td *td;
+	struct usbssp_ring *ep_ring;
+	struct usbssp_ep_ctx *ep_ctx = usbssp_get_ep_ctx(usbssp_data,
+					dev_priv->out_ctx, ep_index);
+
+	ep_ring = usbssp_stream_id_to_ring(dev_priv, ep_index, stream_id);
+
+	if (!ep_ring) {
+		dev_dbg(usbssp_data->dev,
+			"Can't prepare ring for bad stream ID %u\n",
+			stream_id);
+		return -EINVAL;
+	}
+
+	ret = prepare_ring(usbssp_data, ep_ring, GET_EP_CTX_STATE(ep_ctx),
+			num_trbs, mem_flags);
+
+	if (ret)
+		return ret;
+
+	td = &req_priv->td[td_index];
+	INIT_LIST_HEAD(&td->td_list);
+
+	td->priv_request = req_priv;
+	/* Add this TD to the tail of the endpoint ring's TD list */
+	list_add_tail(&td->td_list, &ep_ring->td_list);
+	td->start_seg = ep_ring->enq_seg;
+	td->first_trb = ep_ring->enqueue;
+
+	return 0;
+}
+
+/*
+ * USBSSP uses normal TRBs for both bulk and interrupt. When the interrupt
+ * endpoint is to be serviced, the DC will consume (at most) one TD. A TD
+ * (comprised of sg list entries) can take several service intervals to
+ * transmit.
+ */
+int usbssp_queue_intr_tx(struct usbssp_udc *usbssp_data, gfp_t mem_flags,
+			 struct usbssp_request *req_priv, unsigned int ep_index)
+{
+	struct usbssp_ep_ctx *ep_ctx;
+
+	ep_ctx = usbssp_get_ep_ctx(usbssp_data, usbssp_data->devs.out_ctx,
+				ep_index);
+
+	return usbssp_queue_bulk_tx(usbssp_data, mem_flags, req_priv, ep_index);
+}
+
+/*
+ * For USBSSP controllers, TD size is the number of max packet sized
+ * packets remaining in the TD (*not* including this TRB).
+ *
+ * Total TD packet count = total_packet_count =
+ *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
+ *
+ * Packets transferred up to and including this TRB = packets_transferred =
+ *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
+ *
+ * TD size = total_packet_count - packets_transferred
+ *
+ * For USBSSP it must fit in bits 21:17, so it can't be bigger than 31.
+ * This is taken care of in the TRB_TD_SIZE() macro
+ *
+ * The last TRB in a TD must have the TD size set to zero.
+ */
+static u32 usbssp_td_remainder(struct usbssp_udc *usbssp_data,
+			       int transferred,
+			       int trb_buff_len,
+			       unsigned int td_total_len,
+			       struct usbssp_request *req_priv,
+			       bool more_trbs_coming)
+{
+	u32 maxp, total_packet_count;
+
+	/* Last TRB of the TD, a zero-length TD, or a single-TRB TD. */
+	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
+	    trb_buff_len == td_total_len)
+		return 0;
+
+	maxp = usb_endpoint_maxp(req_priv->dep->endpoint.desc);
+	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
+
+	/* Queuing functions don't count the current TRB into transferred */
+	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
+}
+
+int usbssp_queue_bulk_tx(struct usbssp_udc *usbssp_data,
+			 gfp_t mem_flags,
+			 struct usbssp_request *req_priv,
+			 unsigned int ep_index)
+{
+	/* TODO: function must be implemented */
+	return 0;
+}
+
+int usbssp_queue_ctrl_tx(struct usbssp_udc *usbssp_data,
+			 gfp_t mem_flags,
+			 struct usbssp_request *req_priv,
+			 unsigned int ep_index)
+{
+	struct usbssp_ring *ep_ring;
+	int num_trbs;
+	int ret;
+	struct usbssp_generic_trb *start_trb;
+	int start_cycle;
+	u32 field, length_field, remainder;
+	struct usbssp_td *td;
+	struct usbssp_ep *dep = req_priv->dep;
+
+	ep_ring = usbssp_request_to_transfer_ring(usbssp_data, req_priv);
+	if (!ep_ring)
+		return -EINVAL;
+
+	if (usbssp_data->delayed_status) {
+		dev_dbg(usbssp_data->dev, "Queue CTRL: delayed finished\n");
+		usbssp_data->delayed_status = false;
+		usb_gadget_set_state(&usbssp_data->gadget,
+				USB_STATE_CONFIGURED);
+	}
+
+	/* 1 TRB for data, 1 for status */
+	if (usbssp_data->three_stage_setup)
+		num_trbs = 2;
+	else
+		num_trbs = 1;
+
+	ret = prepare_transfer(usbssp_data, &usbssp_data->devs,
+			req_priv->epnum, req_priv->request.stream_id,
+			num_trbs, req_priv, 0, mem_flags);
+
+	if (ret < 0)
+		return ret;
+
+	td = &req_priv->td[0];
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs. The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	/* If there's data, queue data TRBs */
+	/* Only set interrupt on short packet for OUT endpoints */
+
+	if (usbssp_data->ep0_expect_in)
+		field = TRB_TYPE(TRB_DATA) | TRB_IOC;
+	else
+		field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC;
+
+	if (req_priv->request.length > 0) {
+		remainder = usbssp_td_remainder(usbssp_data, 0,
+						req_priv->request.length,
+						req_priv->request.length,
+						req_priv, 1);
+
+		length_field = TRB_LEN(req_priv->request.length) |
+			TRB_TD_SIZE(remainder) |
+			TRB_INTR_TARGET(0);
+
+		if (usbssp_data->ep0_expect_in)
+			field |= TRB_DIR_IN;
+
+		queue_trb(usbssp_data, ep_ring, true,
+			lower_32_bits(req_priv->request.dma),
+			upper_32_bits(req_priv->request.dma),
+			length_field,
+			field | ep_ring->cycle_state |
+			TRB_SETUPID(usbssp_data->setupId) |
+			usbssp_data->setup_speed);
+		usbssp_data->ep0state = USBSSP_EP0_DATA_PHASE;
+	}
+
+	/* Save the DMA address of the last TRB in the TD */
+	td->last_trb = ep_ring->enqueue;
+
+	/* Queue status TRB */
+
+	if (req_priv->request.length > 0 && usbssp_data->ep0_expect_in)
+		field = TRB_DIR_IN;
+	else
+		field = 0;
+
+	if (req_priv->request.length == 0)
+		field |= ep_ring->cycle_state;
+	else
+		field |= (ep_ring->cycle_state ^ 1);
+
+	if (dep->ep_state & EP0_HALTED_STATUS) {
+		/*
+		 * If the endpoint is to be halted in the Status Stage,
+		 * the driver must set the TRB_SETUPSTAT_STALL bit.
+		 */
+		dev_dbg(usbssp_data->dev,
+			"Status Stage phase prepared with STALL bit\n");
+		dep->ep_state &= ~EP0_HALTED_STATUS;
+		field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL);
+	} else {
+		field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK);
+	}
+
+	queue_trb(usbssp_data, ep_ring, false,
+		0,
+		0,
+		TRB_INTR_TARGET(0),
+		/* Event on completion */
+		field | TRB_IOC | TRB_SETUPID(usbssp_data->setupId) |
+		TRB_TYPE(TRB_STATUS) | usbssp_data->setup_speed);
+
+	usbssp_ring_ep_doorbell(usbssp_data, ep_index,
+				req_priv->request.stream_id);
+	return 0;
+}
+
 /* Stop endpoint after disconnecting device.*/
 int usbssp_cmd_stop_ep(struct usbssp_udc *usbssp_data, struct usb_gadget *g,
 		       struct usbssp_ep *ep_priv)
@@ -1555,6 +1818,15 @@ int usbssp_cmd_stop_ep(struct usbssp_udc *usbssp_data, struct usb_gadget *g,
 	return ret;
 }
 
+int usbssp_queue_isoc_tx_prepare(struct usbssp_udc *usbssp_data,
+				 gfp_t mem_flags,
+				 struct usbssp_request *req_priv,
+				 unsigned int ep_index)
+{
+	/* TODO: function must be implemented */
+	return 0;
+}
+
 /****		Command Ring Operations		****/
 /*
  * Generic function for queueing a command TRB on the command ring.
diff --git a/drivers/usb/usbssp/gadget.c b/drivers/usb/usbssp/gadget.c
index 2fd6d27ef0bd..e2751693404d 100644
--- a/drivers/usb/usbssp/gadget.c
+++ b/drivers/usb/usbssp/gadget.c
@@ -401,8 +401,119 @@ static int usbssp_check_args(struct usbssp_udc *usbssp_data,
 
 int usbssp_enqueue(struct usbssp_ep *dep, struct usbssp_request *req_priv)
 {
-	/*TODO: this function must be implemented*/
-	return 0;
+	int ret = 0;
+	unsigned int ep_index;
+	unsigned int ep_state;
+	const struct usb_endpoint_descriptor *desc;
+	struct usbssp_udc *usbssp_data = dep->usbssp_data;
+	int num_tds;
+
+	if (usbssp_check_args(usbssp_data, dep, true, true, __func__) <= 0)
+		return -EINVAL;
+
+	if (!dep->endpoint.desc) {
+		dev_err(usbssp_data->dev, "%s: can't queue to disabled endpoint\n",
+			dep->name);
+		return -ESHUTDOWN;
+	}
+
+	if (WARN(req_priv->dep != dep, "request %p belongs to '%s'\n",
+	    &req_priv->request, req_priv->dep->name)) {
+		dev_err(usbssp_data->dev, "%s: reequest %p belongs to '%s'\n",
+			dep->name, &req_priv->request, req_priv->dep->name);
+		return -EINVAL;
+	}
+
+	if (!list_empty(&dep->pending_list) && req_priv->epnum == 0) {
+		dev_warn(usbssp_data->dev,
+			"Ep0 has incomplete previous transfer'\n");
+		return -EBUSY;
+	}
+
+	//pm_runtime_get(usbssp_data->dev);
+	req_priv->request.actual = 0;
+	req_priv->request.status = -EINPROGRESS;
+	req_priv->direction = dep->direction;
+	req_priv->epnum = dep->number;
+
+	desc = req_priv->dep->endpoint.desc;
+	ep_index = usbssp_get_endpoint_index(desc);
+	ep_state = usbssp_data->devs.eps[ep_index].ep_state;
+	req_priv->sg = req_priv->request.sg;
+
+	req_priv->num_pending_sgs = req_priv->request.num_mapped_sgs;
+	dev_info(usbssp_data->dev, "SG list addr: %p with %d elements.\n",
+		req_priv->sg, req_priv->num_pending_sgs);
+
+	list_add_tail(&req_priv->list, &dep->pending_list);
+
+	if (req_priv->num_pending_sgs > 0)
+		num_tds = req_priv->num_pending_sgs;
+	else
+		num_tds = 1;
+
+	if (req_priv->request.zero && req_priv->request.length &&
+	   (req_priv->request.length % dep->endpoint.maxpacket == 0)) {
+		num_tds++;
+	}
+
+	ret = usb_gadget_map_request_by_dev(usbssp_data->dev,
+					&req_priv->request,
+					dep->direction);
+
+	if (ret) {
+		dev_err(usbssp_data->dev, "Can't map request to DMA\n");
+		goto req_del;
+	}
+
+	/* Allocate memory for transfer descriptors */
+	req_priv->td = kcalloc(num_tds, sizeof(struct usbssp_td), GFP_ATOMIC);
+
+	if (!req_priv->td) {
+		ret = -ENOMEM;
+		goto free_priv;
+	}
+
+	if (ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
+		dev_warn(usbssp_data->dev, "WARN: Can't enqueue USB Request, "
+			"ep in streams transition state %x\n",
+			ep_state);
+		ret = -EINVAL;
+		goto free_priv;
+	}
+
+	req_priv->num_tds = num_tds;
+	req_priv->num_tds_done = 0;
+	trace_usbssp_request_enqueue(&req_priv->request);
+
+	switch (usb_endpoint_type(desc)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		ret = usbssp_queue_ctrl_tx(usbssp_data, GFP_ATOMIC, req_priv,
+					ep_index);
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		ret = usbssp_queue_bulk_tx(usbssp_data, GFP_ATOMIC, req_priv,
+					ep_index);
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		ret = usbssp_queue_intr_tx(usbssp_data, GFP_ATOMIC, req_priv,
+					ep_index);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		ret = usbssp_queue_isoc_tx_prepare(usbssp_data, GFP_ATOMIC,
+						req_priv, ep_index);
+	}
+
+	if (ret < 0) {
+free_priv:
+		usb_gadget_unmap_request_by_dev(usbssp_data->dev,
+					&req_priv->request, dep->direction);
+		usbssp_request_free_priv(req_priv);
+
+req_del:
+		list_del(&req_priv->list);
+	}
+	return ret;
 }
 
 /*
diff --git a/drivers/usb/usbssp/gadget.h b/drivers/usb/usbssp/gadget.h
index d582c9dbe6b4..0477eb0f354c 100644
--- a/drivers/usb/usbssp/gadget.h
+++ b/drivers/usb/usbssp/gadget.h
@@ -1752,6 +1752,20 @@ int usbssp_queue_address_device(struct usbssp_udc *usbssp_data,
 int usbssp_queue_stop_endpoint(struct usbssp_udc *usbssp_data,
 			struct usbssp_command *cmd,
 			unsigned int ep_index, int suspend);
+int usbssp_queue_ctrl_tx(struct usbssp_udc *usbssp_data, gfp_t mem_flags,
+			 struct usbssp_request *req_priv,
+			 unsigned int ep_index);
+
+int usbssp_queue_bulk_tx(struct usbssp_udc *usbssp_data, gfp_t mem_flags,
+			struct usbssp_request *req_priv,
+			unsigned int ep_index);
+int usbssp_queue_intr_tx(struct usbssp_udc *usbssp_data, gfp_t mem_flags,
+			struct usbssp_request *req_priv,
+			unsigned int ep_index);
+int usbssp_queue_isoc_tx_prepare(
+			struct usbssp_udc *usbssp_data, gfp_t mem_flags,
+			struct usbssp_request *req_priv,
+			unsigned int ep_index);
 int usbssp_queue_reset_ep(struct usbssp_udc *usbssp_data,
 			struct usbssp_command *cmd,
 			unsigned int ep_index,
@@ -1784,6 +1798,10 @@ struct usbssp_slot_ctx *usbssp_get_slot_ctx(struct usbssp_udc *usbssp_data,
 struct usbssp_ep_ctx *usbssp_get_ep_ctx(struct usbssp_udc *usbssp_data,
 					struct usbssp_container_ctx *ctx,
 					unsigned int ep_index);
+struct usbssp_ring *usbssp_triad_to_transfer_ring(
+						struct usbssp_udc *usbssp_data,
+						unsigned int ep_index,
+						unsigned int stream_id);
 /* USBSSP gadget interface*/
 void usbssp_suspend_gadget(struct usbssp_udc *usbssp_data);
 void usbssp_resume_gadget(struct usbssp_udc *usbssp_data);
@@ -1807,6 +1825,17 @@ int usbssp_setup_analyze(struct usbssp_udc *usbssp_data);
 int usbssp_status_stage(struct usbssp_udc *usbssp_data);
 
 int usbssp_reset_device(struct usbssp_udc *usbssp_data);
+
+static inline struct usbssp_ring *usbssp_request_to_transfer_ring(
+					struct usbssp_udc *usbssp_data,
+					struct usbssp_request *req_priv)
+{
+	return usbssp_triad_to_transfer_ring(usbssp_data,
+			usbssp_get_endpoint_index(req_priv->dep->endpoint.desc),
+			req_priv->request.stream_id);
+}
+
+
 static inline char *usbssp_slot_state_string(u32 state)
 {
 	switch (state) {
-- 
2.17.1
