[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251119120252.74379adb.michal.pecio@gmail.com>
Date: Wed, 19 Nov 2025 12:02:52 +0100
From: Michal Pecio <michal.pecio@...il.com>
To: Mathias Nyman <mathias.nyman@...el.com>, Greg Kroah-Hartman
<gregkh@...uxfoundation.org>
Cc: linux-usb@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 1/5] usb: xhci: Track transfer ring dequeue progress
properly
xHCI 4.9.2 has clear requirements regarding transfer ring management:
Software uses the Dequeue Pointer to determine when a Transfer Ring
is full. As it processes Transfer Events, it updates its copy of
the Dequeue Pointer with the value of the Transfer Event TRB Pointer
field. If advancing the Enqueue Pointer would make it equal to the
Dequeue Pointer then the Transfer Ring is full and software shall
wait for Transfer Events that will advance the Dequeue Pointer.
This and the first two rows of Table 4-2 in 4.6.9 imply that the xHC
is not required to atomically advance Dequeue to the next TRB after
completing one. The TRB referenced by latest event is still owned by
the xHC and not supposed to be reused for new transfers yet.
The driver allows such reuse, and worse. When a TD completes with
Short Packet, Dequeue is moved past remaining TRBs without waiting
for their completion. This opens them for reuse if it happens across a
segment boundary and complicates handling of events for those TRBs.
This is due to sloppy historic assumptions that Dequeue points to the
next TD to execute. Those assumptions stopped working when unlinking
was implemented and were fully cleaned up last year. Dequeue is
now only used for free space tracking and in move_dequeue_past_td().
So let's fix this. When TD is given back, set Dequeue to the current
TRB pointer rather than past the TD. Future patch will also update
Dequeue when (and if) events are received for the remaining TRBs.
Note: updating Dequeue before giveback would break sum_trb_lengths().
Skipping moves Dequeue past the whole TD, because skipping is triggered
by events outside the TD, so we know the xHC has left it completely. That
being said, replace inc_deq() with next_trb() to ensure that Dequeue
doesn't get ahead of the xHC (and/or Enqueue) on Link TRBs.
Signed-off-by: Michal Pecio <michal.pecio@...il.com>
---
drivers/usb/host/xhci-ring.c | 33 ++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bf077cd13ffa..3d5124912a09 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -927,7 +927,7 @@ static void xhci_dequeue_td(struct xhci_hcd *xhci, struct xhci_td *td, struct xh
{
ring->dequeue = td->end_trb;
ring->deq_seg = td->end_seg;
- inc_deq(xhci, ring);
+ next_trb(&ring->deq_seg, &ring->dequeue);
xhci_td_cleanup(xhci, td, ring, status);
}
@@ -2229,8 +2229,8 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
}
static void finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
- struct xhci_ring *ep_ring, struct xhci_td *td,
- u32 trb_comp_code)
+ struct xhci_ring *ep_ring, struct xhci_td *td, u32 trb_comp_code,
+ struct xhci_segment *ep_seg, union xhci_trb *ep_trb)
{
struct xhci_ep_ctx *ep_ctx;
@@ -2267,7 +2267,11 @@ static void finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
return; /* xhci_handle_halted_endpoint marked td cancelled */
}
- xhci_dequeue_td(xhci, td, ep_ring, td->status);
+ /* update ring dequeue state */
+ ep_ring->deq_seg = ep_seg;
+ ep_ring->dequeue = ep_trb;
+
+ xhci_td_cleanup(xhci, td, ep_ring, td->status);
}
/* sum trb lengths from the first trb up to stop_trb, _excluding_ stop_trb */
@@ -2289,7 +2293,8 @@ static u32 sum_trb_lengths(struct xhci_td *td, union xhci_trb *stop_trb)
*/
static void process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
struct xhci_ring *ep_ring, struct xhci_td *td,
- union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+ struct xhci_segment *ep_seg, union xhci_trb *ep_trb,
+ struct xhci_transfer_event *event)
{
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
@@ -2373,7 +2378,7 @@ static void process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->urb->actual_length = requested;
finish_td:
- finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ finish_td(xhci, ep, ep_ring, td, trb_comp_code, ep_seg, ep_trb);
}
/*
@@ -2381,7 +2386,8 @@ static void process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
*/
static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
struct xhci_ring *ep_ring, struct xhci_td *td,
- union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+ struct xhci_segment *ep_seg, union xhci_trb *ep_trb,
+ struct xhci_transfer_event *event)
{
struct urb_priv *urb_priv;
int idx;
@@ -2483,7 +2489,7 @@ static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->urb_length_set = true;
return;
}
- finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ finish_td(xhci, ep, ep_ring, td, trb_comp_code, ep_seg, ep_trb);
}
static void skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
@@ -2511,7 +2517,8 @@ static void skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
*/
static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
struct xhci_ring *ep_ring, struct xhci_td *td,
- union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+ struct xhci_segment *ep_seg, union xhci_trb *ep_trb,
+ struct xhci_transfer_event *event)
{
struct xhci_slot_ctx *slot_ctx;
u32 trb_comp_code;
@@ -2573,7 +2580,7 @@ static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->urb->actual_length = 0;
}
- finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ finish_td(xhci, ep, ep_ring, td, trb_comp_code, ep_seg, ep_trb);
}
/* Transfer events which don't point to a transfer TRB, see xhci 4.17.4 */
@@ -2941,11 +2948,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* update the urb's actual_length and give back to the core */
if (usb_endpoint_xfer_control(&td->urb->ep->desc))
- process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
+ process_ctrl_td(xhci, ep, ep_ring, td, ep_seg, ep_trb, event);
else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
- process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
+ process_isoc_td(xhci, ep, ep_ring, td, ep_seg, ep_trb, event);
else
- process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
+ process_bulk_intr_td(xhci, ep, ep_ring, td, ep_seg, ep_trb, event);
return 0;
check_endpoint_halted:
--
2.48.1
Powered by blists - more mailing lists