[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <d59a6694-e0e7-46b7-874e-0c6acd8c9126@linux.intel.com>
Date: Fri, 21 Feb 2025 16:54:11 +0200
From: Mathias Nyman <mathias.nyman@...ux.intel.com>
To: Michal Pecio <michal.pecio@...il.com>,
Mathias Nyman <mathias.nyman@...el.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Cc: linux-usb@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 3/3] usb: xhci: Unify duplicate inc_enq() code
On 21.2.2025 0.47, Michal Pecio wrote:
> Remove a block of code copied from inc_enq(). As often happens, the two
> copies have diverged somewhat - in this case, inc_enq() has a bug where
> it may leave the chain bit of a link TRB unset on a quirky HC. Fix this.
> Remove the pointless 'next' variable which only aliases ring->enqueue.
The link TRB chain bit should be set when the ring is created, and never cleared
on those quirky hosts.
>
> Note: I don't know if any 0.95 xHC ever reached series production, but
> "AMD 0.96 host" appears to be the "Llano" family APU. Example dmesg at
> https://linux-hardware.org/?probe=79d5cfd4fd&log=dmesg
>
> pci 0000:00:10.0: [1022:7812] type 00 class 0x0c0330
> hcc params 0x014042c3 hci version 0x96 quirks 0x0000000000000608
>
> Signed-off-by: Michal Pecio <michal.pecio@...il.com>
> ---
> drivers/usb/host/xhci-ring.c | 135 +++++++++++++++--------------------
> 1 file changed, 58 insertions(+), 77 deletions(-)
>
> diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
> index 46ca98066856..f400ba85ebc5 100644
> --- a/drivers/usb/host/xhci-ring.c
> +++ b/drivers/usb/host/xhci-ring.c
> @@ -208,6 +208,52 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
> return;
> }
>
> +/*
> + * If enqueue points at a link TRB, follow links until an ordinary TRB is reached.
> + * Toggle the cycle bit of passed link TRBs and optionally chain them.
> + */
> +static void inc_enq_past_link(struct xhci_hcd *xhci, struct xhci_ring *ring, u32 chain)
> +{
> + unsigned int link_trb_count = 0;
> +
> + while (trb_is_link(ring->enqueue)) {
> +
> + /*
> + * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
> + * set, but other sections talk about dealing with the chain bit set. This was
> + * fixed in the 0.96 specification errata, but we have to assume that all 0.95
> + * xHCI hardware can't handle the chain bit being cleared on a link TRB.
> + *
> + * If we're dealing with 0.95 hardware or isoc rings on AMD 0.96 host, override
> + * the chain bit to always be set.
> + */
> + if (!xhci_link_chain_quirk(xhci, ring->type)) {
> + ring->enqueue->link.control &= cpu_to_le32(~TRB_CHAIN);
> + ring->enqueue->link.control |= cpu_to_le32(chain);
> + } else {
> + ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
> + }
> +
> + /* Give this link TRB to the hardware */
> + wmb();
> + ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
> +
> + /* Toggle the cycle bit after the last ring segment. */
> + if (link_trb_toggles_cycle(ring->enqueue))
> + ring->cycle_state ^= 1;
> +
> + ring->enq_seg = ring->enq_seg->next;
> + ring->enqueue = ring->enq_seg->trbs;
> +
> + trace_xhci_inc_enq(ring);
> +
> + if (link_trb_count++ > ring->num_segs) {
> + xhci_warn(xhci, "Link TRB loop at enqueue\n");
> + break;
> + }
> + }
> +}
> +
> /*
> * See Cycle bit rules. SW is the consumer for the event ring only.
> *
> @@ -216,11 +262,6 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
> * If we've enqueued the last TRB in a TD, make sure the following link TRBs
> * have their chain bit cleared (so that each Link TRB is a separate TD).
> *
> - * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
> - * set, but other sections talk about dealing with the chain bit set. This was
> - * fixed in the 0.96 specification errata, but we have to assume that all 0.95
> - * xHCI hardware can't handle the chain bit being cleared on a link TRB.
> - *
> * @more_trbs_coming: Will you enqueue more TRBs before calling
> * prepare_transfer()?
> */
> @@ -228,8 +269,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
> bool more_trbs_coming)
> {
> u32 chain;
> - union xhci_trb *next;
> - unsigned int link_trb_count = 0;
>
> chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
>
> @@ -238,48 +277,16 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
> return;
> }
>
> - next = ++(ring->enqueue);
> -
> - /* Update the dequeue pointer further if that was a link TRB */
> - while (trb_is_link(next)) {
> -
> - /*
> - * If the caller doesn't plan on enqueueing more TDs before
> - * ringing the doorbell, then we don't want to give the link TRB
> - * to the hardware just yet. We'll give the link TRB back in
> - * prepare_ring() just before we enqueue the TD at the top of
> - * the ring.
> - */
> - if (!chain && !more_trbs_coming)
> - break;
> -
> - /* If we're not dealing with 0.95 hardware or isoc rings on
> - * AMD 0.96 host, carry over the chain bit of the previous TRB
> - * (which may mean the chain bit is cleared).
> - */
> - if (!xhci_link_chain_quirk(xhci, ring->type)) {
> - next->link.control &= cpu_to_le32(~TRB_CHAIN);
> - next->link.control |= cpu_to_le32(chain);
> - }
> - /* Give this link TRB to the hardware */
> - wmb();
> - next->link.control ^= cpu_to_le32(TRB_CYCLE);
> -
> - /* Toggle the cycle bit after the last ring segment. */
> - if (link_trb_toggles_cycle(next))
> - ring->cycle_state ^= 1;
> -
> - ring->enq_seg = ring->enq_seg->next;
> - ring->enqueue = ring->enq_seg->trbs;
> - next = ring->enqueue;
> -
> - trace_xhci_inc_enq(ring);
> -
> - if (link_trb_count++ > ring->num_segs) {
> - xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
> - break;
> - }
> - }
> + ring->enqueue++;
> +
> + /*
> + * If we are in the middle of a TD or the caller plans to enqueue more
> + * TDs as one transfer (eg. control), traverse any link TRBs right now.
> + * Otherwise, enqueue can stay on a link until the next prepare_ring().
> + * This avoids enqueue entering deq_seg and simplifies ring expansion.
> + */
> + if (chain || more_trbs_coming)
> + inc_enq_past_link(xhci, ring, chain);
maybe
if (trb_is_link(ring->enqueue) && (chain || more_trbs_coming))
inc_enq_past_link(xhci, ring, chain);
Avoids calling inc_enq_past_link() every time we increase enqueue, and explains why we call it.
> }
>
> /*
> @@ -3177,7 +3184,6 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
> static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
> u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
> {
> - unsigned int link_trb_count = 0;
> unsigned int new_segs = 0;
>
> /* Make sure the endpoint has been added to xHC schedule */
> @@ -3225,33 +3231,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
> }
> }
>
> - while (trb_is_link(ep_ring->enqueue)) {
> - /* If we're not dealing with 0.95 hardware or isoc rings
> - * on AMD 0.96 host, clear the chain bit.
> - */
> - if (!xhci_link_chain_quirk(xhci, ep_ring->type))
> - ep_ring->enqueue->link.control &=
> - cpu_to_le32(~TRB_CHAIN);
> - else
> - ep_ring->enqueue->link.control |=
> - cpu_to_le32(TRB_CHAIN);
> -
> - wmb();
> - ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
> -
> - /* Toggle the cycle bit after the last ring segment. */
> - if (link_trb_toggles_cycle(ep_ring->enqueue))
> - ep_ring->cycle_state ^= 1;
> -
> - ep_ring->enq_seg = ep_ring->enq_seg->next;
> - ep_ring->enqueue = ep_ring->enq_seg->trbs;
> -
> - /* prevent infinite loop if all first trbs are link trbs */
> - if (link_trb_count++ > ep_ring->num_segs) {
> - xhci_warn(xhci, "Ring is an endless link TRB loop\n");
> - return -EINVAL;
> - }
> - }
> + /* Ensure that new TRBs won't overwrite a link */
Maybe add: if (trb_is_link(ep_ring->enqueue)) check here as well
> + inc_enq_past_link(xhci, ep_ring, 0);
>
> if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
> xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
Otherwise this is a nice cleanup, and the chain bit seems correct, in short:
if quirky host, set chain bit in link TRB before a TD, otherwise clear it.
if quirky host, set chain bit in link TRB mid TD, otherwise set chain bit to
same value as previous TRB.
Powered by blists - more mailing lists