Message-ID: <20190806150802.72e0ef02@cakuba.netronome.com>
Date: Tue, 6 Aug 2019 15:08:02 -0700
From: Jakub Kicinski <jakub.kicinski@...ronome.com>
To: Hayes Wang <hayeswang@...ltek.com>
Cc: <netdev@...r.kernel.org>, <nic_swsd@...ltek.com>,
<linux-kernel@...r.kernel.org>, <linux-usb@...r.kernel.org>
Subject: Re: [PATCH net-next 4/5] r8152: support skb_add_rx_frag
On Tue, 6 Aug 2019 19:18:03 +0800, Hayes Wang wrote:
> Use skb_add_rx_frag() to reduce the memory copy for rx data.
>
> Use a new list of rx_used to store the rx buffer which couldn't be
> reused yet.
>
> Besides, the total number of rx buffer may be increased or decreased
> dynamically. And it is limited by RTL8152_MAX_RX_AGG.
>
> Signed-off-by: Hayes Wang <hayeswang@...ltek.com>
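[The copy reduction the commit message describes boils down to: copy only a
small header into a freshly allocated skb and attach the rest of the rx
buffer to the skb as a page fragment. A minimal sketch of that pattern is
below; the helper name, the napi/truesize parameters and the wiring into
rx_bottom() are illustrative assumptions, not code taken from the patch.]

	/* Sketch only: copy at most RTL8152_RXFG_HEADSZ bytes linearly and hand
	 * the remainder of the aggregation buffer to the skb as a page fragment,
	 * so the stack never copies it.  The extra page reference pins the
	 * buffer until the skb is consumed.
	 */
	static struct sk_buff *rx_frag_skb_sketch(struct napi_struct *napi,
						  struct rx_agg *agg, void *rx_data,
						  unsigned int pkt_len,
						  unsigned int buf_truesize)
	{
		unsigned int head_len = min_t(unsigned int, pkt_len,
					      RTL8152_RXFG_HEADSZ);
		struct sk_buff *skb;

		skb = napi_alloc_skb(napi, head_len);
		if (!skb)
			return NULL;

		skb_put_data(skb, rx_data, head_len);	/* small linear copy */

		if (pkt_len > head_len) {
			get_page(agg->page);		/* fragment holds a reference */
			skb_add_rx_frag(skb, 0, agg->page,
					agg_offset(agg, rx_data) + head_len,
					pkt_len - head_len, buf_truesize);
		}

		return skb;
	}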
> diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
> index 401e56112365..1615900c8592 100644
> --- a/drivers/net/usb/r8152.c
> +++ b/drivers/net/usb/r8152.c
> @@ -584,6 +584,9 @@ enum rtl_register_content {
> #define TX_ALIGN 4
> #define RX_ALIGN 8
>
> +#define RTL8152_MAX_RX_AGG (10 * RTL8152_MAX_RX)
> +#define RTL8152_RXFG_HEADSZ 256
> +
> #define INTR_LINK 0x0004
>
> #define RTL8152_REQT_READ 0xc0
> @@ -720,7 +723,7 @@ struct r8152 {
> struct net_device *netdev;
> struct urb *intr_urb;
> struct tx_agg tx_info[RTL8152_MAX_TX];
> - struct list_head rx_info;
> + struct list_head rx_info, rx_used;
I don't see where entries on the rx_used list get freed when the driver is
unloaded. Could you explain how that's taken care of?
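
For reference, I'd expect something along these lines somewhere on the
teardown path (purely a hypothetical sketch; the function name and the
assumption that rx is already quiesced are mine, not from the patch):

	/* Hypothetical: walk tp->rx_used and release each aggregation buffer on
	 * driver unload.  Assumes rx is quiesced, so no locking is shown.
	 * free_rx_agg() only unlinks info_list, hence the explicit list_del()
	 * of the rx_used linkage here.
	 */
	static void free_rx_used_sketch(struct r8152 *tp)
	{
		struct rx_agg *agg, *agg_next;

		list_for_each_entry_safe(agg, agg_next, &tp->rx_used, list) {
			list_del(&agg->list);
			free_rx_agg(tp, agg);
		}
	}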
> struct list_head rx_done, tx_free;
> struct sk_buff_head tx_queue, rx_queue;
> spinlock_t rx_lock, tx_lock;
> @@ -1476,7 +1479,7 @@ static void free_rx_agg(struct r8152 *tp, struct rx_agg *agg)
> list_del(&agg->info_list);
>
> usb_free_urb(agg->urb);
> - __free_pages(agg->page, get_order(tp->rx_buf_sz));
> + put_page(agg->page);
> kfree(agg);
>
> atomic_dec(&tp->rx_count);
> @@ -1493,7 +1496,7 @@ static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags)
> if (rx_agg) {
> unsigned long flags;
>
> - rx_agg->page = alloc_pages(mflags, order);
> + rx_agg->page = alloc_pages(mflags | __GFP_COMP, order);
> if (!rx_agg->page)
> goto free_rx;
>
> @@ -1951,6 +1954,50 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
> return checksum;
> }
>
> +static inline bool rx_count_exceed(struct r8152 *tp)
> +{
> + return atomic_read(&tp->rx_count) > RTL8152_MAX_RX;
> +}
> +
> +static inline int agg_offset(struct rx_agg *agg, void *addr)
> +{
> + return (int)(addr - agg->buffer);
> +}
> +
> +static struct rx_agg *rtl_get_free_rx(struct r8152 *tp, gfp_t mflags)
> +{
> + struct list_head *cursor, *next;
> + struct rx_agg *agg_free = NULL;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&tp->rx_lock, flags);
> +
> + list_for_each_safe(cursor, next, &tp->rx_used) {
> + struct rx_agg *agg;
> +
> + agg = list_entry(cursor, struct rx_agg, list);
> +
> + if (page_count(agg->page) == 1) {
> + if (!agg_free) {
> + list_del_init(cursor);
> + agg_free = agg;
> + continue;
> + } else if (rx_count_exceed(tp)) {
nit: else unnecessary after continue
> + list_del_init(cursor);
> + free_rx_agg(tp, agg);
> + }
> + break;
> + }
> + }
> +
> + spin_unlock_irqrestore(&tp->rx_lock, flags);
> +
> + if (!agg_free && atomic_read(&tp->rx_count) < RTL8152_MAX_RX_AGG)
> + agg_free = alloc_rx_agg(tp, mflags);
> +
> + return agg_free;
> +}
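
For what it's worth, the nit above comes down to this (same behaviour, just
no else after the continue):

		if (page_count(agg->page) == 1) {
			if (!agg_free) {
				list_del_init(cursor);
				agg_free = agg;
				continue;
			}

			if (rx_count_exceed(tp)) {
				list_del_init(cursor);
				free_rx_agg(tp, agg);
			}
			break;
		}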
> +
> static int rx_bottom(struct r8152 *tp, int budget)
> {
> unsigned long flags;