Message-ID: <5a7697aa7d271405cb7b9476d592053c12364bd7.1512518311.git.julia@ni.com>
Date: Tue, 5 Dec 2017 18:02:49 -0600
From: Julia Cartwright <julia@...com>
To: David Miller <davem@...emloft.net>
CC: <julia.lawall@...6.fr>, <rafalo@...ence.com>,
<nicolas.ferre@...rochip.com>, <netdev@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <kbuild-all@...org>
Subject: [PATCH v2 2/3] net: macb: reduce scope of rx_fs_lock-protected regions

Commit ae8223de3df5 ("net: macb: Added support for RX filtering")
introduces a lock, rx_fs_lock, which is intended to protect the list of
rx_flow items and synchronize access to the hardware rx filtering
registers.

However, the region protected by this lock is overscoped, unnecessarily
including things like slab allocation.  Reduce this lock scope to only
include operations which must be performed atomically: list traversal,
addition, and removal, and hitting the macb filtering registers.

This fixes the use of kmalloc with GFP_KERNEL in atomic context.
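For reference, the resulting shape of the add path looks roughly like the
following (an abridged sketch, not the literal post-patch function): the
GFP_KERNEL allocation, which may sleep and therefore must not run under a
spinlock with interrupts disabled, happens before the lock is taken, and
rx_fs_lock covers only the list and filter-register manipulation.

	/*
	 * Abridged sketch of gem_add_flow_filter() after this patch;
	 * error handling and the list/register details are elided.
	 */
	static int gem_add_flow_filter(struct net_device *netdev,
				       struct ethtool_rxnfc *cmd)
	{
		struct macb *bp = netdev_priv(netdev);
		struct ethtool_rx_fs_item *newfs;
		unsigned long flags;

		/* GFP_KERNEL may sleep, so allocate before taking rx_fs_lock. */
		newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
		if (!newfs)
			return -ENOMEM;

		spin_lock_irqsave(&bp->rx_fs_lock, flags);

		/*
		 * Only the work that must be atomic happens here: walking
		 * bp->rx_fs_list, linking in newfs, and programming the
		 * filtering registers.  Error paths drop the lock before
		 * calling kfree(newfs).
		 */

		spin_unlock_irqrestore(&bp->rx_fs_lock, flags);

		return 0;
	}
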
Fixes: ae8223de3df5 ("net: macb: Added support for RX filtering")
Cc: Rafal Ozieblo <rafalo@...ence.com>
Cc: Julia Lawall <julia.lawall@...6.fr>
Acked-by: Nicolas Ferre <nicolas.ferre@...rochip.com>
Signed-off-by: Julia Cartwright <julia@...com>
---
drivers/net/ethernet/cadence/macb_main.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b7644836aba1..758e8b3042b2 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2796,6 +2796,7 @@ static int gem_add_flow_filter(struct net_device *netdev,
struct macb *bp = netdev_priv(netdev);
struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct ethtool_rx_fs_item *item, *newfs;
+ unsigned long flags;
int ret = -EINVAL;
bool added = false;
@@ -2811,6 +2812,8 @@ static int gem_add_flow_filter(struct net_device *netdev,
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
/* find correct place to add in list */
list_for_each_entry(item, &bp->rx_fs_list.list, list) {
if (item->fs.location > newfs->fs.location) {
@@ -2833,9 +2836,11 @@ static int gem_add_flow_filter(struct net_device *netdev,
if (netdev->features & NETIF_F_NTUPLE)
gem_enable_flow_filters(bp, 1);
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
return 0;
err:
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
kfree(newfs);
return ret;
}
@@ -2846,6 +2851,9 @@ static int gem_del_flow_filter(struct net_device *netdev,
struct macb *bp = netdev_priv(netdev);
struct ethtool_rx_fs_item *item;
struct ethtool_rx_flow_spec *fs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
list_for_each_entry(item, &bp->rx_fs_list.list, list) {
if (item->fs.location == cmd->fs.location) {
@@ -2862,12 +2870,14 @@ static int gem_del_flow_filter(struct net_device *netdev,
gem_writel_n(bp, SCRT2, fs->location, 0);
list_del(&item->list);
- kfree(item);
bp->rx_fs_list.count--;
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ kfree(item);
return 0;
}
}
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
return -EINVAL;
}
@@ -2936,11 +2946,8 @@ static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct macb *bp = netdev_priv(netdev);
- unsigned long flags;
int ret;
- spin_lock_irqsave(&bp->rx_fs_lock, flags);
-
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
if ((cmd->fs.location >= bp->max_tuples)
@@ -2959,7 +2966,6 @@ static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
ret = -EOPNOTSUPP;
}
- spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
return ret;
}
--
2.14.2