Message-ID: <1658941416-74393-3-git-send-email-moshe@nvidia.com>
Date: Wed, 27 Jul 2022 20:03:29 +0300
From: Moshe Shemesh <moshe@...dia.com>
To: "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Saeed Mahameed <saeedm@...dia.com>,
Leon Romanovsky <leon@...nel.org>,
"Tariq Toukan" <tariqt@...dia.com>,
Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>
CC: Jiri Pirko <jiri@...dia.com>, <netdev@...r.kernel.org>
Subject: [PATCH net-next 2/9] net: devlink: remove region snapshots list dependency on devlink->lock
From: Jiri Pirko <jiri@...dia.com>
After the mlx4 driver is converted to do locked reload,
devlink_region_snapshot_create() may be called from both locked and
unlocked contexts. Resolve this by removing the dependency on
devlink->lock for region snapshot list consistency and introducing a
new per-region mutex to protect it.
Signed-off-by: Jiri Pirko <jiri@...dia.com>
---
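For illustration, a minimal driver-side sketch of how a caller is
expected to use devlink_region_snapshot_create() after this change.
It is not part of the patch and the function name is hypothetical;
the devlink helpers it calls are existing exported devlink functions.
Because the snapshot list is now protected by the per-region
snapshot_lock, the same helper can be used from a locked reload path
as well as from an unlocked context such as a workqueue:

#include <net/devlink.h>

/* Hypothetical caller, for illustration only. On success the snapshot
 * takes ownership of @data (it is freed later via the region ops
 * destructor); on failure the caller still owns it.
 */
static int foo_capture_region_snapshot(struct devlink *devlink,
				       struct devlink_region *region,
				       u8 *data)
{
	u32 snapshot_id;
	int err;

	err = devlink_region_snapshot_id_get(devlink, &snapshot_id);
	if (err)
		return err;

	/* Safe to call whether or not the caller already holds the
	 * devlink instance lock; the region's snapshot_lock serializes
	 * snapshot list updates.
	 */
	err = devlink_region_snapshot_create(region, data, snapshot_id);

	devlink_region_snapshot_id_put(devlink, snapshot_id);
	return err;
}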
net/core/devlink.c | 41 +++++++++++++++++++++++++++++------------
1 file changed, 29 insertions(+), 12 deletions(-)
diff --git a/net/core/devlink.c b/net/core/devlink.c
index da002791e300..4de1f93053a2 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -695,6 +695,10 @@ struct devlink_region {
const struct devlink_region_ops *ops;
const struct devlink_port_region_ops *port_ops;
};
+ struct mutex snapshot_lock; /* protects snapshot_list,
+ * max_snapshots and cur_snapshots
+ * consistency.
+ */
struct list_head snapshot_list;
u32 max_snapshots;
u32 cur_snapshots;
@@ -5818,7 +5822,7 @@ static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
* Multiple snapshots can be created on a region.
* The @snapshot_id should be obtained using the getter function.
*
- * Must be called only while holding the devlink instance lock.
+ * Must be called only while holding the region snapshot lock.
*
* @region: devlink region of the snapshot
* @data: snapshot data
@@ -5832,7 +5836,7 @@ __devlink_region_snapshot_create(struct devlink_region *region,
struct devlink_snapshot *snapshot;
int err;
- devl_assert_locked(devlink);
+ lockdep_assert_held(&region->snapshot_lock);
/* check if region can hold one more snapshot */
if (region->cur_snapshots == region->max_snapshots)
@@ -5870,7 +5874,7 @@ static void devlink_region_snapshot_del(struct devlink_region *region,
{
struct devlink *devlink = region->devlink;
- devl_assert_locked(devlink);
+ lockdep_assert_held(&region->snapshot_lock);
devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
region->cur_snapshots--;
@@ -6049,11 +6053,15 @@ static int devlink_nl_cmd_region_del(struct sk_buff *skb,
if (!region)
return -EINVAL;
+ mutex_lock(&region->snapshot_lock);
snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
- if (!snapshot)
+ if (!snapshot) {
+ mutex_unlock(&region->snapshot_lock);
return -EINVAL;
+ }
devlink_region_snapshot_del(region, snapshot);
+ mutex_unlock(&region->snapshot_lock);
return 0;
}
@@ -6101,9 +6109,12 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
}
+ mutex_lock(&region->snapshot_lock);
+
if (region->cur_snapshots == region->max_snapshots) {
NL_SET_ERR_MSG_MOD(info->extack, "The region has reached the maximum number of stored snapshots");
- return -ENOSPC;
+ err = -ENOSPC;
+ goto unlock;
}
snapshot_id_attr = info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID];
@@ -6112,17 +6123,18 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
NL_SET_ERR_MSG_MOD(info->extack, "The requested snapshot id is already in use");
- return -EEXIST;
+ err = -EEXIST;
+ goto unlock;
}
err = __devlink_snapshot_id_insert(devlink, snapshot_id);
if (err)
- return err;
+ goto unlock;
} else {
err = __devlink_region_snapshot_id_get(devlink, &snapshot_id);
if (err) {
NL_SET_ERR_MSG_MOD(info->extack, "Failed to allocate a new snapshot id");
- return err;
+ goto unlock;
}
}
@@ -6160,16 +6172,20 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
goto err_notify;
}
+ mutex_unlock(&region->snapshot_lock);
return 0;
err_snapshot_create:
region->ops->destructor(data);
err_snapshot_capture:
__devlink_snapshot_id_decrement(devlink, snapshot_id);
+ mutex_unlock(&region->snapshot_lock);
return err;
err_notify:
devlink_region_snapshot_del(region, snapshot);
+unlock:
+ mutex_unlock(&region->snapshot_lock);
return err;
}
@@ -11095,6 +11111,7 @@ struct devlink_region *devl_region_create(struct devlink *devlink,
region->ops = ops;
region->size = region_size;
INIT_LIST_HEAD(&region->snapshot_list);
+ mutex_init(&region->snapshot_lock);
list_add_tail(&region->list, &devlink->region_list);
devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
@@ -11168,6 +11185,7 @@ devlink_port_region_create(struct devlink_port *port,
region->port_ops = ops;
region->size = region_size;
INIT_LIST_HEAD(&region->snapshot_list);
+ mutex_init(&region->snapshot_lock);
list_add_tail(&region->list, &port->region_list);
devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
@@ -11197,6 +11215,7 @@ void devl_region_destroy(struct devlink_region *region)
devlink_region_snapshot_del(region, snapshot);
list_del(&region->list);
+ mutex_destroy(&region->snapshot_lock);
devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
kfree(region);
@@ -11272,13 +11291,11 @@ EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
int devlink_region_snapshot_create(struct devlink_region *region,
u8 *data, u32 snapshot_id)
{
- struct devlink *devlink = region->devlink;
int err;
- devl_lock(devlink);
+ mutex_lock(&region->snapshot_lock);
err = __devlink_region_snapshot_create(region, data, snapshot_id);
- devl_unlock(devlink);
-
+ mutex_unlock(&region->snapshot_lock);
return err;
}
EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
--
2.18.2