Message-ID: <Z-682XjIjxjAZ9j-@kernel.org>
Date: Thu, 3 Apr 2025 20:05:56 +0300
From: Jarkko Sakkinen <jarkko@...nel.org>
To: David Howells <dhowells@...hat.com>
Cc: keyrings@...r.kernel.org, Jarkko Sakkinen <jarkko.sakkinen@...nsys.com>,
Paul Moore <paul@...l-moore.com>, James Morris <jmorris@...ei.org>,
"Serge E. Hallyn" <serge@...lyn.com>, linux-kernel@...r.kernel.org,
linux-security-module@...r.kernel.org
Subject: Re: [RFC PATCH v4] KEYS: Add a list for unreferenced keys
On Thu, Apr 03, 2025 at 06:38:09PM +0300, Jarkko Sakkinen wrote:
> From: Jarkko Sakkinen <jarkko.sakkinen@...nsys.com>
>
> Add an isolated list of unreferenced keys to be queued for deletion, and
> try to pin the keys in the garbage collector before processing anything.
> Skip unpinnable keys.
>
> In effect this means that from the exact moment the usage count is
> zeroed, all GC processing of the key is skipped. Previously this only
> happened once key_put() had finished its processing, so keys whose usage
> count had already dropped to zero might still get processed.
>
> Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@...nsys.com>
> ---
> v4:
> - Pin the key while processing key type teardown. Skip dead keys.
> - Revert key_gc_graveyard back key_gc_unused_keys.
> - Rewrote the commit message.
> - "unsigned long flags" declaration somehow did make to the previous
> patch (sorry).
> v3:
> - Using spin_lock() fails since key_put() is executed in IRQ context.
> Using spin_lock_irqsave() would not work either, given the lock is
> also acquired for /proc/keys. Therefore, use a separate lock for the
> graveyard and splice key_graveyard before reaping key_serial_tree.
> v2:
> - Rename key_gc_unused_keys as key_gc_graveyard, and re-document the
> function.
> ---
> include/linux/key.h | 7 ++-----
> security/keys/gc.c | 36 ++++++++++++++++++++----------------
> security/keys/internal.h | 2 ++
> security/keys/key.c | 7 +++++--
> 4 files changed, 29 insertions(+), 23 deletions(-)
>
> diff --git a/include/linux/key.h b/include/linux/key.h
> index ba05de8579ec..c50659184bdf 100644
> --- a/include/linux/key.h
> +++ b/include/linux/key.h
> @@ -195,10 +195,8 @@ enum key_state {
> struct key {
> refcount_t usage; /* number of references */
> key_serial_t serial; /* key serial number */
> - union {
> - struct list_head graveyard_link;
> - struct rb_node serial_node;
> - };
> + struct list_head graveyard_link; /* key->usage == 0 */
> + struct rb_node serial_node;
> #ifdef CONFIG_KEY_NOTIFICATIONS
> struct watch_list *watchers; /* Entities watching this key for changes */
> #endif
> @@ -236,7 +234,6 @@ struct key {
> #define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
> #define KEY_FLAG_KEEP 8 /* set if key should not be removed */
> #define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
> -#define KEY_FLAG_FINAL_PUT 10 /* set if final put has happened on key */
>
> /* the key type and key description string
> * - the desc is used to match a key against search criteria
> diff --git a/security/keys/gc.c b/security/keys/gc.c
> index f27223ea4578..9ccd8ee6fcdb 100644
> --- a/security/keys/gc.c
> +++ b/security/keys/gc.c
> @@ -189,6 +189,7 @@ static void key_garbage_collector(struct work_struct *work)
> struct rb_node *cursor;
> struct key *key;
> time64_t new_timer, limit, expiry;
> + unsigned long flags;
>
> kenter("[%lx,%x]", key_gc_flags, gc_state);
>
> @@ -206,21 +207,35 @@ static void key_garbage_collector(struct work_struct *work)
>
> new_timer = TIME64_MAX;
>
> + spin_lock_irqsave(&key_graveyard_lock, flags);
> + list_splice_init(&key_graveyard, &graveyard);
> + spin_unlock_irqrestore(&key_graveyard_lock, flags);
> +
> + list_for_each_entry(key, &graveyard, graveyard_link) {
> + spin_lock(&key_serial_lock);
> + kdebug("unrefd key %d", key->serial);
> + rb_erase(&key->serial_node, &key_serial_tree);
> + spin_unlock(&key_serial_lock);
> + }
> +
> /* As only this function is permitted to remove things from the key
> * serial tree, if cursor is non-NULL then it will always point to a
> * valid node in the tree - even if lock got dropped.
> */
> spin_lock(&key_serial_lock);
> + key = NULL;
> cursor = rb_first(&key_serial_tree);
>
> continue_scanning:
> + key_put(key);
> while (cursor) {
> key = rb_entry(cursor, struct key, serial_node);
> cursor = rb_next(cursor);
> -
> - if (test_bit(KEY_FLAG_FINAL_PUT, &key->flags)) {
> - smp_mb(); /* Clobber key->user after FINAL_PUT seen. */
> - goto found_unreferenced_key;
> + /* key_get(), unless zero: */
> + if (!refcount_inc_not_zero(&key->usage)) {
> + key = NULL;
> + gc_state |= KEY_GC_REAP_AGAIN;
> + goto skip_dead_key;
> }
>
> if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
> @@ -274,6 +289,7 @@ static void key_garbage_collector(struct work_struct *work)
> spin_lock(&key_serial_lock);
> goto continue_scanning;
> }
> + key_put(key);
>
> /* We've completed the pass. Set the timer if we need to and queue a
> * new cycle if necessary. We keep executing cycles until we find one
> @@ -328,18 +344,6 @@ static void key_garbage_collector(struct work_struct *work)
> kleave(" [end %x]", gc_state);
> return;
>
> - /* We found an unreferenced key - once we've removed it from the tree,
> - * we can safely drop the lock.
> - */
> -found_unreferenced_key:
> - kdebug("unrefd key %d", key->serial);
> - rb_erase(&key->serial_node, &key_serial_tree);
> - spin_unlock(&key_serial_lock);
> -
> - list_add_tail(&key->graveyard_link, &graveyard);
> - gc_state |= KEY_GC_REAP_AGAIN;
> - goto maybe_resched;
> -
> /* We found a restricted keyring and need to update the restriction if
> * it is associated with the dead key type.
> */
> diff --git a/security/keys/internal.h b/security/keys/internal.h
> index 2cffa6dc8255..08f356d04d78 100644
> --- a/security/keys/internal.h
> +++ b/security/keys/internal.h
> @@ -66,6 +66,8 @@ struct key_user {
> extern struct rb_root key_user_tree;
> extern spinlock_t key_user_lock;
> extern struct key_user root_key_user;
> +extern struct list_head key_graveyard;
> +extern spinlock_t key_graveyard_lock;
>
> extern struct key_user *key_user_lookup(kuid_t uid);
> extern void key_user_put(struct key_user *user);
> diff --git a/security/keys/key.c b/security/keys/key.c
> index 7198cd2ac3a3..7511f2017b6b 100644
> --- a/security/keys/key.c
> +++ b/security/keys/key.c
> @@ -22,6 +22,8 @@ DEFINE_SPINLOCK(key_serial_lock);
>
> struct rb_root key_user_tree; /* tree of quota records indexed by UID */
> DEFINE_SPINLOCK(key_user_lock);
> +LIST_HEAD(key_graveyard);
> +DEFINE_SPINLOCK(key_graveyard_lock);
>
> unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */
> unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
> @@ -658,8 +660,9 @@ void key_put(struct key *key)
> key->user->qnbytes -= key->quotalen;
> spin_unlock_irqrestore(&key->user->lock, flags);
> }
> - smp_mb(); /* key->user before FINAL_PUT set. */
> - set_bit(KEY_FLAG_FINAL_PUT, &key->flags);
> + spin_lock_irqsave(&key_graveyard_lock, flags);
> + list_add_tail(&key->graveyard_link, &key_graveyard);
> + spin_unlock_irqrestore(&key_graveyard_lock, flags);
> schedule_work(&key_gc_work);
> }
> }
> --
> 2.39.5
>
>
David, I'd almost call this version a bug fix, and would consider adding a
Fixes tag too.

The reason is that this fully addresses the racy access to struct key
between key_put() and the GC. The small change I made for v4 makes a world
of difference for this patch.
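
If it helps to see the shape of it outside the kernel, below is a rough
userspace C model of the pattern (purely illustrative: toy_key, try_pin()
and the pthread mutexes are my stand-ins for key->usage,
refcount_inc_not_zero() and the spinlocks, not the actual keyrings code).
The point it tries to show is that once the count hits zero the key only
ever travels through the graveyard, and the scan can no longer pick it up
because the pin fails:

/* toy_graveyard.c - build with: gcc -std=c11 -pthread toy_graveyard.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_key {
	atomic_uint usage;		/* stands in for key->usage */
	int serial;
	struct toy_key *next_serial;	/* stand-in for the serial tree */
	struct toy_key *next_grave;	/* stand-in for graveyard_link */
};

static struct toy_key *serial_list;	/* all live keys */
static struct toy_key *graveyard;	/* keys whose count hit zero */
static pthread_mutex_t serial_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t graveyard_lock = PTHREAD_MUTEX_INITIALIZER;

/* Final put: queue the key for the GC instead of flagging it. */
static void toy_key_put(struct toy_key *key)
{
	if (atomic_fetch_sub(&key->usage, 1) == 1) {
		pthread_mutex_lock(&graveyard_lock);
		key->next_grave = graveyard;
		graveyard = key;
		pthread_mutex_unlock(&graveyard_lock);
		/* the kernel would schedule_work(&key_gc_work) here */
	}
}

/* "Increment unless zero": fails for any key already on its way out. */
static bool try_pin(struct toy_key *key)
{
	unsigned int old = atomic_load(&key->usage);

	while (old != 0)
		if (atomic_compare_exchange_weak(&key->usage, &old, old + 1))
			return true;
	return false;
}

static void toy_gc(void)
{
	struct toy_key *dead, *key;

	/* Splice the graveyard out under its own lock. */
	pthread_mutex_lock(&graveyard_lock);
	dead = graveyard;
	graveyard = NULL;
	pthread_mutex_unlock(&graveyard_lock);

	/* Unlink and destroy the dead keys. */
	while (dead) {
		struct toy_key *next = dead->next_grave;

		pthread_mutex_lock(&serial_lock);
		for (struct toy_key **p = &serial_list; *p; p = &(*p)->next_serial) {
			if (*p == dead) {
				*p = dead->next_serial;
				break;
			}
		}
		pthread_mutex_unlock(&serial_lock);
		printf("reaped key %d\n", dead->serial);
		free(dead);
		dead = next;
	}

	/* Scan the rest, but only keys we managed to pin. */
	pthread_mutex_lock(&serial_lock);
	for (key = serial_list; key; key = key->next_serial) {
		if (!try_pin(key))
			continue;	/* count hit zero: leave it to the graveyard */
		printf("processing pinned key %d\n", key->serial);
		toy_key_put(key);	/* drop the pin */
	}
	pthread_mutex_unlock(&serial_lock);
}

int main(void)
{
	for (int i = 1; i <= 3; i++) {
		struct toy_key *key = calloc(1, sizeof(*key));

		atomic_init(&key->usage, 1);
		key->serial = i;
		key->next_serial = serial_list;
		serial_list = key;
	}
	toy_key_put(serial_list);	/* final put: key 3 hits the graveyard */
	toy_gc();			/* reaps key 3, pins and scans keys 1 and 2 */
	return 0;
}

In the kernel the splice of course happens under key_graveyard_lock with
spin_lock_irqsave(), and the reaped keys go through key_gc_unused_keys()
rather than a bare free(), but the ordering is the same.
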
BR, Jarkko