Message-ID: <2236FBA76BA1254E88B949DDB74E612B802D9C2D@IRSMSX102.ger.corp.intel.com>
Date:   Tue, 9 Jan 2018 12:00:14 +0000
From:   "Reshetova, Elena" <elena.reshetova@...el.com>
To:     Jan Kara <jack@...e.cz>
CC:     "viro@...iv.linux.org.uk" <viro@...iv.linux.org.uk>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        "linux-fsdevel@...r.kernel.org" <linux-fsdevel@...r.kernel.org>,
        "tytso@....edu" <tytso@....edu>,
        "peterz@...radead.org" <peterz@...radead.org>,
        "keescook@...omium.org" <keescook@...omium.org>
Subject: RE: [PATCH] mbcache: convert mb_cache_entry.e_refcnt from atomic_t to refcount_t


> On Wed 29-11-17 13:22:20, Elena Reshetova wrote:
> > atomic_t variables are currently used to implement reference
> > counters with the following properties:
> >  - counter is initialized to 1 using atomic_set()
> >  - a resource is freed upon counter reaching zero
> >  - once the counter reaches zero, further
> >    increments aren't allowed
> >  - counter schema uses basic atomic operations
> >    (set, inc, inc_not_zero, dec_and_test, etc.)
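
(Illustrative aside, not part of the patch: the pattern described above
in its simplest form, first with atomic_t and then with its refcount_t
equivalent. The struct and function names here are made up.)

	struct obj {
		atomic_t refs;
	};

	void obj_init(struct obj *o)
	{
		atomic_set(&o->refs, 1);	/* counter starts at 1 */
	}

	void obj_get(struct obj *o)
	{
		atomic_inc(&o->refs);		/* take another reference */
	}

	void obj_put(struct obj *o)
	{
		if (atomic_dec_and_test(&o->refs))
			kfree(o);		/* freed on reaching zero */
	}

	/* The refcount_t version has the same shape, but the operations
	 * saturate instead of wrapping on overflow/underflow:
	 *
	 *	refcount_set(&o->refs, 1);
	 *	refcount_inc(&o->refs);
	 *	if (refcount_dec_and_test(&o->refs))
	 *		kfree(o);
	 */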
> >
> > Such atomic variables should be converted to a newly provided
> > refcount_t type and API that prevents accidental counter overflows
> > and underflows. This is important since overflows and underflows
> > can lead to use-after-free situations and be exploitable.
> >
> > The variable mb_cache_entry.e_refcnt is used as a pure reference counter.
> > Convert it to refcount_t and fix up the operations.
> >
> > **Important note for maintainers:
> >
> > Some functions from refcount_t API defined in lib/refcount.c
> > have different memory ordering guarantees than their atomic
> > counterparts.
> > The full comparison can be seen in
> > https://lkml.org/lkml/2017/11/15/57 and will hopefully soon
> > be merged into the documentation tree.
> > Normally the differences should not matter since refcount_t provides
> > enough guarantees to satisfy the refcounting use cases, but in
> > some rare cases they might.
> > Please double check that this variable's usage does not rely on
> > any undocumented memory-ordering guarantees.
> >
> > For the mb_cache_entry.e_refcnt it might make a difference
> > in the following places:
> >  - mb_cache_destroy(), mb_cache_shrink() and mb_cache_entry_delete():
> >    decrement in refcount_dec() provides RELEASE ordering vs. the fully
> >    unordered atomic counterpart. Since this strengthens the ordering, it
> >    should not matter for these cases.
> >  - mb_cache_entry_put(): decrement in refcount_dec_and_test() only
> >    provides RELEASE ordering and a control dependency on success
> >    vs. the fully ordered atomic counterpart.
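
(Illustrative aside, not part of the patch: why RELEASE ordering plus a
control dependency on success is enough for the free-on-zero pattern
used by mb_cache_entry_put(). The names below are generic.)

	static inline int obj_put(struct obj *o)
	{
		/* The RELEASE ordering in refcount_dec_and_test() makes
		 * all of this CPU's earlier accesses to *o visible before
		 * the counter can be observed as zero, and the control
		 * dependency orders the free after a successful test; so
		 * whichever CPU drops the last reference can safely free
		 * the object. */
		if (!refcount_dec_and_test(&o->refs))
			return 0;
		kfree(o);
		return 1;
	}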
> >
> > Suggested-by: Kees Cook <keescook@...omium.org>
> > Reviewed-by: David Windsor <dwindsor@...il.com>
> > Reviewed-by: Hans Liljestrand <ishkamiel@...il.com>
> > Signed-off-by: Elena Reshetova <elena.reshetova@...el.com>
> 
> The patch looks good to me. You can add:
> 
> Reviewed-by: Jan Kara <jack@...e.cz>

Thank you, Jan!
Would you be able to propagate the patch further into the respective tree?
These patches are best merged through the usual paths.

Best Regards,
Elena.


> 
> 
> 			Honza
> 
> > ---
> >  fs/mbcache.c            | 16 ++++++++--------
> >  include/linux/mbcache.h |  6 +++---
> >  2 files changed, 11 insertions(+), 11 deletions(-)
> >
> > diff --git a/fs/mbcache.c b/fs/mbcache.c
> > index d818fd2..8bdb52b 100644
> > --- a/fs/mbcache.c
> > +++ b/fs/mbcache.c
> > @@ -90,7 +90,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
> >
> >  	INIT_LIST_HEAD(&entry->e_list);
> >  	/* One ref for hash, one ref returned */
> > -	atomic_set(&entry->e_refcnt, 1);
> > +	refcount_set(&entry->e_refcnt, 1);
> >  	entry->e_key = key;
> >  	entry->e_value = value;
> >  	entry->e_reusable = reusable;
> > @@ -109,7 +109,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
> >  	spin_lock(&cache->c_list_lock);
> >  	list_add_tail(&entry->e_list, &cache->c_list);
> >  	/* Grab ref for LRU list */
> > -	atomic_inc(&entry->e_refcnt);
> > +	refcount_inc(&entry->e_refcnt);
> >  	cache->c_entry_count++;
> >  	spin_unlock(&cache->c_list_lock);
> >
> > @@ -141,7 +141,7 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
> >  		entry = hlist_bl_entry(node, struct mb_cache_entry,
> >  				       e_hash_list);
> >  		if (entry->e_key == key && entry->e_reusable) {
> > -			atomic_inc(&entry->e_refcnt);
> > +			refcount_inc(&entry->e_refcnt);
> >  			goto out;
> >  		}
> >  		node = node->next;
> > @@ -204,7 +204,7 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
> >  	hlist_bl_lock(head);
> >  	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
> >  		if (entry->e_key == key && entry->e_value == value) {
> > -			atomic_inc(&entry->e_refcnt);
> > +			refcount_inc(&entry->e_refcnt);
> >  			goto out;
> >  		}
> >  	}
> > @@ -239,7 +239,7 @@ void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
> >  			if (!list_empty(&entry->e_list)) {
> >  				list_del_init(&entry->e_list);
> >  				cache->c_entry_count--;
> > -				atomic_dec(&entry->e_refcnt);
> > +				refcount_dec(&entry->e_refcnt);
> >  			}
> >  			spin_unlock(&cache->c_list_lock);
> >  			mb_cache_entry_put(cache, entry);
> > @@ -300,7 +300,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
> >  		hlist_bl_lock(head);
> >  		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
> >  			hlist_bl_del_init(&entry->e_hash_list);
> > -			atomic_dec(&entry->e_refcnt);
> > +			refcount_dec(&entry->e_refcnt);
> >  		}
> >  		hlist_bl_unlock(head);
> >  		if (mb_cache_entry_put(cache, entry))
> > @@ -397,11 +397,11 @@ void mb_cache_destroy(struct mb_cache *cache)
> >  	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
> >  		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
> >  			hlist_bl_del_init(&entry->e_hash_list);
> > -			atomic_dec(&entry->e_refcnt);
> > +			refcount_dec(&entry->e_refcnt);
> >  		} else
> >  			WARN_ON(1);
> >  		list_del(&entry->e_list);
> > -		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
> > +		WARN_ON(refcount_read(&entry->e_refcnt) != 1);
> >  		mb_cache_entry_put(cache, entry);
> >  	}
> >  	kfree(cache->c_hash);
> > diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
> > index 20f1e3f..0bf2af6 100644
> > --- a/include/linux/mbcache.h
> > +++ b/include/linux/mbcache.h
> > @@ -5,7 +5,7 @@
> >  #include <linux/hash.h>
> >  #include <linux/list_bl.h>
> >  #include <linux/list.h>
> > -#include <linux/atomic.h>
> > +#include <linux/refcount.h>
> >  #include <linux/fs.h>
> >
> >  struct mb_cache;
> > @@ -15,7 +15,7 @@ struct mb_cache_entry {
> >  	struct list_head	e_list;
> >  	/* Hash table list - protected by hash chain bitlock */
> >  	struct hlist_bl_node	e_hash_list;
> > -	atomic_t		e_refcnt;
> > +	refcount_t		e_refcnt;
> >  	/* Key in hash - stable during lifetime of the entry */
> >  	u32			e_key;
> >  	u32			e_referenced:1;
> > @@ -33,7 +33,7 @@ void __mb_cache_entry_free(struct mb_cache_entry *entry);
> >  static inline int mb_cache_entry_put(struct mb_cache *cache,
> >  				     struct mb_cache_entry *entry)
> >  {
> > -	if (!atomic_dec_and_test(&entry->e_refcnt))
> > +	if (!refcount_dec_and_test(&entry->e_refcnt))
> >  		return 0;
> >  	__mb_cache_entry_free(entry);
> >  	return 1;
> > --
> > 2.7.4
> >
> --
> Jan Kara <jack@...e.com>
> SUSE Labs, CR
