Date:   Mon, 22 Mar 2021 10:04:47 +0300
From:   Dan Carpenter <dan.carpenter@...cle.com>
To:     Namjae Jeon <namjae.jeon@...sung.com>
Cc:     linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
        linux-cifs@...r.kernel.org,
        linux-cifsd-devel@...ts.sourceforge.net, smfrench@...il.com,
        senozhatsky@...omium.org, hyc.lee@...il.com,
        viro@...iv.linux.org.uk, hch@....de, hch@...radead.org,
        ronniesahlberg@...il.com, aurelien.aptel@...il.com,
        aaptel@...e.com, sandeen@...deen.net, colin.king@...onical.com,
        rdunlap@...radead.org,
        Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
        Steve French <stfrench@...rosoft.com>
Subject: Re: [PATCH 3/5] cifsd: add file operations

On Mon, Mar 22, 2021 at 02:13:42PM +0900, Namjae Jeon wrote:
> +void *ksmbd_alloc(size_t size)
> +{
> +	return kvmalloc(size, GFP_KERNEL | __GFP_ZERO);


This patch adds a bunch of wrappers around kvmalloc().  Don't do that;
just call kvmalloc() directly.  The wrappers only make the code harder
to read: kvmalloc() is not appropriate for small allocations, the
wrapper hides a GFP_KERNEL which can lead to scheduling-in-atomic bugs,
and the implicit __GFP_ZERO is easy to miss.
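
For example, a caller could just do the allocation inline so the flags
and the zeroing are visible at the call site (a sketch; the callers of
ksmbd_alloc() aren't in this hunk, and "buf" is a made-up name):

	/* instead of buf = ksmbd_alloc(size); */
	buf = kvzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	...
	/* instead of ksmbd_free(buf); */
	kvfree(buf);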

> +}
> +
> +void ksmbd_free(void *ptr)
> +{
> +	kvfree(ptr);
> +}
> +
> +static struct wm *wm_alloc(size_t sz, gfp_t flags)
> +{
> +	struct wm *wm;
> +	size_t alloc_sz = sz + sizeof(struct wm);
                          ^^^^^^^^^^^^^^^^^^^^^^

Check for integer overflow.
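
Something like this, using check_add_overflow() from
<linux/overflow.h> (untested):

	size_t alloc_sz;

	if (check_add_overflow(sz, sizeof(struct wm), &alloc_sz))
		return NULL;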

> +
> +	wm = kvmalloc(alloc_sz, flags);
> +	if (!wm)
> +		return NULL;
> +	wm->sz = sz;
> +	return wm;
> +}
> +
> +static int register_wm_size_class(size_t sz)
> +{
> +	struct wm_list *l, *nl;
> +
> +	nl = kvmalloc(sizeof(struct wm_list), GFP_KERNEL);

Just use kmalloc() for small allocations.
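
In other words something like this, with a matching kfree() instead of
kvfree() in the duplicate-size-class path below (untested):

	nl = kmalloc(sizeof(*nl), GFP_KERNEL);
	if (!nl)
		return -ENOMEM;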

> +	if (!nl)
> +		return -ENOMEM;
> +
> +	nl->sz = sz;
> +	spin_lock_init(&nl->wm_lock);
> +	INIT_LIST_HEAD(&nl->idle_wm);
> +	INIT_LIST_HEAD(&nl->list);
> +	init_waitqueue_head(&nl->wm_wait);
> +	nl->avail_wm = 0;
> +
> +	write_lock(&wm_lists_lock);
> +	list_for_each_entry(l, &wm_lists, list) {
> +		if (l->sz == sz) {
> +			write_unlock(&wm_lists_lock);
> +			kvfree(nl);
> +			return 0;
> +		}
> +	}
> +
> +	list_add(&nl->list, &wm_lists);
> +	write_unlock(&wm_lists_lock);
> +	return 0;
> +}
> +
> +static struct wm_list *match_wm_list(size_t size)
> +{
> +	struct wm_list *l, *rl = NULL;
> +
> +	read_lock(&wm_lists_lock);
> +	list_for_each_entry(l, &wm_lists, list) {
> +		if (l->sz == size) {
> +			rl = l;
> +			break;
> +		}
> +	}
> +	read_unlock(&wm_lists_lock);
> +	return rl;
> +}
> +
> +static struct wm *find_wm(size_t size)
> +{
> +	struct wm_list *wm_list;
> +	struct wm *wm;
> +
> +	wm_list = match_wm_list(size);
> +	if (!wm_list) {
> +		if (register_wm_size_class(size))
> +			return NULL;
> +		wm_list = match_wm_list(size);
> +	}
> +
> +	if (!wm_list)
> +		return NULL;
> +
> +	while (1) {
> +		spin_lock(&wm_list->wm_lock);
> +		if (!list_empty(&wm_list->idle_wm)) {
> +			wm = list_entry(wm_list->idle_wm.next,
> +					struct wm,
> +					list);
> +			list_del(&wm->list);
> +			spin_unlock(&wm_list->wm_lock);
> +			return wm;
> +		}
> +
> +		if (wm_list->avail_wm > num_online_cpus()) {
> +			spin_unlock(&wm_list->wm_lock);
> +			wait_event(wm_list->wm_wait,
> +				   !list_empty(&wm_list->idle_wm));
> +			continue;
> +		}
> +
> +		wm_list->avail_wm++;

I don't think we should increment this until after the allocation
succeeds?
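
Something along these lines, perhaps (untested; it does open a small
window where a racing allocation could briefly push avail_wm past the
limit, since the slot is no longer reserved under the lock):

		spin_unlock(&wm_list->wm_lock);

		wm = wm_alloc(size, GFP_KERNEL);
		if (!wm) {
			wait_event(wm_list->wm_wait,
				   !list_empty(&wm_list->idle_wm));
			continue;
		}

		spin_lock(&wm_list->wm_lock);
		wm_list->avail_wm++;
		spin_unlock(&wm_list->wm_lock);
		break;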

> +		spin_unlock(&wm_list->wm_lock);
> +
> +		wm = wm_alloc(size, GFP_KERNEL);
> +		if (!wm) {
> +			spin_lock(&wm_list->wm_lock);
> +			wm_list->avail_wm--;
> +			spin_unlock(&wm_list->wm_lock);
> +			wait_event(wm_list->wm_wait,
> +				   !list_empty(&wm_list->idle_wm));
> +			continue;
> +		}
> +		break;
> +	}
> +
> +	return wm;
> +}

regards,
dan carpenter
