Message-ID: <YFqxm7UQBtWqH6VU@google.com>
Date: Tue, 23 Mar 2021 20:27:23 -0700
From: Minchan Kim <minchan@...nel.org>
To: John Hubbard <jhubbard@...dia.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
linux-mm <linux-mm@...ck.org>,
LKML <linux-kernel@...r.kernel.org>, gregkh@...uxfoundation.org,
surenb@...gle.com, joaodias@...gle.com, willy@...radead.org,
digetx@...il.com
Subject: Re: [PATCH v6] mm: cma: support sysfs
On Tue, Mar 23, 2021 at 07:34:12PM -0700, John Hubbard wrote:
> On 3/23/21 6:05 PM, Minchan Kim wrote:
> ...
> > diff --git a/mm/cma_sysfs.c b/mm/cma_sysfs.c
> > new file mode 100644
> > index 000000000000..c3791a032dc5
> > --- /dev/null
> > +++ b/mm/cma_sysfs.c
> > @@ -0,0 +1,107 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/*
> > + * CMA SysFS Interface
> > + *
> > + * Copyright (c) 2021 Minchan Kim <minchan@...nel.org>
> > + */
> > +
> > +#include <linux/cma.h>
> > +#include <linux/kernel.h>
> > +#include <linux/slab.h>
> > +
> > +#include "cma.h"
> > +
> > +void cma_sysfs_account_success_pages(struct cma *cma, size_t count)
> > +{
> > + atomic64_add(count, &cma->nr_pages_succeeded);
> > +}
> > +
> > +void cma_sysfs_account_fail_pages(struct cma *cma, size_t count)
> > +{
> > + atomic64_add(count, &cma->nr_pages_failed);
> > +}
> > +
> > +#define CMA_ATTR_RO(_name) \
> > + static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
> > +
> > +#define to_cma_kobject(x) container_of(x, struct cma_kobject, kobj)
>
> I really don't think that helps. container_of() is so widely used and
> understood that it is not a good move to make people read one more
> wrapper for it. Instead, see below...
>
> > +
> > +static ssize_t alloc_pages_success_show(struct kobject *kobj,
> > + struct kobj_attribute *attr, char *buf)
> > +{
> > + struct cma_kobject *cma_kobj = to_cma_kobject(kobj);
> > + struct cma *cma = cma_kobj->cma;
>
> ...if you're looking to get rid of the real code duplication, then you
> could put *both* of those lines into a wrapper function, instead, like this:
>
> static inline struct cma *cma_from_kobj(struct kobject *kobj)
> {
> struct cma_kobject *cma_kobj = container_of(kobj, struct cma_kobject,
> kobj);
> struct cma *cma = cma_kobj->cma;
>
> return cma;
> }
>
> static ssize_t alloc_pages_success_show(struct kobject *kobj,
> struct kobj_attribute *attr, char *buf)
> {
> struct cma *cma = cma_from_kobj(kobj);
>
> return sysfs_emit(buf, "%llu\n",
> atomic64_read(&cma->nr_pages_succeeded));
> }
> CMA_ATTR_RO(alloc_pages_success);
>
> static ssize_t alloc_pages_fail_show(struct kobject *kobj,
> struct kobj_attribute *attr, char *buf)
> {
> struct cma *cma = cma_from_kobj(kobj);
>
> return sysfs_emit(buf, "%llu\n", atomic64_read(&cma->nr_pages_failed));
> }
> CMA_ATTR_RO(alloc_pages_fail);
>
> static void cma_kobj_release(struct kobject *kobj)
> {
> struct cma_kobject *cma_kobj = container_of(kobj, struct cma_kobject,
> kobj);
> struct cma *cma = cma_kobj->cma;
>
> kfree(cma_kobj);
> cma->kobj = NULL;
> }
>
> ...isn't that nicer? Saves a little code, gets rid of a macro.
Yub.
>
> > +
> > + return sysfs_emit(buf, "%llu\n",
> > + atomic64_read(&cma->nr_pages_succeeded));
> > +}
> > +CMA_ATTR_RO(alloc_pages_success);
> > +
> > +static ssize_t alloc_pages_fail_show(struct kobject *kobj,
> > + struct kobj_attribute *attr, char *buf)
> > +{
> > + struct cma_kobject *cma_kobj = to_cma_kobject(kobj);
> > + struct cma *cma = cma_kobj->cma;
> > +
> > + return sysfs_emit(buf, "%llu\n", atomic64_read(&cma->nr_pages_failed));
> > +}
> > +CMA_ATTR_RO(alloc_pages_fail);
> > +
> > +static void cma_kobj_release(struct kobject *kobj)
> > +{
> > + struct cma_kobject *cma_kobj = to_cma_kobject(kobj);
> > + struct cma *cma = cma_kobj->cma;
> > +
> > + kfree(cma_kobj);
> > + cma->kobj = NULL;
> > +}
> > +
> > +static struct attribute *cma_attrs[] = {
> > + &alloc_pages_success_attr.attr,
> > + &alloc_pages_fail_attr.attr,
> > + NULL,
> > +};
> > +ATTRIBUTE_GROUPS(cma);
> > +
> > +static struct kobject *cma_kobj_root;
> > +
> > +static struct kobj_type cma_ktype = {
> > + .release = cma_kobj_release,
> > + .sysfs_ops = &kobj_sysfs_ops,
> > + .default_groups = cma_groups
> > +};
> > +
> > +static int __init cma_sysfs_init(void)
> > +{
> > + unsigned int i;
> > +
> > + cma_kobj_root = kobject_create_and_add("cma", mm_kobj);
> > + if (!cma_kobj_root)
> > + return -ENOMEM;
> > +
> > + for (i = 0; i < cma_area_count; i++) {
> > + int err;
> > + struct cma *cma;
> > + struct cma_kobject *cma_kobj;
> > +
> > + cma_kobj = kzalloc(sizeof(*cma_kobj), GFP_KERNEL);
> > + if (!cma_kobj) {
> > + kobject_put(cma_kobj_root);
> > + return -ENOMEM;
>
> This leaks little cma_kobj's all over the floor. :)
I thought kobject_put(cma_kobj_root) should deal with it. No?
>
> What you might want here is a separate routine to clean up, because
> it has to loop through and free whatever was allocated on previous
> iterations of this loop here.
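Make sense. Would something like this work? Just an untested sketch on
top of this patch, and cma_sysfs_cleanup is a made-up name:

static void cma_sysfs_cleanup(unsigned int count)
{
	/*
	 * Drop the reference of every kobject that earlier iterations
	 * added successfully; cma_kobj_release() then frees each
	 * cma_kobj and clears cma->kobj.
	 */
	while (count--)
		kobject_put(&cma_areas[count].kobj->kobj);

	kobject_put(cma_kobj_root);
}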
>
> > + }
> > +
> > + cma = &cma_areas[i];
> > + cma->kobj = cma_kobj;
> > + cma_kobj->cma = cma;
> > + err = kobject_init_and_add(&cma_kobj->kobj, &cma_ktype,
> > + cma_kobj_root, "%s", cma->name);
> > + if (err) {
> > + kobject_put(&cma_kobj->kobj);
> > + kobject_put(cma_kobj_root);
> > + return err;
>
> Hopefully this little bit of logic could also go into the cleanup
> routine.
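Then both error paths could call the helper above, something like
(again, just a sketch):

		if (!cma_kobj) {
			cma_sysfs_cleanup(i);
			return -ENOMEM;
		}
		...
		if (err) {
			/* drop the ref of the kobject that just failed */
			kobject_put(&cma_kobj->kobj);
			cma_sysfs_cleanup(i);
			return err;
		}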
>
> > + }
> > + }
> > +
> > + return 0;
> > +}
> > +subsys_initcall(cma_sysfs_init);
> >
>
> thanks,
> --
> John Hubbard
> NVIDIA