[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20151104095557.3c3c8473@redhat.com>
Date: Wed, 4 Nov 2015 09:55:57 -0500
From: Luiz Capitulino <lcapitulino@...hat.com>
To: Fenghua Yu <fenghua.yu@...el.com>
Cc: "H Peter Anvin" <hpa@...or.com>, "Ingo Molnar" <mingo@...hat.com>,
"Thomas Gleixner" <tglx@...utronix.de>,
"Peter Zijlstra" <peterz@...radead.org>,
"linux-kernel" <linux-kernel@...r.kernel.org>,
"x86" <x86@...nel.org>,
"Vikas Shivappa" <vikas.shivappa@...ux.intel.com>
Subject: Re: [PATCH V15 05/11] x86/intel_rdt: Add Class of service
management
On Thu, 1 Oct 2015 23:09:39 -0700
Fenghua Yu <fenghua.yu@...el.com> wrote:
> Adds some data-structures and APIs to support Class of service
> management(closid). There is a new clos_cbm table which keeps a 1:1
> mapping between closid and capacity bit mask (cbm)
> and a count of usage of closid. Each task would be associated with a
> Closid at a time and this patch adds a new field closid to task_struct
> to keep track of the same.
>
> Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
> Signed-off-by: Fenghua Yu <fenghua.yu@...el.com>
> ---
> arch/x86/include/asm/intel_rdt.h | 12 ++++++
> arch/x86/kernel/cpu/intel_rdt.c | 82 +++++++++++++++++++++++++++++++++++++++-
> include/linux/sched.h | 3 ++
> 3 files changed, 95 insertions(+), 2 deletions(-)
> create mode 100644 arch/x86/include/asm/intel_rdt.h
>
> diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
> new file mode 100644
> index 0000000..88b7643
> --- /dev/null
> +++ b/arch/x86/include/asm/intel_rdt.h
> @@ -0,0 +1,12 @@
> +#ifndef _RDT_H_
> +#define _RDT_H_
> +
> +#ifdef CONFIG_INTEL_RDT
> +
> +struct clos_cbm_table {
> + unsigned long l3_cbm;
> + unsigned int clos_refcnt;
> +};
Isn't this a single entry rather than a table? The name clos_cbm_table suggests the whole table, but the struct describes one closid's entry.
> +
> +#endif
> +#endif
> diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
> index f49e970..d79213a 100644
> --- a/arch/x86/kernel/cpu/intel_rdt.c
> +++ b/arch/x86/kernel/cpu/intel_rdt.c
> @@ -24,17 +24,95 @@
>
> #include <linux/slab.h>
> #include <linux/err.h>
> +#include <asm/intel_rdt.h>
> +
> +/*
> + * cctable maintains 1:1 mapping between CLOSid and cache bitmask.
> + */
> +static struct clos_cbm_table *cctable;
> +/*
> + * closid availability bit map.
> + */
> +unsigned long *closmap;
> +static DEFINE_MUTEX(rdt_group_mutex);
> +
> +static inline void closid_get(u32 closid)
> +{
> + struct clos_cbm_table *cct = &cctable[closid];
> +
> + lockdep_assert_held(&rdt_group_mutex);
> +
> + cct->clos_refcnt++;
> +}
> +
> +static int closid_alloc(u32 *closid)
> +{
> + u32 maxid;
> + u32 id;
> +
> + lockdep_assert_held(&rdt_group_mutex);
> +
> + maxid = boot_cpu_data.x86_cache_max_closid;
> + id = find_first_zero_bit(closmap, maxid);
> + if (id == maxid)
> + return -ENOSPC;
This causes echo to print "No space left on device" when you run
out of CBMs. I think -ENOMEM makes more sense.
> +
> + set_bit(id, closmap);
> + closid_get(id);
> + *closid = id;
> +
> + return 0;
> +}
> +
> +static inline void closid_free(u32 closid)
> +{
> + clear_bit(closid, closmap);
> + cctable[closid].l3_cbm = 0;
> +}
> +
> +static void closid_put(u32 closid)
> +{
> + struct clos_cbm_table *cct = &cctable[closid];
> +
> + lockdep_assert_held(&rdt_group_mutex);
> + if (WARN_ON(!cct->clos_refcnt))
> + return;
> +
> + if (!--cct->clos_refcnt)
> + closid_free(closid);
> +}
This is very minor, but IMHO you got closid_put() and closid_free()
naming backwards. closid_free() is the opposite operation of
closid_alloc(). Likewise, closid_put() is the opposite of
closid_get().
>
> static int __init intel_rdt_late_init(void)
> {
> struct cpuinfo_x86 *c = &boot_cpu_data;
> + u32 maxid, max_cbm_len;
> + int err = 0, size;
>
> if (!cpu_has(c, X86_FEATURE_CAT_L3))
> return -ENODEV;
>
> - pr_info("Intel cache allocation detected\n");
> + maxid = c->x86_cache_max_closid;
> + max_cbm_len = c->x86_cache_max_cbm_len;
>
> - return 0;
> + size = maxid * sizeof(struct clos_cbm_table);
> + cctable = kzalloc(size, GFP_KERNEL);
> + if (!cctable) {
> + err = -ENOMEM;
> + goto out_err;
> + }
> +
> + size = BITS_TO_LONGS(maxid) * sizeof(long);
> + closmap = kzalloc(size, GFP_KERNEL);
> + if (!closmap) {
> + kfree(cctable);
> + err = -ENOMEM;
> + goto out_err;
> + }
> +
> + pr_info("Intel cache allocation enabled\n");
> +out_err:
> +
> + return err;
> }
>
> late_initcall(intel_rdt_late_init);
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index b7b9501..24bfbac 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1668,6 +1668,9 @@ struct task_struct {
> /* cg_list protected by css_set_lock and tsk->alloc_lock */
> struct list_head cg_list;
> #endif
> +#ifdef CONFIG_INTEL_RDT
> + u32 closid;
> +#endif
> #ifdef CONFIG_FUTEX
> struct robust_list_head __user *robust_list;
> #ifdef CONFIG_COMPAT
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists