Message-ID: <1495658507-7413-2-git-send-email-Yazen.Ghannam@amd.com>
Date: Wed, 24 May 2017 15:41:46 -0500
From: Yazen Ghannam <Yazen.Ghannam@....com>
To: <linux-edac@...r.kernel.org>
CC: Borislav Petkov <bp@...e.de>, Tony Luck <tony.luck@...el.com>,
<x86@...nel.org>, <linux-kernel@...r.kernel.org>,
Yazen Ghannam <yazen.ghannam@....com>
Subject: [PATCH 2/3] x86/mce/AMD: Define a list_head for threshold blocks outside the list
From: Yazen Ghannam <yazen.ghannam@....com>
There needs to be a list_head that sits outside of a linked list's elements in
order to iterate over the list and reach all of its elements. This is because
the list_for_each* macros start iterating at head->next rather than at head
itself.

Define such a list_head for the threshold blocks list in struct threshold_bank,
since this is the container of the list. Use this list_head as the head of the
list instead of the first element in the blocks list.

This is needed by a future patch which reads the blocks list in the threshold
interrupt handler. Currently, block 0 is always skipped because it is the head
of the list.
Signed-off-by: Yazen Ghannam <yazen.ghannam@....com>
---
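Not part of the patch, just an illustration of the head/element distinction
described above; the bank, blk and bank_num variables and the handle_block()
call are made up for the example:

	struct threshold_bank *bank = per_cpu(threshold_banks, cpu)[bank_num];
	struct threshold_block *blk;

	/* Old anchoring: the head is block 0's own miscj node, so the walk
	 * starts at blocks->miscj.next and never yields block 0 itself. */
	list_for_each_entry(blk, &bank->blocks->miscj, miscj)
		handle_block(blk);

	/* New anchoring: blocks_head is not embedded in any block, so the
	 * walk yields every block on the list, including block 0. */
	list_for_each_entry(blk, &bank->blocks_head, miscj)
		handle_block(blk);

Anchoring the walk at a dedicated head is the usual pattern for struct
list_head containers; embedding the head in the first element works for
list_add() but makes a full traversal awkward.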
 arch/x86/include/asm/amd_nb.h        |  2 ++
 arch/x86/kernel/cpu/mcheck/mce_amd.c | 19 ++++++++++---------
 2 files changed, 12 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index da181ad..81334b4 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -55,6 +55,8 @@ struct threshold_bank {
 	struct kobject		*kobj;
 	struct threshold_block	*blocks;
 
+	struct list_head	blocks_head;
+
 	/* initialized to the number of CPUs on the node sharing this bank */
 	refcount_t		cpus;
 };
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index d11f94e..2074b870 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -1119,15 +1119,15 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
 		threshold_ktype.default_attrs[2] = NULL;
 	}
 
-	INIT_LIST_HEAD(&b->miscj);
-
-	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
-		list_add(&b->miscj,
-			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
-	} else {
+	if (!per_cpu(threshold_banks, cpu)[bank]->blocks) {
+		INIT_LIST_HEAD(&per_cpu(threshold_banks, cpu)[bank]->blocks_head);
 		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
 	}
 
+	INIT_LIST_HEAD(&b->miscj);
+
+	list_add(&b->miscj, &per_cpu(threshold_banks, cpu)[bank]->blocks_head);
+
 	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
 				   per_cpu(threshold_banks, cpu)[bank]->kobj,
 				   get_name(bank, b));
@@ -1158,7 +1158,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
 
 static int __threshold_add_blocks(struct threshold_bank *b)
 {
-	struct list_head *head = &b->blocks->miscj;
+	struct list_head *head = &b->blocks_head;
 	struct threshold_block *pos = NULL;
 	struct threshold_block *tmp = NULL;
 	int err = 0;
@@ -1256,7 +1256,7 @@ static void deallocate_threshold_block(unsigned int cpu,
 	if (!head)
 		return;
 
-	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+	list_for_each_entry_safe(pos, tmp, &head->blocks_head, miscj) {
 		kobject_put(&pos->kobj);
 		list_del(&pos->miscj);
 		kfree(pos);
@@ -1273,7 +1273,7 @@ static void __threshold_remove_blocks(struct threshold_bank *b)
 
 	kobject_del(b->kobj);
 
-	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
+	list_for_each_entry_safe(pos, tmp, &b->blocks_head, miscj)
 		kobject_del(&pos->kobj);
 }
 
@@ -1307,6 +1307,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 	deallocate_threshold_block(cpu, bank);
 
 free_out:
+	list_del(&b->blocks_head);
 	kobject_del(b->kobj);
 	kobject_put(b->kobj);
 	kfree(b);
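Again not part of this patch: a rough sketch of how the upcoming interrupt
handler change could walk the list once blocks_head is the anchor; the
overflow check and logging helper are hypothetical.

	struct threshold_block *blk;

	/* Walk every block of the bank, including block 0, which the old
	 * blocks->miscj anchoring would have skipped. */
	list_for_each_entry(blk, &per_cpu(threshold_banks, cpu)[bank]->blocks_head, miscj) {
		if (block_overflowed(blk))		/* hypothetical check */
			log_threshold_error(blk);	/* hypothetical logging */
	}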
--
2.7.4