Message-ID: <45EE9921DD12714AB1D7B7B9D2FE208B0230256D48BF@dbde03.ent.ti.com>
Date:	Wed, 27 Jul 2011 23:35:19 +0530
From:	"N, Mugunthan V" <mugunthanvnm@...com>
To:	Thomas Gleixner <tglx@...utronix.de>,
	LKML <linux-kernel@...r.kernel.org>
CC:	linux-rt-users <linux-rt-users@...r.kernel.org>
Subject: RE: [ANNOUNCE] 3.0-rt3

Hi RT folks,

I am testing 3.0-rt3 on an AM3517 (ARM Cortex-A8). Since the mainline kernel
does not boot on this board, I am carrying a patch on top of mainline to get
the AM3517 to boot. The kernel crashes when I start hackbench and cyclictest.

Two warnings were printed while the tests were starting, and then the kernel
crashed. The boot patch and the rt config file are attached.

[  266.001220] BUG: sleeping function called from invalid context at
kernel/rtmutex.c:645
[  266.001251] in_atomic(): 1, irqs_disabled(): 0, pid: 3, name: ksoftirqd/0
[  266.001281] 1 lock held by ksoftirqd/0/3:
[  266.001281]  #0:  (cpu_asid_lock){+.+...}, at: [<c006acfc>]
__new_context+0x14/0xbc
[  266.001403] [<c0065f40>] (unwind_backtrace+0x0/0xf8) from [<c0466c80>]
(rt_spin_lock+0x24/0x5c)
[  266.001434] [<c0466c80>] (rt_spin_lock+0x24/0x5c) from [<c006aca8>]
(set_mm_context+0x1c/0x5c)
[  266.001464] [<c006aca8>] (set_mm_context+0x1c/0x5c) from [<c006ad3c>]
(__new_context+0x54/0xbc)
[  266.001495] [<c006ad3c>] (__new_context+0x54/0xbc) from [<c0464f14>]
(__schedule+0x738/0x818)
[  266.001556] [<c0464f14>] (__schedule+0x738/0x818) from [<c0095408>]
(schedule+0x34/0xac)
[  266.001586] [<c0095408>] (schedule+0x34/0xac) from [<c046569c>]
(schedule_preempt_disabled+0x14/0x20)
[  266.001647] [<c046569c>] (schedule_preempt_disabled+0x14/0x20) from
[<c00a2a74>] (run_ksoftirqd+0x174/0x194)
[  266.001678] [<c00a2a74>] (run_ksoftirqd+0x174/0x194) from [<c00bc81c>]
(kthread+0x84/0x8c)
[  266.001739] [<c00bc81c>] (kthread+0x84/0x8c) from [<c005fd2c>]
(kernel_thread_exit+0x0/0x8)
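
The first trace reads as a sleeping lock being taken in atomic context:
set_mm_context() calls rt_spin_lock() (a sleeping rtmutex-based lock on RT)
while __new_context() holds cpu_asid_lock and in_atomic() is set. A minimal
sketch of that pattern, with hypothetical lock names rather than the actual
ARM ASID code, would be:

#include <linux/spinlock.h>

/*
 * Hypothetical illustration: on PREEMPT_RT a spinlock_t is backed by an
 * rtmutex and may sleep, so acquiring one while a raw spinlock is held
 * trips the "sleeping function called from invalid context" check in
 * kernel/rtmutex.c.
 */
static DEFINE_RAW_SPINLOCK(outer_raw_lock);	/* remains a true spinning lock on RT */
static DEFINE_SPINLOCK(inner_lock);		/* becomes a sleeping rtmutex on RT */

static void sleeping_lock_in_atomic_context(void)
{
	raw_spin_lock(&outer_raw_lock);		/* enter atomic context */
	spin_lock(&inner_lock);			/* BUG on RT: may sleep while atomic */
	spin_unlock(&inner_lock);
	raw_spin_unlock(&outer_raw_lock);
}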

[  267.494995] ------------[ cut here ]------------
[  267.495086] WARNING: at kernel/lockdep.c:939 __bfs+0x1f8/0x254()
[  267.495086] Modules linked in:
[  267.495147] [<c0065f40>] (unwind_backtrace+0x0/0xf8) from [<c009b774>]
(warn_slowpath_common+0x4c/0x64)
[  267.495178] [<c009b774>] (warn_slowpath_common+0x4c/0x64) from
[<c009b7a8>] (warn_slowpath_null+0x1c/0x24)
[  267.495208] [<c009b7a8>] (warn_slowpath_null+0x1c/0x24) from
[<c00d0014>] (__bfs+0x1f8/0x254)
[  267.495239] [<c00d0014>] (__bfs+0x1f8/0x254) from [<c00d38bc>]
(check_usage_backwards+0x74/0xfc)
[  267.495269] [<c00d38bc>] (check_usage_backwards+0x74/0xfc) from
[<c00d2990>] (mark_lock+0x20c/0x660)
[  267.495300] [<c00d2990>] (mark_lock+0x20c/0x660) from [<c00d495c>]
(__lock_acquire+0x74c/0x1c84)
[  267.495330] [<c00d495c>] (__lock_acquire+0x74c/0x1c84) from [<c00d64c0>]
(lock_acquire+0x104/0x124)
[  267.495361] [<c00d64c0>] (lock_acquire+0x104/0x124) from [<c046753c>]
(_raw_spin_lock+0x3c/0x4c)
[  267.495422] [<c046753c>] (_raw_spin_lock+0x3c/0x4c) from [<c04665a4>]
(rt_mutex_slowlock+0x28/0x178)
[  267.495452] [<c04665a4>] (rt_mutex_slowlock+0x28/0x178) from
[<c00eb734>] (rcu_boost_kthread+0xd4/0x168)
[  267.495513] [<c00eb734>] (rcu_boost_kthread+0xd4/0x168) from
[<c00bc81c>] (kthread+0x84/0x8c)
[  267.495544] [<c00bc81c>] (kthread+0x84/0x8c) from [<c005fd2c>]
(kernel_thread_exit+0x0/0x8)
[  267.495574] ---[ end trace 000000000000000d ]---

[  267.495605] Unable to handle kernel NULL pointer dereference at virtual
address 00000008
[  267.495605] pgd = c0004000
[  267.495635] [00000008] *pgd=00000000
[  267.495635] Internal error: Oops: 17 [#1] PREEMPT SMP
[  267.495666] Modules linked in:
[  267.495697] CPU: 0    Tainted: G        W
(3.0.0-rt3-00001-gcd4001e-dirty #6)
[  267.495697] PC is at __bfs+0x118/0x254
[  267.495727] LR is at warn_slowpath_null+0x1c/0x24
[  267.495758] pc : [<c00cff34>]    lr : [<c009b7a8>]    psr: 60000093
[  267.495758] sp : cf84bce8  ip : 00000000  fp : cf84bcfc
[  267.495788] r10: 00004954  r9 : 00004958  r8 : c056c688
[  267.495788] r7 : c8df6be0  r6 : c0948b20  r5 : c06f3258  r4 : 00000000
[  267.495819] r3 : c094cb10  r2 : 00000200  r1 : c068ee10  r0 : 00000000
[  267.495819] Flags: nZCv  IRQs off  FIQs on  Mode SVC_32  ISA ARM
Segment kernel
[  267.495849] Control: 10c5387d  Table: 8df68019  DAC: 00000017
[  267.495849] Process rcub0 (pid: 10, stack limit = 0xcf84a2f8)
[  267.495880] Stack: (0xcf84bce8 to 0xcf84c000)
[  267.495910] bce0:                   cf84bff8 cf84bd54 c00cfd14 00000000
00000000 cf84bd30
[  267.495941] bd00: cf84bd34 00000000 cf84bd30 cf8499d8 cf849520 c056c688
c00d3848 cf84a000
[  267.495941] bd20: c09dc8d0 c00d38bc 00000000 c00626a8 00000000 00000000
c06f3160 c005fd2c
[  267.495971] bd40: c005fd2c c06f31a0 00000000 00000000 00000000 c06f31a0
00000004 00000000
[  267.496002] bd60: 00000002 cf8499d8 cf849520 c00d2990 00000008 cf849520
00000000 cf849520
[  267.496032] bd80: 00000000 cf8499d8 cf84bf38 3c3be3df 00000000 c00d495c
00000093 00000000
[  267.496063] bda0: cf84a000 00000000 00000000 00000000 cf84a000 cf84a000
00000000 3c3be3de
[  267.496063] bdc0: 60000013 c0664ffc 00000000 cf849520 00000001 00000000
00000000 c046a3b0
[  267.496093] bde0: 00000000 c008b7a4 00000000 cf849520 00000010 cf849520
00000000 cf8499d8
[  267.496124] be00: c0666814 0000002a 00000000 c00d4670 cf849520 c0090bb8
00000001 00000000
[  267.496154] be20: cf84a000 00000000 00000000 00000000 60000013 cf84bf38
00000002 c00d64c0
[  267.496185] be40: 00000002 00000000 00000000 c04665a4 00000000 00000001
ffffffff 00000000
[  267.496215] be60: cf84bf24 c04665a4 00000001 cf84bf24 00000000 a0000013
00000002 c046753c
[  267.496215] be80: 00000002 00000000 c04665a4 cf84a000 ce25cae0 c0666800
cf84a000 c04665a4
[  267.496246] bea0: 00000000 00000000 60000093 00000000 00000002 c00d64c0
00000002 00000080
[  267.496276] bec0: 00000000 c00eb778 00000000 c0052520 c0052520 00000000
c0666800 c00eb778
[  267.496307] bee0: a0000013 00000000 cf849520 00000000 cf84a000 00000000
cf84bf0c ce25cae0
[  267.496337] bf00: c0666800 cf84a000 00000001 00000000 00000000 a0000013
cf84bf24 c00eb734
[  267.496337] bf20: c0052520 cf84995c cf849520 c04677a8 c0df0520 c00eb660
00000000 c00d3160
[  267.496368] bf40: cf84a000 00000001 cf84bf48 cf84bf48 ce25cae0 c0666800
c00eb660 cf84bf8c
[  267.496398] bf60: cf82fec8 c0666800 c00eb660 00000000 00000000 00000000
00000000 c00bc81c
[  267.496429] bf80: cf84a000 00000000 c0666800 00000000 00000000 c005ed18
dead4ead ffffffff
[  267.496459] bfa0: ffffffff c06a1120 00000000 00000000 c0569f74 cf84bfb4
cf84bfb4 00000000
[  267.496490] bfc0: cf82fec8 c00bc798 c06a1104 00000000 00000000 c0575564
cf84bfd8 cf84bfd8
[  267.496490] bfe0: 00000000 cf82fec8 c00bc798 c005fd2c 00000013 c005fd2c
fc9cff55 b687ef0f
[  267.496551] [<c00cff34>] (__bfs+0x118/0x254) from [<c00d38bc>]
(check_usage_backwards+0x74/0xfc)
[  267.496582] [<c00d38bc>] (check_usage_backwards+0x74/0xfc) from
[<c00d2990>] (mark_lock+0x20c/0x660)
[  267.496582] [<c00d2990>] (mark_lock+0x20c/0x660) from [<c00d495c>]
(__lock_acquire+0x74c/0x1c84)
[  267.496612] [<c00d495c>] (__lock_acquire+0x74c/0x1c84) from [<c00d64c0>]
(lock_acquire+0x104/0x124)
[  267.496643] [<c00d64c0>] (lock_acquire+0x104/0x124) from [<c046753c>]
(_raw_spin_lock+0x3c/0x4c)
[  267.496673] [<c046753c>] (_raw_spin_lock+0x3c/0x4c) from [<c04665a4>]
(rt_mutex_slowlock+0x28/0x178)
[  267.496734] [<c04665a4>] (rt_mutex_slowlock+0x28/0x178) from
[<c00eb734>] (rcu_boost_kthread+0xd4/0x168)
[  267.496765] [<c00eb734>] (rcu_boost_kthread+0xd4/0x168) from
[<c00bc81c>] (kthread+0x84/0x8c)
[  267.496795] [<c00bc81c>] (kthread+0x84/0x8c) from [<c005fd2c>]
(kernel_thread_exit+0x0/0x8)
[  267.496826] Code: e0837182 e1570001 2a000035 e59f3114 (e5941008)

---
Regards,
Mugunthan V N.

-----Original Message-----
From: linux-rt-users-owner@...r.kernel.org [mailto:linux-rt-users-owner@...r.kernel.org] On Behalf Of Thomas Gleixner
Sent: Sunday, July 24, 2011 4:04 PM
To: LKML
Cc: linux-rt-users
Subject: [ANNOUNCE] 3.0-rt3

Dear RT Folks,

I'm pleased to announce the 3.0-rt3 release.

Changes versus 3.0-rt2:

  * Fix earlyprintk really

  * AMD NB fix for allocation in smp function call, reported by Ed
    (see the sketch below)

  * Disabled a few config options on RT which have known issues
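
For context on the AMD NB item: the old code allocated the L3 cache
descriptors with kzalloc(GFP_ATOMIC) from amd_init_l3_cache(), which is
reached from the per-CPU CPUID probing that runs via an SMP function call;
on RT the allocator's internal locks are sleeping locks, so even an atomic
allocation from that context is not safe. The delta patch below removes the
allocation by embedding struct amd_l3_cache in struct amd_northbridge. A
rough sketch of the problematic shape (hypothetical names, not the actual
cacheinfo code):

#include <linux/slab.h>
#include <linux/smp.h>

/*
 * Rough sketch (hypothetical names) of allocating from an SMP function
 * call.  The callback runs with interrupts disabled on the target CPU;
 * on PREEMPT_RT the allocator takes sleeping locks, so even GFP_ATOMIC
 * is not safe here.  The fix avoids the allocation in this path entirely.
 */
static void probe_one_cpu(void *info)
{
	void **slot = info;

	*slot = kzalloc(64, GFP_ATOMIC);	/* problematic on RT in this context */
}

static void *probe_cpu(int cpu)
{
	void *buf = NULL;

	smp_call_function_single(cpu, probe_one_cpu, &buf, 1);	/* callback runs with IRQs off */
	return buf;
}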

Patch against 3.0 can be found here:

  http://www.kernel.org/pub/linux/kernel/projects/rt/patch-3.0-rt3.patch.bz2

The split quilt queue is available at:

  http://www.kernel.org/pub/linux/kernel/projects/rt/patches-3.0-rt3.tar.bz2

Delta patch below.

Thanks,

        tglx
----
 arch/x86/include/asm/amd_nb.h         |    6 +
 arch/x86/kernel/cpu/intel_cacheinfo.c |  121 +++++++++++-----------------------
 arch/x86/kernel/early_printk.c        |    2
 drivers/net/Kconfig                   |    1
 localversion-rt                       |    2
 mm/Kconfig                            |    2
 6 files changed, 51 insertions(+), 83 deletions(-)

Index: linux-2.6/arch/x86/kernel/early_printk.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/early_printk.c
+++ linux-2.6/arch/x86/kernel/early_printk.c
@@ -171,7 +171,7 @@ static struct console early_serial_conso

 static inline void early_console_register(struct console *con, int keep_early)
 {
-       if (early_console->index != -1) {
+       if (con->index != -1) {
                printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
                       con->name);
                return;
Index: linux-2.6/localversion-rt
===================================================================
--- linux-2.6.orig/localversion-rt
+++ linux-2.6/localversion-rt
@@ -1 +1 @@
--rt2
+-rt3
Index: linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -151,28 +151,17 @@ union _cpuid4_leaf_ecx {
        u32 full;
 };

-struct amd_l3_cache {
-       struct   amd_northbridge *nb;
-       unsigned indices;
-       u8       subcaches[4];
-};
-
-struct _cpuid4_info {
+struct _cpuid4_info_regs {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
-       struct amd_l3_cache *l3;
-       DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+       struct amd_northbridge *nb;
 };

-/* subset of above _cpuid4_info w/o shared_cpu_map */
-struct _cpuid4_info_regs {
-       union _cpuid4_leaf_eax eax;
-       union _cpuid4_leaf_ebx ebx;
-       union _cpuid4_leaf_ecx ecx;
-       unsigned long size;
-       struct amd_l3_cache *l3;
+struct _cpuid4_info {
+       struct _cpuid4_info_regs base;
+       DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
 };

 unsigned short                 num_cache_leaves;
@@ -314,12 +303,13 @@ struct _cache_attr {
 /*
  * L3 cache descriptors
  */
-static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
+static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
 {
+       struct amd_l3_cache *l3 = &nb->l3_cache;
        unsigned int sc0, sc1, sc2, sc3;
        u32 val = 0;

-       pci_read_config_dword(l3->nb->misc, 0x1C4, &val);
+       pci_read_config_dword(nb->misc, 0x1C4, &val);

        /* calculate subcache sizes */
        l3->subcaches[0] = sc0 = !(val & BIT(0));
@@ -333,33 +323,16 @@ static void __cpuinit amd_calc_l3_indice
 static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
                                        int index)
 {
-       static struct amd_l3_cache *__cpuinitdata l3_caches;
        int node;

        /* only for L3, and not in virtualized environments */
-       if (index < 3 || amd_nb_num() == 0)
+       if (index < 3)
                return;

-       /*
-        * Strictly speaking, the amount in @size below is leaked since it is
-        * never freed but this is done only on shutdown so it doesn't matter.
-        */
-       if (!l3_caches) {
-               int size = amd_nb_num() * sizeof(struct amd_l3_cache);
-
-               l3_caches = kzalloc(size, GFP_ATOMIC);
-               if (!l3_caches)
-                       return;
-       }
-
        node = amd_get_nb_id(smp_processor_id());
-
-       if (!l3_caches[node].nb) {
-               l3_caches[node].nb = node_to_amd_nb(node);
-               amd_calc_l3_indices(&l3_caches[node]);
-       }
-
-       this_leaf->l3 = &l3_caches[node];
+       this_leaf->nb = node_to_amd_nb(node);
+       if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
+               amd_calc_l3_indices(this_leaf->nb);
 }

 /*
@@ -369,11 +342,11 @@ static void __cpuinit amd_init_l3_cache(
  *
  * @returns: the disabled index if used or negative value if slot free.
  */
-int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
+int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
 {
        unsigned int reg = 0;

-       pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);
+       pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

        /* check whether this slot is activated already */
        if (reg & (3UL << 30))
@@ -387,11 +360,10 @@ static ssize_t show_cache_disable(struct
 {
        int index;

-       if (!this_leaf->l3 ||
-           !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+       if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                return -EINVAL;

-       index = amd_get_l3_disable_slot(this_leaf->l3, slot);
+       index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
        if (index >= 0)
                return sprintf(buf, "%d\n", index);

@@ -408,7 +380,7 @@ show_cache_disable_##slot(struct _cpuid4
 SHOW_CACHE_DISABLE(0)
 SHOW_CACHE_DISABLE(1)

-static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
+static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
                                 unsigned slot, unsigned long idx)
 {
        int i;
@@ -421,10 +393,10 @@ static void amd_l3_disable_index(struct
        for (i = 0; i < 4; i++) {
                u32 reg = idx | (i << 20);

-               if (!l3->subcaches[i])
+               if (!nb->l3_cache.subcaches[i])
                        continue;

-               pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
+               pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

                /*
                 * We need to WBINVD on a core on the node containing the L3
@@ -434,7 +406,7 @@ static void amd_l3_disable_index(struct
                wbinvd_on_cpu(cpu);

                reg |= BIT(31);
-               pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
+               pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
        }
 }

@@ -448,24 +420,24 @@ static void amd_l3_disable_index(struct
  *
  * @return: 0 on success, error status on failure
  */
-int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
+int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
                            unsigned long index)
 {
        int ret = 0;

        /*  check if @slot is already used or the index is already disabled */
-       ret = amd_get_l3_disable_slot(l3, slot);
+       ret = amd_get_l3_disable_slot(nb, slot);
        if (ret >= 0)
                return -EINVAL;

-       if (index > l3->indices)
+       if (index > nb->l3_cache.indices)
                return -EINVAL;

        /* check whether the other slot has disabled the same index already */
-       if (index == amd_get_l3_disable_slot(l3, !slot))
+       if (index == amd_get_l3_disable_slot(nb, !slot))
                return -EINVAL;

-       amd_l3_disable_index(l3, cpu, slot, index);
+       amd_l3_disable_index(nb, cpu, slot, index);

        return 0;
 }
@@ -480,8 +452,7 @@ static ssize_t store_cache_disable(struc
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

-       if (!this_leaf->l3 ||
-           !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+       if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                return -EINVAL;

        cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
@@ -489,7 +460,7 @@ static ssize_t store_cache_disable(struc
        if (strict_strtoul(buf, 10, &val) < 0)
                return -EINVAL;

-       err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
+       err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
        if (err) {
                if (err == -EEXIST)
                        printk(KERN_WARNING "L3 disable slot %d in use!\n",
@@ -518,7 +489,7 @@ static struct _cache_attr cache_disable_
 static ssize_t
 show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
 {
-       if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+       if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return -EINVAL;

        return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
@@ -533,7 +504,7 @@ store_subcaches(struct _cpuid4_info *thi
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

-       if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+       if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return -EINVAL;

        if (strict_strtoul(buf, 16, &val) < 0)
@@ -769,7 +740,7 @@ static void __cpuinit cache_shared_cpu_m
                return;
        }
        this_leaf = CPUID4_INFO_IDX(cpu, index);
-       num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
+       num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

        if (num_threads_sharing == 1)
                cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
@@ -820,29 +791,19 @@ static void __cpuinit free_cache_attribu
        for (i = 0; i < num_cache_leaves; i++)
                cache_remove_shared_cpu_map(cpu, i);

-       kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
        kfree(per_cpu(ici_cpuid4_info, cpu));
        per_cpu(ici_cpuid4_info, cpu) = NULL;
 }

-static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
-{
-       struct _cpuid4_info_regs *leaf_regs =
-               (struct _cpuid4_info_regs *)this_leaf;
-
-       return cpuid4_cache_lookup_regs(index, leaf_regs);
-}
-
 static void __cpuinit get_cpu_leaves(void *_retval)
 {
        int j, *retval = _retval, cpu = smp_processor_id();

        /* Do cpuid and store the results */
        for (j = 0; j < num_cache_leaves; j++) {
-               struct _cpuid4_info *this_leaf;
-               this_leaf = CPUID4_INFO_IDX(cpu, j);
-               *retval = cpuid4_cache_lookup(j, this_leaf);
+               struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
+
+               *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
                if (unlikely(*retval < 0)) {
                        int i;

@@ -900,16 +861,16 @@ static ssize_t show_##file_name(struct _
        return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
 }

-show_one_plus(level, eax.split.level, 0);
-show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
-show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
-show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
-show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
+show_one_plus(level, base.eax.split.level, 0);
+show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
+show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
+show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
+show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);

 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
                         unsigned int cpu)
 {
-       return sprintf(buf, "%luK\n", this_leaf->size / 1024);
+       return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
 }

 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
@@ -946,7 +907,7 @@ static inline ssize_t show_shared_cpu_li
 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
                         unsigned int cpu)
 {
-       switch (this_leaf->eax.split.type) {
+       switch (this_leaf->base.eax.split.type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
@@ -1135,7 +1096,7 @@ static int __cpuinit cache_add_dev(struc

                ktype_cache.default_attrs = default_attrs;
 #ifdef CONFIG_AMD_NB
-               if (this_leaf->l3)
+               if (this_leaf->base.nb)
                        ktype_cache.default_attrs = amd_l3_attrs();
 #endif
                retval = kobject_init_and_add(&(this_object->kobj),
Index: linux-2.6/arch/x86/include/asm/amd_nb.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/amd_nb.h
+++ linux-2.6/arch/x86/include/asm/amd_nb.h
@@ -19,9 +19,15 @@ extern int amd_numa_init(void);
 extern int amd_get_subcaches(int);
 extern int amd_set_subcaches(int, int);

+struct amd_l3_cache {
+       unsigned indices;
+       u8       subcaches[4];
+};
+
 struct amd_northbridge {
        struct pci_dev *misc;
        struct pci_dev *link;
+       struct amd_l3_cache l3_cache;
 };

 struct amd_northbridge_info {
Index: linux-2.6/drivers/net/Kconfig
===================================================================
--- linux-2.6.orig/drivers/net/Kconfig
+++ linux-2.6/drivers/net/Kconfig
@@ -3410,6 +3410,7 @@ config NET_FC

 config NETCONSOLE
        tristate "Network console logging support"
+       depends on !PREEMPT_RT_FULL
        ---help---
        If you want to log kernel messages over the network, enable this.
        See <file:Documentation/networking/netconsole.txt> for details.
Index: linux-2.6/mm/Kconfig
===================================================================
--- linux-2.6.orig/mm/Kconfig
+++ linux-2.6/mm/Kconfig
@@ -304,7 +304,7 @@ config NOMMU_INITIAL_TRIM_EXCESS

 config TRANSPARENT_HUGEPAGE
        bool "Transparent Hugepage Support"
-       depends on X86 && MMU
+       depends on X86 && MMU && !PREEMPT_RT_FULL
        select COMPACTION
        help
          Transparent Hugepages allows the kernel to use huge pages and
--
To unsubscribe from this list: send the line "unsubscribe linux-rt-users" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Download attachment "am3517evm.patch" of type "application/octet-stream" (4456 bytes)

Download attachment "am3517-rt.config" of type "application/octet-stream" (62917 bytes)
