[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <54FDDC83.3010202@oracle.com>
Date: Mon, 09 Mar 2015 13:46:43 -0400
From: Sasha Levin <sasha.levin@...cle.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>
CC: Dave Jones <davej@...emonkey.org.uk>,
LKML <linux-kernel@...r.kernel.org>, nicolas.pitre@...aro.org
Subject: sched: divide error in sg_capacity_factor
Hi all,
While fuzzing inside the latest -next kernel using trinity, I've stumbled on the following:
[ 936.784266] divide error: 0000 [#1] PREEMPT SMP KASAN
[ 936.789198] Dumping ftrace buffer:
[ 936.793957] (ftrace buffer empty)
[ 936.793957] Modules linked in:
[ 936.793957] CPU: 52 PID: 22110 Comm: trinity-c52 Tainted: G W 4.0.0-rc1-sasha-00044-ge21109a #2039
[ 936.793957] task: ffff8807ff293000 ti: ffff880f81fe8000 task.ti: ffff880f81fe8000
[ 936.793957] RIP: find_busiest_group (kernel/sched/fair.c:6152 kernel/sched/fair.c:6223 kernel/sched/fair.c:6341 kernel/sched/fair.c:6603)
[ 936.829403] RSP: 0000:ffff8810c28079a8 EFLAGS: 00010206
[ 936.829403] RAX: 00000000000003ff RBX: 000000000000004e RCX: 0000000000002000
[ 936.829403] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
[ 936.829403] RBP: ffff8810c2807be8 R08: 0000000000000001 R09: 0000000000000001
[ 936.829403] R10: 0000000000000001 R11: 0000000000000008 R12: dffffc0000000000
[ 936.829403] R13: 0000000000000001 R14: ffff8810c2807b40 R15: ffff8810c2807ce8
[ 936.829403] FS: 00007f89c95ff700(0000) GS:ffff8810c2800000(0000) knlGS:0000000000000000
[ 936.829403] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[ 936.829403] CR2: 0000000003503ff8 CR3: 0000000f8237b000 CR4: 00000000000007a0
[ 936.829403] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 936.829403] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000050602
[ 936.829403] Stack:
[ 936.829403] 0000000000000082 ffffffff00000001 ffff8810c28079d8 ffff8810c2817a88
[ 936.829403] 0000000000000000 1ffff10218500f4b 00000000c2817a88 ffff8810c2807d14
[ 936.829403] ffff8810c2807b50 ffff8810c2807cfc ffff8810c0740010 0000000307418e2e
[ 936.829403] Call Trace:
[ 936.829403] <IRQ>
[ 936.829403] ? __enqueue_entity (kernel/sched/fair.c:501)
[ 936.829403] ? update_group_capacity (kernel/sched/fair.c:6593)
[ 936.829403] ? update_cfs_shares (kernel/sched/fair.c:2375)
[ 936.829403] ? cpumask_next_and (lib/cpumask.c:40)
[ 936.829403] load_balance (kernel/sched/fair.c:6857)
[ 936.829403] ? _raw_spin_unlock_irqrestore (./arch/x86/include/asm/preempt.h:95 include/linux/spinlock_api_smp.h:163 kernel/locking/spinlock.c:191)
[ 936.829403] ? update_blocked_averages (kernel/sched/fair.c:5743)
[ 936.829403] ? find_busiest_group (kernel/sched/fair.c:6820)
[ 936.829403] ? run_rebalance_domains (kernel/sched/fair.c:7450 kernel/sched/fair.c:7659)
[ 936.829403] ? trace_hardirqs_on_caller (kernel/locking/lockdep.c:2566)
[ 936.829403] run_rebalance_domains (kernel/sched/fair.c:7494 kernel/sched/fair.c:7659)
[ 936.829403] ? run_rebalance_domains (kernel/sched/fair.c:7450 kernel/sched/fair.c:7659)
[ 936.829403] ? pick_next_task_fair (kernel/sched/fair.c:7654)
[ 936.829403] ? irq_exit (kernel/softirq.c:350 kernel/softirq.c:391)
[ 936.829403] __do_softirq (kernel/softirq.c:273 include/linux/jump_label.h:114 include/trace/events/irq.h:126 kernel/softirq.c:274)
[ 936.829403] irq_exit (kernel/softirq.c:350 kernel/softirq.c:391)
[ 936.829403] smp_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:918)
[ 936.829403] apic_timer_interrupt (arch/x86/kernel/entry_64.S:958)
[ 936.829403] <EOI>
[ 936.829403] ? is_module_address (kernel/module.c:3835)
[ 936.829403] ? __kernel_text_address (kernel/extable.c:104)
[ 936.829403] print_context_stack (arch/x86/kernel/dumpstack.c:105)
[ 936.829403] dump_trace (arch/x86/kernel/dumpstack_64.c:244)
[ 936.829403] save_stack_trace (arch/x86/kernel/stacktrace.c:64)
[ 936.829403] __set_page_owner (mm/page_owner.c:72)
[ 936.829403] ? __reset_page_owner (mm/page_owner.c:61)
[ 936.829403] ? __inc_zone_state (mm/vmstat.c:271)
[ 936.829403] get_page_from_freelist (include/linux/page_owner.h:26 mm/page_alloc.c:2176)
[ 936.829403] __alloc_pages_nodemask (mm/page_alloc.c:2844)
[ 936.829403] ? alloc_pages_vma (mm/mempolicy.c:2007)
[ 936.829403] ? debug_check_no_locks_freed (kernel/locking/lockdep.c:3051)
[ 936.829403] ? debug_check_no_locks_freed (kernel/locking/lockdep.c:3051)
[ 936.829403] ? __alloc_pages_direct_compact (mm/page_alloc.c:2797)
[ 936.829403] ? debug_check_no_locks_freed (kernel/locking/lockdep.c:3051)
[ 936.829403] ? arch_local_irq_restore (init/do_mounts.h:19)
[ 936.829403] ? trace_hardirqs_on_caller (kernel/locking/lockdep.c:2566)
[ 936.829403] alloc_pages_vma (mm/mempolicy.c:2007)
[ 936.829403] ? handle_mm_fault (mm/memory.c:2156 mm/memory.c:3164 mm/memory.c:3269 mm/memory.c:3298)
[ 936.829403] handle_mm_fault (mm/memory.c:2156 mm/memory.c:3164 mm/memory.c:3269 mm/memory.c:3298)
[ 936.829403] ? debug_check_no_locks_freed (kernel/locking/lockdep.c:3051)
[ 936.829403] ? __pmd_alloc (mm/memory.c:3280)
[ 936.829403] ? perf_event_context_sched_in (kernel/events/core.c:2755)
[ 936.829403] ? trace_hardirqs_on_caller (kernel/locking/lockdep.c:2566)
[ 936.829403] ? __do_page_fault (arch/x86/mm/fault.c:1173)
[ 936.829403] ? ___might_sleep (kernel/sched/core.c:7297 (discriminator 1))
[ 936.829403] ? find_vma (mm/mmap.c:2035)
[ 936.829403] __do_page_fault (arch/x86/mm/fault.c:1235)
[ 936.829403] ? finish_task_switch (kernel/sched/core.c:2214)
[ 936.829403] ? finish_task_switch (kernel/sched/sched.h:1058 kernel/sched/core.c:2210)
[ 936.829403] trace_do_page_fault (arch/x86/mm/fault.c:1329)
[ 936.829403] do_async_page_fault (arch/x86/kernel/kvm.c:280)
[ 936.829403] async_page_fault (arch/x86/kernel/entry_64.S:1295)
[ 936.829403] Code: 89 f8 48 c1 e8 03 42 0f b6 04 20 84 c0 74 08 3c 03 0f 8e 3a 18 00 00 8b 7e 08 44 89 e8 48 c1 e0 0a 48 8d 44 07 ff 48 89 fe 48 99 <48> f7 ff 31 d2 48 89 c7 44 89 e8 f7 f7 45 89 c5 49 81 c5 00 02
All code
========
0: 89 f8 mov %edi,%eax
2: 48 c1 e8 03 shr $0x3,%rax
6: 42 0f b6 04 20 movzbl (%rax,%r12,1),%eax
b: 84 c0 test %al,%al
d: 74 08 je 0x17
f: 3c 03 cmp $0x3,%al
11: 0f 8e 3a 18 00 00 jle 0x1851
17: 8b 7e 08 mov 0x8(%rsi),%edi
1a: 44 89 e8 mov %r13d,%eax
1d: 48 c1 e0 0a shl $0xa,%rax
21: 48 8d 44 07 ff lea -0x1(%rdi,%rax,1),%rax
26: 48 89 fe mov %rdi,%rsi
29: 48 99 cqto
2b:* 48 f7 ff idiv %rdi <-- trapping instruction
2e: 31 d2 xor %edx,%edx
30: 48 89 c7 mov %rax,%rdi
33: 44 89 e8 mov %r13d,%eax
36: f7 f7 div %edi
38: 45 89 c5 mov %r8d,%r13d
3b: 49 rex.WB
3c: 81 .byte 0x81
3d: c5 00 02 (bad)
...
Code starting with the faulting instruction
===========================================
0: 48 f7 ff idiv %rdi
3: 31 d2 xor %edx,%edx
5: 48 89 c7 mov %rax,%rdi
8: 44 89 e8 mov %r13d,%eax
b: f7 f7 div %edi
d: 45 89 c5 mov %r8d,%r13d
10: 49 rex.WB
11: 81 .byte 0x81
12: c5 00 02 (bad)
...
[ 936.829403] RIP find_busiest_group (kernel/sched/fair.c:6152 kernel/sched/fair.c:6223 kernel/sched/fair.c:6341 kernel/sched/fair.c:6603)
[ 936.829403] RSP <ffff8810c28079a8>
Thanks,
Sasha
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists