diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt index 5c51ed406d1d..eedecbbaac57 100644 --- a/Documentation/cgroups/cpusets.txt +++ b/Documentation/cgroups/cpusets.txt @@ -345,14 +345,14 @@ the named feature on. The implementation is simple. Setting the flag 'cpuset.memory_spread_page' turns on a per-process flag -PF_SPREAD_PAGE for each task that is in that cpuset or subsequently +PFA_SPREAD_PAGE for each task that is in that cpuset or subsequently joins that cpuset. The page allocation calls for the page cache -is modified to perform an inline check for this PF_SPREAD_PAGE task +is modified to perform an inline check for this PFA_SPREAD_PAGE task flag, and if set, a call to a new routine cpuset_mem_spread_node() returns the node to prefer for the allocation. Similarly, setting 'cpuset.memory_spread_slab' turns on the flag -PF_SPREAD_SLAB, and appropriately marked slab caches will allocate +PFA_SPREAD_SLAB, and appropriately marked slab caches will allocate pages from the node returned by cpuset_mem_spread_node(). The cpuset_mem_spread_node() routine is also simple. It uses the diff --git a/Makefile b/Makefile index ec64e6f97222..f86a857ae320 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 2 -SUBLEVEL = 93 +SUBLEVEL = 94 EXTRAVERSION = NAME = Saber-toothed Squirrel diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h index 4010f1fc5b65..cea944caca61 100644 --- a/arch/m32r/include/asm/io.h +++ b/arch/m32r/include/asm/io.h @@ -67,6 +67,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size) extern void iounmap(volatile void __iomem *addr); #define ioremap_nocache(off,size) ioremap(off,size) +#define ioremap_wc ioremap_nocache /* * IO bus memory addresses are also 1:1 with the physical address @@ -162,13 +163,21 @@ static inline void _writel(unsigned long l, unsigned long addr) #define __raw_writew writew #define __raw_writel writel -#define ioread8 read +#define ioread8 readb #define ioread16 readw #define ioread32 readl #define iowrite8 writeb #define iowrite16 writew #define iowrite32 writel +#define ioread8_rep(p, dst, count) insb((unsigned long)(p), (dst), (count)) +#define ioread16_rep(p, dst, count) insw((unsigned long)(p), (dst), (count)) +#define ioread32_rep(p, dst, count) insl((unsigned long)(p), (dst), (count)) + +#define iowrite8_rep(p, src, count) outsb((unsigned long)(p), (src), (count)) +#define iowrite16_rep(p, src, count) outsw((unsigned long)(p), (src), (count)) +#define iowrite32_rep(p, src, count) outsl((unsigned long)(p), (src), (count)) + #define mmiowb() #define flush_write_buffers() do { } while (0) /* M32R_FIXME */ diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 32103cc2a257..88f19e387b5d 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -247,7 +247,8 @@ int __compute_return_epc(struct pt_regs *regs) return -EFAULT; sigill: - printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); - force_sig(SIGBUS, current); + pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n", + current->comm); + force_sig(SIGILL, current); return -EFAULT; } diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 57ba13edb03a..3fc1691110dc 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -5,7 +5,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
* - * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle + * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc. */ #include @@ -34,6 +34,12 @@ EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(kernel_thread); +/* + * Functions that operate on entire pages. Mostly used by memory management. + */ +EXPORT_SYMBOL(clear_page); +EXPORT_SYMBOL(copy_page); + /* * Userspace access stuff. */ diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index d02765708ddb..4e40a90f2446 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -203,7 +203,7 @@ static inline int mips_atomic_set(struct pt_regs *regs, "1: ll %[old], (%[addr]) \n" " move %[tmp], %[new] \n" "2: sc %[tmp], (%[addr]) \n" - " bnez %[tmp], 4f \n" + " beqz %[tmp], 4f \n" "3: \n" " .subsection 2 \n" "4: b 1b \n" diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 4d8c1623eee2..0d3871cf6166 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -3,8 +3,8 @@ # obj-y += cache.o dma-default.o extable.o fault.o \ - init.o mmap.o tlbex.o tlbex-fault.o uasm.o \ - page.o + init.o mmap.o page.o page-funcs.o \ + tlbex.o tlbex-fault.o uasm.o obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o diff --git a/arch/mips/mm/page-funcs.S b/arch/mips/mm/page-funcs.S new file mode 100644 index 000000000000..48a6b38ff13e --- /dev/null +++ b/arch/mips/mm/page-funcs.S @@ -0,0 +1,50 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Micro-assembler generated clear_page/copy_page functions. + * + * Copyright (C) 2012 MIPS Technologies, Inc. + * Copyright (C) 2012 Ralf Baechle + */ +#include +#include + +#ifdef CONFIG_SIBYTE_DMA_PAGEOPS +#define cpu_clear_page_function_name clear_page_cpu +#define cpu_copy_page_function_name copy_page_cpu +#else +#define cpu_clear_page_function_name clear_page +#define cpu_copy_page_function_name copy_page +#endif + +/* + * Maximum sizes: + * + * R4000 128 bytes S-cache: 0x058 bytes + * R4600 v1.7: 0x05c bytes + * R4600 v2.0: 0x060 bytes + * With prefetching, 16 word strides 0x120 bytes + */ +EXPORT(__clear_page_start) +LEAF(cpu_clear_page_function_name) +1: j 1b /* Dummy, will be replaced. */ + .space 288 +END(cpu_clear_page_function_name) +EXPORT(__clear_page_end) + +/* + * Maximum sizes: + * + * R4000 128 bytes S-cache: 0x11c bytes + * R4600 v1.7: 0x080 bytes + * R4600 v2.0: 0x07c bytes + * With prefetching, 16 word strides 0x540 bytes + */ +EXPORT(__copy_page_start) +LEAF(cpu_copy_page_function_name) +1: j 1b /* Dummy, will be replaced. */ + .space 1344 +END(cpu_copy_page_function_name) +EXPORT(__copy_page_end) diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index 36272f7d3744..a22bd403dca3 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -6,6 +6,7 @@ * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2007 Maciej W. Rozycki * Copyright (C) 2008 Thiemo Seufer + * Copyright (C) 2012 MIPS Technologies, Inc. 
*/ #include #include @@ -72,45 +73,6 @@ static struct uasm_reloc __cpuinitdata relocs[5]; #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) -/* - * Maximum sizes: - * - * R4000 128 bytes S-cache: 0x058 bytes - * R4600 v1.7: 0x05c bytes - * R4600 v2.0: 0x060 bytes - * With prefetching, 16 word strides 0x120 bytes - */ - -static u32 clear_page_array[0x120 / 4]; - -#ifdef CONFIG_SIBYTE_DMA_PAGEOPS -void clear_page_cpu(void *page) __attribute__((alias("clear_page_array"))); -#else -void clear_page(void *page) __attribute__((alias("clear_page_array"))); -#endif - -EXPORT_SYMBOL(clear_page); - -/* - * Maximum sizes: - * - * R4000 128 bytes S-cache: 0x11c bytes - * R4600 v1.7: 0x080 bytes - * R4600 v2.0: 0x07c bytes - * With prefetching, 16 word strides 0x540 bytes - */ -static u32 copy_page_array[0x540 / 4]; - -#ifdef CONFIG_SIBYTE_DMA_PAGEOPS -void -copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array"))); -#else -void copy_page(void *to, void *from) __attribute__((alias("copy_page_array"))); -#endif - -EXPORT_SYMBOL(copy_page); - - static int pref_bias_clear_store __cpuinitdata; static int pref_bias_copy_load __cpuinitdata; static int pref_bias_copy_store __cpuinitdata; @@ -283,10 +245,15 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off) } } +extern u32 __clear_page_start; +extern u32 __clear_page_end; +extern u32 __copy_page_start; +extern u32 __copy_page_end; + void __cpuinit build_clear_page(void) { int off; - u32 *buf = (u32 *)&clear_page_array; + u32 *buf = &__clear_page_start; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; int i; @@ -357,17 +324,17 @@ void __cpuinit build_clear_page(void) uasm_i_jr(&buf, RA); uasm_i_nop(&buf); - BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array)); + BUG_ON(buf > &__clear_page_end); uasm_resolve_relocs(relocs, labels); pr_debug("Synthesized clear page handler (%u instructions).\n", - (u32)(buf - clear_page_array)); + (u32)(buf - &__clear_page_start)); pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); - for (i = 0; i < (buf - clear_page_array); i++) - pr_debug("\t.word 0x%08x\n", clear_page_array[i]); + for (i = 0; i < (buf - &__clear_page_start); i++) + pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]); pr_debug("\t.set pop\n"); } @@ -428,7 +395,7 @@ static inline void build_copy_store_pref(u32 **buf, int off) void __cpuinit build_copy_page(void) { int off; - u32 *buf = (u32 *)©_page_array; + u32 *buf = &__copy_page_start; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; int i; @@ -596,21 +563,23 @@ void __cpuinit build_copy_page(void) uasm_i_jr(&buf, RA); uasm_i_nop(&buf); - BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array)); + BUG_ON(buf > &__copy_page_end); uasm_resolve_relocs(relocs, labels); pr_debug("Synthesized copy page handler (%u instructions).\n", - (u32)(buf - copy_page_array)); + (u32)(buf - &__copy_page_start)); pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); - for (i = 0; i < (buf - copy_page_array); i++) - pr_debug("\t.word 0x%08x\n", copy_page_array[i]); + for (i = 0; i < (buf - &__copy_page_start); i++) + pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]); pr_debug("\t.set pop\n"); } #ifdef CONFIG_SIBYTE_DMA_PAGEOPS +extern void clear_page_cpu(void *page); +extern void copy_page_cpu(void *to, void *from); /* * Pad descriptors to cacheline, since each is exclusively owned by a diff --git a/arch/parisc/kernel/syscall_table.S 
b/arch/parisc/kernel/syscall_table.S index 4014d9064be1..4fdbb581f3b5 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -364,7 +364,7 @@ ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */ ENTRY_SAME(add_key) ENTRY_SAME(request_key) /* 265 */ - ENTRY_SAME(keyctl) + ENTRY_COMP(keyctl) ENTRY_SAME(ioprio_set) ENTRY_SAME(ioprio_get) ENTRY_SAME(inotify_init) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 578e5a0d116d..b3b9d089dda6 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1054,7 +1054,9 @@ " .llong 0\n" \ " .llong 0\n" \ ".previous" \ - : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;}) + : "=r" (rval) \ + : "i" (CPU_FTR_CELL_TB_BUG) : "cr0"); \ + rval;}) #else #define mftb() ({unsigned long rval; \ asm volatile("mftb %0" : "=r" (rval)); rval;}) diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 9a52349874ee..6ca7fe0bd420 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -863,6 +863,19 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) goto instr_done; #endif case 19: /* mfcr */ + if ((instr >> 20) & 1) { + imm = 0xf0000000UL; + for (sh = 0; sh < 8; ++sh) { + if (instr & (0x80000 >> sh)) { + regs->gpr[rd] = regs->ccr & imm; + break; + } + imm >>= 4; + } + + goto instr_done; + } + regs->gpr[rd] = regs->ccr; regs->gpr[rd] &= 0xffffffffUL; goto instr_done; diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index b239ff53b189..2be14b2c7155 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -62,6 +62,12 @@ static inline void syscall_get_arguments(struct task_struct *task, { unsigned long mask = -1UL; + /* + * No arguments for this syscall, there's nothing to do. + */ + if (!n) + return; + BUG_ON(i + n > 6); #ifdef CONFIG_COMPAT if (test_tsk_thread_flag(task, TIF_31BIT)) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 7bdc8f4075cd..336c8aedb6a1 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -32,9 +32,9 @@ #define segment_eq(a, b) ((a).seg == (b).seg) -#define __addr_ok(addr) \ - ((unsigned long __force)(addr) < \ - (current_thread_info()->addr_limit.seg)) +#define user_addr_max() (current_thread_info()->addr_limit.seg) +#define __addr_ok(addr) \ + ((unsigned long __force)(addr) < user_addr_max()) /* * Test whether a block of memory is a valid user space address. @@ -46,14 +46,14 @@ * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... */ -#define __range_not_ok(addr, size) \ +#define __range_not_ok(addr, size, limit) \ ({ \ unsigned long flag, roksum; \ __chk_user_ptr(addr); \ asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ : "=&r" (flag), "=r" (roksum) \ : "1" (addr), "g" ((long)(size)), \ - "rm" (current_thread_info()->addr_limit.seg)); \ + "rm" (limit)); \ flag; \ }) @@ -76,7 +76,8 @@ * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. 
*/ -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) +#define access_ok(type, addr, size) \ + (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) /* * The exception table consists of pairs of addresses: the first is the diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1c041e07c3c9..57cb4fae620d 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1461,6 +1461,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); } +static inline int +valid_user_frame(const void __user *fp, unsigned long size) +{ + return (__range_not_ok(fp, size, TASK_SIZE) == 0); +} + #ifdef CONFIG_COMPAT static inline int perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) @@ -1485,6 +1491,9 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) if (fp < compat_ptr(regs->sp)) break; + if (!valid_user_frame(fp, sizeof(frame))) + break; + perf_callchain_store(entry, frame.return_address); fp = compat_ptr(frame.next_frame); } @@ -1531,6 +1540,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) if ((unsigned long)fp < regs->sp) break; + if (!valid_user_frame(fp, sizeof(frame))) + break; + perf_callchain_store(entry, frame.return_address); fp = frame.next_frame; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0fb33a013863..88280620f77f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6671,6 +6671,14 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; exec_control &= ~CPU_BASED_TPR_SHADOW; exec_control |= vmcs12->cpu_based_vm_exec_control; + + if (!(exec_control & CPU_BASED_TPR_SHADOW)) { +#ifdef CONFIG_X86_64 + exec_control |= CPU_BASED_CR8_LOAD_EXITING | + CPU_BASED_CR8_STORE_EXITING; +#endif + } + /* * Merging of IO and MSR bitmaps not currently supported. * Rather, exit every time. diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 38eaf74837bc..2c7daef5c65f 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -540,15 +540,53 @@ static void __devinit twinhead_reserve_killing_zone(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone); /* - * Broadwell EP Home Agent BARs erroneously return non-zero values when read. + * Device [8086:2fc0] + * Erratum HSE43 + * CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset + * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html * - * See http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html - * entry BDF2. 
+ * Devices [8086:6f60,6fa0,6fc0] + * Erratum BDF2 + * PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration + * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html */ -static void pci_bdwep_bar(struct pci_dev *dev) +static void pci_invalid_bar(struct pci_dev *dev) { dev->non_compliant_bars = 1; } -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_bdwep_bar); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar); + +/* + * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff] + * + * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to + * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used + * for soft poweroff and suspend-to-RAM. + * + * As far as we know, this is related to the address space, not to the Root + * Port itself. Attaching the quirk to the Root Port is a convenience, but + * it could probably also be a standalone DMI quirk. + * + * https://bugzilla.kernel.org/show_bug.cgi?id=103211 + */ +static void quirk_apple_mbp_poweroff(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + + if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") && + !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) || + pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0)) + return; + + res = request_mem_region(0x7fa00000, 0x200000, + "MacBook Pro poweroff workaround"); + if (res) + dev_info(dev, "claimed %s %pR\n", res->name, res); + else + dev_info(dev, "can't work around MacBook Pro poweroff issue\n"); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 6790cf7eba5a..6731e919c416 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1240,7 +1240,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *subdomain) { - struct gpd_link *link; + struct gpd_link *l, *link; int ret = -EINVAL; if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) @@ -1249,7 +1249,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, start: genpd_acquire_lock(genpd); - list_for_each_entry(link, &genpd->master_links, master_node) { + list_for_each_entry_safe(link, l, &genpd->master_links, master_node) { if (link->slave != subdomain) continue; diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index f7395c438a00..2c71cc4f3039 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -879,9 +879,10 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr, ssize_t err; int i, rc; char *str = buf; - struct tpm_chip *chip = dev_get_drvdata(dev); + memset(&tpm_cmd, 0, sizeof(tpm_cmd)); + tpm_cmd.header.in = tpm_readpubek_header; err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, "attempting to read the PUBEK"); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index ca67338e5533..8ea257ef1139 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2059,10 +2059,12 @@ int 
i915_driver_load(struct drm_device *dev, unsigned long flags) * and the registers being closely associated. * * According to chipset errata, on the 965GM, MSI interrupts may - * be lost or delayed, but we use them anyways to avoid - * stuck interrupts on some machines. + * be lost or delayed, and was defeatured. MSI interrupts seem to + * get lost on g4x as well, and interrupt delivery seems to stay + * properly dead afterwards. So we'll just disable them for all + * pre-gen5 chipsets. */ - if (!IS_I945G(dev) && !IS_I945GM(dev)) + if (INTEL_INFO(dev)->gen >= 5) pci_enable_msi(dev->pdev); spin_lock_init(&dev_priv->gt_lock); diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 9353992f9eea..29478d2a0fcb 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -58,17 +58,6 @@ struct ib_update_work { u8 port_num; }; -static inline int start_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; -} - -static inline int end_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? - 0 : device->phys_port_cnt; -} - int ib_get_cached_gid(struct ib_device *device, u8 port_num, int index, @@ -78,12 +67,12 @@ int ib_get_cached_gid(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if (!rdma_is_port_valid(device, port_num)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.gid_cache[port_num - start_port(device)]; + cache = device->cache.gid_cache[port_num - rdma_start_port(device)]; if (index < 0 || index >= cache->table_len) ret = -EINVAL; @@ -112,11 +101,11 @@ int ib_find_cached_gid(struct ib_device *device, read_lock_irqsave(&device->cache.lock, flags); - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { cache = device->cache.gid_cache[p]; for (i = 0; i < cache->table_len; ++i) { if (!memcmp(gid, &cache->table[i], sizeof *gid)) { - *port_num = p + start_port(device); + *port_num = p + rdma_start_port(device); if (index) *index = i; ret = 0; @@ -140,12 +129,12 @@ int ib_get_cached_pkey(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if (!rdma_is_port_valid(device, port_num)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.pkey_cache[port_num - start_port(device)]; + cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; if (index < 0 || index >= cache->table_len) ret = -EINVAL; @@ -168,12 +157,12 @@ int ib_find_cached_pkey(struct ib_device *device, int i; int ret = -ENOENT; - if (port_num < start_port(device) || port_num > end_port(device)) + if (!rdma_is_port_valid(device, port_num)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.pkey_cache[port_num - start_port(device)]; + cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; *index = -1; @@ -197,11 +186,11 @@ int ib_get_cached_lmc(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if (!rdma_is_port_valid(device, port_num)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - *lmc = device->cache.lmc_cache[port_num - start_port(device)]; + *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)]; 
read_unlock_irqrestore(&device->cache.lock, flags); return ret; @@ -262,13 +251,13 @@ static void ib_cache_update(struct ib_device *device, write_lock_irq(&device->cache.lock); - old_pkey_cache = device->cache.pkey_cache[port - start_port(device)]; - old_gid_cache = device->cache.gid_cache [port - start_port(device)]; + old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)]; + old_gid_cache = device->cache.gid_cache [port - rdma_start_port(device)]; - device->cache.pkey_cache[port - start_port(device)] = pkey_cache; - device->cache.gid_cache [port - start_port(device)] = gid_cache; + device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache; + device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache; - device->cache.lmc_cache[port - start_port(device)] = tprops->lmc; + device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc; write_unlock_irq(&device->cache.lock); @@ -322,14 +311,14 @@ static void ib_cache_setup_one(struct ib_device *device) device->cache.pkey_cache = kmalloc(sizeof *device->cache.pkey_cache * - (end_port(device) - start_port(device) + 1), GFP_KERNEL); + (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL); device->cache.gid_cache = kmalloc(sizeof *device->cache.gid_cache * - (end_port(device) - start_port(device) + 1), GFP_KERNEL); + (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL); device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache * - (end_port(device) - - start_port(device) + 1), + (rdma_end_port(device) - + rdma_start_port(device) + 1), GFP_KERNEL); if (!device->cache.pkey_cache || !device->cache.gid_cache || @@ -339,10 +328,10 @@ static void ib_cache_setup_one(struct ib_device *device) goto err; } - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { device->cache.pkey_cache[p] = NULL; device->cache.gid_cache [p] = NULL; - ib_cache_update(device, p + start_port(device)); + ib_cache_update(device, p + rdma_start_port(device)); } INIT_IB_EVENT_HANDLER(&device->cache.event_handler, @@ -353,7 +342,7 @@ static void ib_cache_setup_one(struct ib_device *device) return; err_cache: - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { kfree(device->cache.pkey_cache[p]); kfree(device->cache.gid_cache[p]); } @@ -371,7 +360,7 @@ static void ib_cache_cleanup_one(struct ib_device *device) ib_unregister_event_handler(&device->cache.event_handler); flush_workqueue(ib_wq); - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { kfree(device->cache.pkey_cache[p]); kfree(device->cache.gid_cache[p]); } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index e711de400a01..df844af1afb3 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -151,18 +151,6 @@ static int alloc_name(char *name) return 0; } -static int start_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; -} - - -static int end_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? 
- 0 : device->phys_port_cnt; -} - /** * ib_alloc_device - allocate an IB device struct * @size:size of structure to allocate @@ -232,7 +220,7 @@ static int read_port_table_lengths(struct ib_device *device) if (!tprops) goto out; - num_ports = end_port(device) - start_port(device) + 1; + num_ports = rdma_end_port(device) - rdma_start_port(device) + 1; device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports, GFP_KERNEL); @@ -242,7 +230,7 @@ static int read_port_table_lengths(struct ib_device *device) goto err; for (port_index = 0; port_index < num_ports; ++port_index) { - ret = ib_query_port(device, port_index + start_port(device), + ret = ib_query_port(device, port_index + rdma_start_port(device), tprops); if (ret) goto err; @@ -575,7 +563,7 @@ int ib_query_port(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr) { - if (port_num < start_port(device) || port_num > end_port(device)) + if (!rdma_is_port_valid(device, port_num)) return -EINVAL; return device->query_port(device, port_num, port_attr); @@ -653,7 +641,7 @@ int ib_modify_port(struct ib_device *device, if (!device->modify_port) return -ENOSYS; - if (port_num < start_port(device) || port_num > end_port(device)) + if (!rdma_is_port_valid(device, port_num)) return -EINVAL; return device->modify_port(device, port_num, port_modify_mask, @@ -676,8 +664,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, union ib_gid tmp_gid; int ret, port, i; - for (port = start_port(device); port <= end_port(device); ++port) { - for (i = 0; i < device->gid_tbl_len[port - start_port(device)]; ++i) { + for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) { + for (i = 0; i < device->gid_tbl_len[port - rdma_start_port(device)]; ++i) { ret = ib_query_gid(device, port, i, &tmp_gid); if (ret) return ret; @@ -708,7 +696,7 @@ int ib_find_pkey(struct ib_device *device, int ret, i; u16 tmp_pkey; - for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) { + for (i = 0; i < device->pkey_tbl_len[port_num - rdma_start_port(device)]; ++i) { ret = ib_query_pkey(device, port_num, i, &tmp_pkey); if (ret) return ret; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 7f2254ec7636..a5a7f4707d19 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1790,6 +1790,11 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, goto out; } + if (!rdma_is_port_valid(qp->device, cmd.port_num)) { + ret = -EINVAL; + goto release_qp; + } + attr->qp_state = cmd.qp_state; attr->cur_qp_state = cmd.cur_qp_state; attr->path_mtu = cmd.path_mtu; @@ -1843,6 +1848,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); } +release_qp: put_qp_read(qp); if (ret) @@ -2262,6 +2268,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, const char __user *buf, int in_len, int out_len) { + struct ib_device *ib_dev = file->device->ib_dev; struct ib_uverbs_create_ah cmd; struct ib_uverbs_create_ah_resp resp; struct ib_uobject *uobj; @@ -2276,6 +2283,9 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) + return -EINVAL; + uobj = kmalloc(sizeof *uobj, GFP_KERNEL); if (!uobj) return -ENOMEM; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 1284b9221179..1233bb477472 100644 --- 
a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -390,8 +390,10 @@ static int i8042_start(struct serio *serio) { struct i8042_port *port = serio->port_data; + spin_lock_irq(&i8042_lock); port->exists = true; - mb(); + spin_unlock_irq(&i8042_lock); + return 0; } @@ -404,16 +406,20 @@ static void i8042_stop(struct serio *serio) { struct i8042_port *port = serio->port_data; + spin_lock_irq(&i8042_lock); port->exists = false; + port->serio = NULL; + spin_unlock_irq(&i8042_lock); /* + * We need to make sure that interrupt handler finishes using + * our serio port before we return from this function. * We synchronize with both AUX and KBD IRQs because there is * a (very unlikely) chance that AUX IRQ is raised for KBD port * and vice versa. */ synchronize_irq(I8042_AUX_IRQ); synchronize_irq(I8042_KBD_IRQ); - port->serio = NULL; } /* @@ -530,7 +536,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&i8042_lock, flags); - if (likely(port->exists && !filtered)) + if (likely(serio && !filtered)) serio_interrupt(serio, data, dfl); out: diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7b75a1942159..308e8c8905c9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -875,13 +875,16 @@ static void make_request(struct mddev *mddev, struct bio * bio) */ DEFINE_WAIT(w); for (;;) { - flush_signals(current); + sigset_t full, old; prepare_to_wait(&conf->wait_barrier, &w, TASK_INTERRUPTIBLE); if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo || bio->bi_sector >= mddev->suspend_hi) break; + sigfillset(&full); + sigprocmask(SIG_BLOCK, &full, &old); schedule(); + sigprocmask(SIG_SETMASK, &old, NULL); } finish_wait(&conf->wait_barrier, &w); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6056ee7984ac..2682eb95c41e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3823,12 +3823,16 @@ static void make_request(struct mddev *mddev, struct bio * bi) * userspace, we want an interruptible * wait. */ - flush_signals(current); prepare_to_wait(&conf->wait_for_overlap, &w, TASK_INTERRUPTIBLE); if (logical_sector >= mddev->suspend_lo && - logical_sector < mddev->suspend_hi) + logical_sector < mddev->suspend_hi) { + sigset_t full, old; + sigfillset(&full); + sigprocmask(SIG_BLOCK, &full, &old); schedule(); + sigprocmask(SIG_SETMASK, &old, NULL); + } goto retry; } diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 2fb6473856a0..6e7468e39430 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -739,6 +739,7 @@ static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data, } else if (urb_type == MCEUSB_RX) { /* standard request */ async_urb = ir->urb_in; + async_buf = NULL; ir->send_flags = RECV_FLAG_IN_PROGRESS; } else { @@ -754,6 +755,10 @@ static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data, res = usb_submit_urb(async_urb, GFP_ATOMIC); if (res) { mce_dbg(dev, "receive request FAILED! 
(res=%d)\n", res); + if (urb_type == MCEUSB_TX) { + kfree(async_buf); + usb_free_urb(async_urb); + } return; } mce_dbg(dev, "receive request complete (res=%d)\n", res); diff --git a/drivers/media/video/saa7164/saa7164-buffer.c b/drivers/media/video/saa7164/saa7164-buffer.c index 66696fa8341d..9bd1f73f82da 100644 --- a/drivers/media/video/saa7164/saa7164-buffer.c +++ b/drivers/media/video/saa7164/saa7164-buffer.c @@ -130,9 +130,9 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_port *port, goto fail2; /* init the buffers to a known pattern, easier during debugging */ - memset_io(buf->cpu, 0xff, buf->pci_size); + memset(buf->cpu, 0xff, buf->pci_size); buf->crc = crc32(0, buf->cpu, buf->actual_size); - memset_io(buf->pt_cpu, 0xff, buf->pt_size); + memset(buf->pt_cpu, 0xff, buf->pt_size); dprintk(DBGLVL_BUF, "%s() allocated buffer @ 0x%p (%d pageptrs)\n", __func__, buf, params->numpagetables); diff --git a/drivers/media/video/saa7164/saa7164-bus.c b/drivers/media/video/saa7164/saa7164-bus.c index 466e1b02f91f..6ef50642805a 100644 --- a/drivers/media/video/saa7164/saa7164-bus.c +++ b/drivers/media/video/saa7164/saa7164-bus.c @@ -33,12 +33,12 @@ int saa7164_bus_setup(struct saa7164_dev *dev) b->Type = TYPE_BUS_PCIe; b->m_wMaxReqSize = SAA_DEVICE_MAXREQUESTSIZE; - b->m_pdwSetRing = (u8 *)(dev->bmmio + + b->m_pdwSetRing = (u8 __iomem *)(dev->bmmio + ((u32)dev->busdesc.CommandRing)); b->m_dwSizeSetRing = SAA_DEVICE_BUFFERBLOCKSIZE; - b->m_pdwGetRing = (u8 *)(dev->bmmio + + b->m_pdwGetRing = (u8 __iomem *)(dev->bmmio + ((u32)dev->busdesc.ResponseRing)); b->m_dwSizeGetRing = SAA_DEVICE_BUFFERBLOCKSIZE; @@ -138,6 +138,7 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, u32 bytes_to_write, free_write_space, timeout, curr_srp, curr_swp; u32 new_swp, space_rem; int ret = SAA_ERR_BAD_PARAMETER; + u16 size; if (!msg) { printk(KERN_ERR "%s() !msg\n", __func__); @@ -148,10 +149,6 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, saa7164_bus_verify(dev); - msg->size = cpu_to_le16(msg->size); - msg->command = cpu_to_le16(msg->command); - msg->controlselector = cpu_to_le16(msg->controlselector); - if (msg->size > dev->bus.m_wMaxReqSize) { printk(KERN_ERR "%s() Exceeded dev->bus.m_wMaxReqSize\n", __func__); @@ -169,8 +166,8 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, bytes_to_write = sizeof(*msg) + msg->size; free_write_space = 0; timeout = SAA_BUS_TIMEOUT; - curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos)); - curr_swp = le32_to_cpu(saa7164_readl(bus->m_dwSetWritePos)); + curr_srp = saa7164_readl(bus->m_dwSetReadPos); + curr_swp = saa7164_readl(bus->m_dwSetWritePos); /* Deal with ring wrapping issues */ if (curr_srp > curr_swp) @@ -203,7 +200,7 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, mdelay(1); /* Check the space usage again */ - curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos)); + curr_srp = saa7164_readl(bus->m_dwSetReadPos); /* Deal with ring wrapping issues */ if (curr_srp > curr_swp) @@ -223,6 +220,16 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, dprintk(DBGLVL_BUS, "%s() bus->m_dwSizeSetRing = %x\n", __func__, bus->m_dwSizeSetRing); + /* + * Make a copy of msg->size before it is converted to le16 since it is + * used in the code below. 
+ */ + size = msg->size; + /* Convert to le16/le32 */ + msg->size = (__force u16)cpu_to_le16(msg->size); + msg->command = (__force u32)cpu_to_le32(msg->command); + msg->controlselector = (__force u16)cpu_to_le16(msg->controlselector); + /* Mental Note: line 462 tmmhComResBusPCIe.cpp */ /* Check if we're going to wrap again */ @@ -243,28 +250,28 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, dprintk(DBGLVL_BUS, "%s() tr4\n", __func__); /* Split the msg into pieces as the ring wraps */ - memcpy(bus->m_pdwSetRing + curr_swp, msg, space_rem); - memcpy(bus->m_pdwSetRing, (u8 *)msg + space_rem, + memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, space_rem); + memcpy_toio(bus->m_pdwSetRing, (u8 *)msg + space_rem, sizeof(*msg) - space_rem); - memcpy(bus->m_pdwSetRing + sizeof(*msg) - space_rem, - buf, msg->size); + memcpy_toio(bus->m_pdwSetRing + sizeof(*msg) - space_rem, + buf, size); } else if (space_rem == sizeof(*msg)) { dprintk(DBGLVL_BUS, "%s() tr5\n", __func__); /* Additional data at the beginning of the ring */ - memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); - memcpy(bus->m_pdwSetRing, buf, msg->size); + memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); + memcpy_toio(bus->m_pdwSetRing, buf, size); } else { /* Additional data wraps around the ring */ - memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); - if (msg->size > 0) { - memcpy(bus->m_pdwSetRing + curr_swp + + memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); + if (size > 0) { + memcpy_toio(bus->m_pdwSetRing + curr_swp + sizeof(*msg), buf, space_rem - sizeof(*msg)); - memcpy(bus->m_pdwSetRing, (u8 *)buf + + memcpy_toio(bus->m_pdwSetRing, (u8 *)buf + space_rem - sizeof(*msg), bytes_to_write - space_rem); } @@ -276,15 +283,20 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, dprintk(DBGLVL_BUS, "%s() tr6\n", __func__); /* The ring buffer doesn't wrap, two simple copies */ - memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); - memcpy(bus->m_pdwSetRing + curr_swp + sizeof(*msg), buf, - msg->size); + memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); + memcpy_toio(bus->m_pdwSetRing + curr_swp + sizeof(*msg), buf, + size); } dprintk(DBGLVL_BUS, "%s() new_swp = %x\n", __func__, new_swp); /* Update the bus write position */ - saa7164_writel(bus->m_dwSetWritePos, cpu_to_le32(new_swp)); + saa7164_writel(bus->m_dwSetWritePos, new_swp); + + /* Convert back to cpu after writing the msg to the ringbuffer. */ + msg->size = le16_to_cpu((__force __le16)msg->size); + msg->command = le32_to_cpu((__force __le32)msg->command); + msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector); ret = SAA_OK; out: @@ -336,8 +348,8 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, /* Peek the bus to see if a msg exists, if it's not what we're expecting * then return cleanly else read the message from the bus. 
*/ - curr_gwp = le32_to_cpu(saa7164_readl(bus->m_dwGetWritePos)); - curr_grp = le32_to_cpu(saa7164_readl(bus->m_dwGetReadPos)); + curr_gwp = saa7164_readl(bus->m_dwGetWritePos); + curr_grp = saa7164_readl(bus->m_dwGetReadPos); if (curr_gwp == curr_grp) { ret = SAA_ERR_EMPTY; @@ -369,19 +381,23 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, new_grp -= bus->m_dwSizeGetRing; space_rem = bus->m_dwSizeGetRing - curr_grp; - memcpy(&msg_tmp, bus->m_pdwGetRing + curr_grp, space_rem); - memcpy((u8 *)&msg_tmp + space_rem, bus->m_pdwGetRing, + memcpy_fromio(&msg_tmp, bus->m_pdwGetRing + curr_grp, space_rem); + memcpy_fromio((u8 *)&msg_tmp + space_rem, bus->m_pdwGetRing, bytes_to_read - space_rem); } else { /* No wrapping */ - memcpy(&msg_tmp, bus->m_pdwGetRing + curr_grp, bytes_to_read); + memcpy_fromio(&msg_tmp, bus->m_pdwGetRing + curr_grp, bytes_to_read); } + /* Convert from little endian to CPU */ + msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size); + msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command); + msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector); + memcpy(msg, &msg_tmp, sizeof(*msg)); /* No need to update the read positions, because this was a peek */ /* If the caller specifically want to peek, return */ if (peekonly) { - memcpy(msg, &msg_tmp, sizeof(*msg)); goto peekout; } @@ -426,25 +442,19 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, space_rem = bus->m_dwSizeGetRing - curr_grp; if (space_rem < sizeof(*msg)) { - /* msg wraps around the ring */ - memcpy(msg, bus->m_pdwGetRing + curr_grp, space_rem); - memcpy((u8 *)msg + space_rem, bus->m_pdwGetRing, - sizeof(*msg) - space_rem); if (buf) - memcpy(buf, bus->m_pdwGetRing + sizeof(*msg) - + memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) - space_rem, buf_size); } else if (space_rem == sizeof(*msg)) { - memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) - memcpy(buf, bus->m_pdwGetRing, buf_size); + memcpy_fromio(buf, bus->m_pdwGetRing, buf_size); } else { /* Additional data wraps around the ring */ - memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) { - memcpy(buf, bus->m_pdwGetRing + curr_grp + + memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), space_rem - sizeof(*msg)); - memcpy(buf + space_rem - sizeof(*msg), + memcpy_fromio(buf + space_rem - sizeof(*msg), bus->m_pdwGetRing, bytes_to_read - space_rem); } @@ -453,19 +463,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, } else { /* No wrapping */ - memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) - memcpy(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), + memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), buf_size); } /* Update the read positions, adjusting the ring */ - saa7164_writel(bus->m_dwGetReadPos, cpu_to_le32(new_grp)); + saa7164_writel(bus->m_dwGetReadPos, new_grp); peekout: - msg->size = le16_to_cpu(msg->size); - msg->command = le16_to_cpu(msg->command); - msg->controlselector = le16_to_cpu(msg->controlselector); ret = SAA_OK; out: mutex_unlock(&bus->lock); diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c index 8f3c47e1325f..14e158eed85a 100644 --- a/drivers/media/video/saa7164/saa7164-core.c +++ b/drivers/media/video/saa7164/saa7164-core.c @@ -140,7 +140,7 @@ static void saa7164_ts_verifier(struct saa7164_buffer *buf) u32 i; u8 cc, a; u16 pid; - u8 __iomem *bufcpu = (u8 *)buf->cpu; + u8 *bufcpu = (u8 *)buf->cpu; 
port->sync_errors = 0; port->v_cc_errors = 0; @@ -281,7 +281,7 @@ static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr) struct saa7164_user_buffer *ubuf = NULL; struct list_head *c, *n; int i = 0; - u8 __iomem *p; + u8 *p; mutex_lock(&port->dmaqueue_lock); list_for_each_safe(c, n, &port->dmaqueue.list) { @@ -338,8 +338,7 @@ static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr) if (buf->actual_size <= ubuf->actual_size) { - memcpy_fromio(ubuf->data, buf->cpu, - ubuf->actual_size); + memcpy(ubuf->data, buf->cpu, ubuf->actual_size); if (crc_checking) { /* Throw a new checksum on the read buffer */ @@ -366,7 +365,7 @@ static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr) * with known bad data. We check for this data at a later point * in time. */ saa7164_buffer_zero_offsets(port, bufnr); - memset_io(buf->cpu, 0xff, buf->pci_size); + memset(buf->cpu, 0xff, buf->pci_size); if (crc_checking) { /* Throw yet aanother new checksum on the dma buffer */ buf->crc = crc32(0, buf->cpu, buf->actual_size); @@ -1134,7 +1133,7 @@ static int saa7164_proc_show(struct seq_file *m, void *v) if (c == 0) seq_printf(m, " %04x:", i); - seq_printf(m, " %02x", *(b->m_pdwSetRing + i)); + seq_printf(m, " %02x", readb(b->m_pdwSetRing + i)); if (++c == 16) { seq_printf(m, "\n"); @@ -1149,7 +1148,7 @@ static int saa7164_proc_show(struct seq_file *m, void *v) if (c == 0) seq_printf(m, " %04x:", i); - seq_printf(m, " %02x", *(b->m_pdwGetRing + i)); + seq_printf(m, " %02x", readb(b->m_pdwGetRing + i)); if (++c == 16) { seq_printf(m, "\n"); diff --git a/drivers/media/video/saa7164/saa7164-fw.c b/drivers/media/video/saa7164/saa7164-fw.c index a266bf0169e6..124223a96702 100644 --- a/drivers/media/video/saa7164/saa7164-fw.c +++ b/drivers/media/video/saa7164/saa7164-fw.c @@ -72,7 +72,7 @@ int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg) /* TODO: move dlflags into dev-> and change to write/readl/b */ /* TODO: Excessive levels of debug */ int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize, - u32 dlflags, u8 *dst, u32 dstsize) + u32 dlflags, u8 __iomem *dst, u32 dstsize) { u32 reg, timeout, offset; u8 *srcbuf = NULL; @@ -136,7 +136,7 @@ int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize, srcsize -= dstsize, offset += dstsize) { dprintk(DBGLVL_FW, "%s() memcpy %d\n", __func__, dstsize); - memcpy(dst, srcbuf + offset, dstsize); + memcpy_toio(dst, srcbuf + offset, dstsize); /* Flag the data as ready */ saa7164_writel(drflag, 1); @@ -154,7 +154,7 @@ int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize, dprintk(DBGLVL_FW, "%s() memcpy(l) %d\n", __func__, dstsize); /* Write last block to the device */ - memcpy(dst, srcbuf+offset, srcsize); + memcpy_toio(dst, srcbuf+offset, srcsize); /* Flag the data as ready */ saa7164_writel(drflag, 1); diff --git a/drivers/media/video/saa7164/saa7164-types.h b/drivers/media/video/saa7164/saa7164-types.h index 1d2140a3eb38..f48ba978f835 100644 --- a/drivers/media/video/saa7164/saa7164-types.h +++ b/drivers/media/video/saa7164/saa7164-types.h @@ -78,9 +78,9 @@ enum tmBusType { struct tmComResBusInfo { enum tmBusType Type; u16 m_wMaxReqSize; - u8 *m_pdwSetRing; + u8 __iomem *m_pdwSetRing; u32 m_dwSizeSetRing; - u8 *m_pdwGetRing; + u8 __iomem *m_pdwGetRing; u32 m_dwSizeGetRing; u32 m_dwSetWritePos; u32 m_dwSetReadPos; diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h index 742b34103b5d..360e6e623059 100644 --- 
a/drivers/media/video/saa7164/saa7164.h +++ b/drivers/media/video/saa7164/saa7164.h @@ -315,13 +315,13 @@ struct saa7164_buffer { /* A block of page align PCI memory */ u32 pci_size; /* PCI allocation size in bytes */ - u64 __iomem *cpu; /* Virtual address */ + u64 *cpu; /* Virtual address */ dma_addr_t dma; /* Physical address */ u32 crc; /* Checksum for the entire buffer data */ /* A page table that splits the block into a number of entries */ u32 pt_size; /* PCI allocation size in bytes */ - u64 __iomem *pt_cpu; /* Virtual address */ + u64 *pt_cpu; /* Virtual address */ dma_addr_t pt_dma; /* Physical address */ /* Encoder fops */ diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index cbee842f8b6b..2419b0f3a6d1 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -320,6 +320,7 @@ int enclosure_add_device(struct enclosure_device *edev, int component, struct device *dev) { struct enclosure_component *cdev; + int err; if (!edev || component >= edev->components) return -EINVAL; @@ -329,12 +330,17 @@ int enclosure_add_device(struct enclosure_device *edev, int component, if (cdev->dev == dev) return -EEXIST; - if (cdev->dev) + if (cdev->dev) { enclosure_remove_links(cdev); - - put_device(cdev->dev); + put_device(cdev->dev); + } cdev->dev = get_device(dev); - return enclosure_add_links(cdev); + err = enclosure_add_links(cdev); + if (err) { + put_device(cdev->dev); + cdev->dev = NULL; + } + return err; } EXPORT_SYMBOL_GPL(enclosure_add_device); diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 4d7e334603c3..68e9583a9d9b 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -1176,6 +1176,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy, struct mwifiex_adapter *adapter; struct net_device *dev; void *mdev_priv; + int ret; if (!priv) return ERR_PTR(-EFAULT); @@ -1216,8 +1217,8 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy, ether_setup, 1); if (!dev) { wiphy_err(wiphy, "no memory available for netdevice\n"); - priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; - return ERR_PTR(-ENOMEM); + ret = -ENOMEM; + goto err_alloc_netdev; } dev_net_set(dev, wiphy_net(wiphy)); @@ -1239,23 +1240,29 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy, SET_NETDEV_DEV(dev, adapter->dev); + sema_init(&priv->async_sem, 1); + priv->scan_pending_on_block = false; + /* Register network device */ if (register_netdevice(dev)) { wiphy_err(wiphy, "cannot register virtual network device\n"); - free_netdev(dev); - priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; - return ERR_PTR(-EFAULT); + ret = -EFAULT; + goto err_reg_netdev; } - sema_init(&priv->async_sem, 1); - priv->scan_pending_on_block = false; - dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name); #ifdef CONFIG_DEBUG_FS mwifiex_dev_debugfs_init(priv); #endif return dev; + +err_reg_netdev: + free_netdev(dev); + priv->netdev = NULL; +err_alloc_netdev: + priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf); diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h index 1ec90fc7505e..6531622d2528 100644 --- a/drivers/net/wireless/wl12xx/wl12xx.h +++ b/drivers/net/wireless/wl12xx/wl12xx.h @@ -166,7 +166,7 @@ extern u32 wl12xx_debug_level; #define ACX_TX_DESCRIPTORS 16 -#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) +#define WL1271_AGGR_BUFFER_SIZE (4 * 0x00001000) enum wl1271_state { 
WL1271_STATE_OFF, diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 68af94c151df..fa635079f6d3 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -873,6 +873,7 @@ static int pci_pm_thaw_noirq(struct device *dev) return pci_legacy_resume_early(dev); pci_update_current_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); if (drv && drv->pm && drv->pm->thaw_noirq) error = drv->pm->thaw_noirq(dev); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ea6123f500d9..9290d239d570 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -358,7 +358,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev) { int i; - for (i = 0; i < PCI_STD_RESOURCE_END; i++) { + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { struct resource *r = &dev->resource[i]; if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c index 781068d62f23..5e6c08e0d087 100644 --- a/drivers/rtc/rtc-nuc900.c +++ b/drivers/rtc/rtc-nuc900.c @@ -93,7 +93,7 @@ static int *check_rtc_access_enable(struct nuc900_rtc *nuc900_rtc) __raw_writel(AERPOWERON, nuc900_rtc->rtc_reg + REG_RTC_AER); while (!(__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB) - && timeout--) + && --timeout) mdelay(1); if (!timeout) diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index d1e697190970..d52c3c47ceef 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1894,7 +1894,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, bnx2i_ep_active_list_add(hba, bnx2i_ep); - if (bnx2i_map_ep_dbell_regs(bnx2i_ep)) + rc = bnx2i_map_ep_dbell_regs(bnx2i_ep); + if (rc) goto del_active_ep; mutex_unlock(&hba->net_dev_lock); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index c8744588e6b7..771b916ef19e 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2079,7 +2079,7 @@ iscsi_if_rx(struct sk_buff *skb) uint32_t group; nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < sizeof(*nlh) || + if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || skb->len < nlh->nlmsg_len) { break; } diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c index 676fe9ac7f61..06b30f0449ab 100644 --- a/drivers/scsi/sun_esp.c +++ b/drivers/scsi/sun_esp.c @@ -568,6 +568,7 @@ static int __devinit esp_sbus_probe(struct platform_device *op) struct device_node *dp = op->dev.of_node; struct platform_device *dma_of = NULL; int hme = 0; + int ret; if (dp->parent && (!strcmp(dp->parent->name, "espdma") || @@ -582,7 +583,11 @@ static int __devinit esp_sbus_probe(struct platform_device *op) if (!dma_of) return -ENODEV; - return esp_sbus_probe_one(op, dma_of, hme); + ret = esp_sbus_probe_one(op, dma_of, hme); + if (ret) + put_device(&dma_of->dev); + + return ret; } static int __devexit esp_sbus_remove(struct platform_device *op) @@ -615,6 +620,8 @@ static int __devexit esp_sbus_remove(struct platform_device *op) dev_set_drvdata(&op->dev, NULL); + put_device(&dma_of->dev); + return 0; } diff --git a/drivers/staging/usbip/stub_main.c b/drivers/staging/usbip/stub_main.c index 2d6317850064..2446e09666b4 100644 --- a/drivers/staging/usbip/stub_main.c +++ b/drivers/staging/usbip/stub_main.c @@ -237,7 +237,11 @@ void stub_device_cleanup_urbs(struct stub_device *sdev) kmem_cache_free(stub_priv_cache, priv); kfree(urb->transfer_buffer); + urb->transfer_buffer = NULL; + kfree(urb->setup_packet); + urb->setup_packet = 
NULL; + usb_free_urb(urb); } } diff --git a/drivers/staging/usbip/stub_tx.c b/drivers/staging/usbip/stub_tx.c index 023fda305be2..1d01109017bb 100644 --- a/drivers/staging/usbip/stub_tx.c +++ b/drivers/staging/usbip/stub_tx.c @@ -28,7 +28,11 @@ static void stub_free_priv_and_urb(struct stub_priv *priv) struct urb *urb = priv->urb; kfree(urb->setup_packet); + urb->setup_packet = NULL; + kfree(urb->transfer_buffer); + urb->transfer_buffer = NULL; + list_del(&priv->list); kmem_cache_free(stub_priv_cache, priv); usb_free_urb(urb); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 1c469ad1e0f6..306f844db714 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -2650,18 +2650,18 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) * related to the kernel should not use this. */ data = shift_state; - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_GETMOUSEREPORTING: data = mouse_reporting(); - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_SETVESABLANK: ret = set_vesa_blanking(p); break; case TIOCL_GETKMSGREDIRECT: data = vt_get_kmsg_redirect(); - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_SETKMSGREDIRECT: if (!capable(CAP_SYS_ADMIN)) { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 2177ed483412..3014687ea25d 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -183,6 +183,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1908, 0x1315), .driver_info = USB_QUIRK_HONOR_BNUMINTERFACES }, + /* Hauppauge HVR-950q */ + { USB_DEVICE(0x2040, 0x7200), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index d89b72c86b66..223326b9d957 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1023,6 +1023,9 @@ int xhci_bus_suspend(struct usb_hcd *hcd) t2 |= PORT_WKOC_E | PORT_WKCONN_E; t2 &= ~PORT_WKDISC_E; } + if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) && + (hcd->speed < HCD_USB3)) + t2 &= ~PORT_WAKE_BITS; } else t2 &= ~PORT_WAKE_BITS; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 423700bd310d..39cd9320e4d9 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -45,6 +45,11 @@ #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 +#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 +#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba +#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb +#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc + static const char hcd_name[] = "xhci_hcd"; /* called after powerup, by probe or system-pm "wakeup" */ @@ -110,6 +115,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_AMD) xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if ((pdev->vendor == PCI_VENDOR_ID_AMD) && + ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1))) + xhci->quirks |= XHCI_U2_DISABLE_WAKE; + if (pdev->vendor == PCI_VENDOR_ID_INTEL) xhci->quirks |= XHCI_INTEL_HOST; if (pdev->vendor == PCI_VENDOR_ID_INTEL) diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 4453f2a3805e..307e3cc48195 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ 
-1502,6 +1502,7 @@ struct xhci_hcd { #define XHCI_SPURIOUS_WAKEUP (1 << 18) #define XHCI_PME_STUCK_QUIRK (1 << 20) #define XHCI_MISSING_CAS (1 << 24) +#define XHCI_U2_DISABLE_WAKE (1 << 27) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 8dfb599bc0ec..fe2c410c5c67 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -140,6 +140,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ + { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 44bdce4242ad..3f75f5f093a4 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c @@ -1852,7 +1852,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT) case ATYIO_CLKR: if (M64_HAS(INTEGRATED)) { - struct atyclk clk; + struct atyclk clk = { 0 }; union aty_pll *pll = &par->pll; u32 dsp_config = pll->ct.dsp_config; u32 dsp_on_off = pll->ct.dsp_on_off; diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 9f55b545ea44..286edc185aac 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -117,12 +117,6 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans, switch (type) { case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; - if (acl) { - ret = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (ret) - return ret; - } - ret = 0; break; case ACL_TYPE_DEFAULT: if (!S_ISDIR(inode->i_mode)) @@ -161,11 +155,13 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, { int ret; struct posix_acl *acl = NULL; + struct inode *inode = dentry->d_inode; + umode_t old_mode = inode->i_mode; - if (!inode_owner_or_capable(dentry->d_inode)) + if (!inode_owner_or_capable(inode)) return -EPERM; - if (!IS_POSIXACL(dentry->d_inode)) + if (!IS_POSIXACL(inode)) return -EOPNOTSUPP; if (value) { @@ -180,7 +176,14 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, } } - ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type); + if (type == ACL_TYPE_ACCESS && acl) { + ret = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (ret) + goto out; + } + ret = btrfs_set_acl(NULL, inode, acl, type); + if (ret) + inode->i_mode = old_mode; out: posix_acl_release(acl); diff --git a/fs/dcache.c b/fs/dcache.c index 01c68ae3129c..93df95475e43 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -836,6 +836,7 @@ void shrink_dcache_sb(struct super_block *sb) list_splice_init(&sb->s_dentry_lru, &tmp); spin_unlock(&dcache_lru_lock); shrink_dentry_list(&tmp); + cond_resched(); spin_lock(&dcache_lru_lock); } spin_unlock(&dcache_lru_lock); diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c index e38a9b61af3f..5106714312da 100644 --- a/fs/ext2/acl.c +++ b/fs/ext2/acl.c @@ -174,11 +174,8 @@ ext2_get_acl(struct inode *inode, int type) return acl; } -/* - * inode->i_mutex: down - */ static int -ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl) +__ext2_set_acl(struct inode *inode, struct 
posix_acl *acl, int type) { int name_index; void *value = NULL; @@ -193,13 +190,6 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl) switch(type) { case ACL_TYPE_ACCESS: name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; - if (acl) { - error = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (error) - return error; - inode->i_ctime = CURRENT_TIME_SEC; - mark_inode_dirty(inode); - } break; case ACL_TYPE_DEFAULT: @@ -225,6 +215,31 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl) return error; } +/* + * inode->i_mutex: down + */ +static int +ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) +{ + int error; + int update_mode = 0; + umode_t mode = inode->i_mode; + + if (type == ACL_TYPE_ACCESS && acl) { + error = posix_acl_update_mode(inode, &mode, &acl); + if (error) + return error; + update_mode = 1; + } + error = __ext2_set_acl(inode, acl, type); + if (!error && update_mode) { + inode->i_mode = mode; + inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(inode); + } + return error; +} + /* * Initialize the ACLs of a new inode. Called from ext2_new_inode. * @@ -248,7 +263,7 @@ ext2_init_acl(struct inode *inode, struct inode *dir) } if (test_opt(inode->i_sb, POSIX_ACL) && acl) { if (S_ISDIR(inode->i_mode)) { - error = ext2_set_acl(inode, ACL_TYPE_DEFAULT, acl); + error = __ext2_set_acl(inode, acl, ACL_TYPE_DEFAULT); if (error) goto cleanup; } @@ -257,7 +272,7 @@ ext2_init_acl(struct inode *inode, struct inode *dir) return error; if (error > 0) { /* This is an extended ACL */ - error = ext2_set_acl(inode, ACL_TYPE_ACCESS, acl); + error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS); } } cleanup: @@ -295,7 +310,7 @@ ext2_acl_chmod(struct inode *inode) error = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); if (error) return error; - error = ext2_set_acl(inode, ACL_TYPE_ACCESS, acl); + error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS); posix_acl_release(acl); return error; } @@ -378,7 +393,7 @@ ext2_xattr_set_acl(struct dentry *dentry, const char *name, const void *value, } else acl = NULL; - error = ext2_set_acl(dentry->d_inode, type, acl); + error = ext2_set_acl(dentry->d_inode, acl, type); release_and_out: posix_acl_release(acl); diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c index 880d3d64bb14..c84a8f6ce4c3 100644 --- a/fs/ext3/acl.c +++ b/fs/ext3/acl.c @@ -178,14 +178,9 @@ ext3_get_acl(struct inode *inode, int type) return acl; } -/* - * Set the access or default ACL of an inode. - * - * inode->i_mutex: down unless called from ext3_new_inode - */ static int -ext3_set_acl(handle_t *handle, struct inode *inode, int type, - struct posix_acl *acl) +__ext3_set_acl(handle_t *handle, struct inode *inode, int type, + struct posix_acl *acl) { int name_index; void *value = NULL; @@ -198,13 +193,6 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type, switch(type) { case ACL_TYPE_ACCESS: name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS; - if (acl) { - error = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (error) - return error; - inode->i_ctime = CURRENT_TIME_SEC; - ext3_mark_inode_dirty(handle, inode); - } break; case ACL_TYPE_DEFAULT: @@ -233,6 +221,34 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type, return error; } +/* + * Set the access or default ACL of an inode. 
+ * + * inode->i_mutex: down + */ +static int +ext3_set_acl(handle_t *handle, struct inode *inode, int type, + struct posix_acl *acl) +{ + int error; + int update_mode = 0; + umode_t mode = inode->i_mode; + + if (type == ACL_TYPE_ACCESS && acl) { + error = posix_acl_update_mode(inode, &mode, &acl); + if (error) + return error; + update_mode = 1; + } + error = __ext3_set_acl(handle, inode, type, acl); + if (!error && update_mode) { + inode->i_mode = mode; + inode->i_ctime = CURRENT_TIME_SEC; + ext3_mark_inode_dirty(handle, inode); + } + return error; +} + /* * Initialize the ACLs of a new inode. Called from ext3_new_inode. * @@ -256,8 +272,8 @@ ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir) } if (test_opt(inode->i_sb, POSIX_ACL) && acl) { if (S_ISDIR(inode->i_mode)) { - error = ext3_set_acl(handle, inode, - ACL_TYPE_DEFAULT, acl); + error = __ext3_set_acl(handle, inode, + ACL_TYPE_DEFAULT, acl); if (error) goto cleanup; } @@ -267,7 +283,7 @@ ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir) if (error > 0) { /* This is an extended ACL */ - error = ext3_set_acl(handle, inode, ACL_TYPE_ACCESS, acl); + error = __ext3_set_acl(handle, inode, ACL_TYPE_ACCESS, acl); } } cleanup: diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index 5d419a496d96..c36fa746fbd2 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -183,8 +183,8 @@ ext4_get_acl(struct inode *inode, int type) * inode->i_mutex: down unless called from ext4_new_inode */ static int -ext4_set_acl(handle_t *handle, struct inode *inode, int type, - struct posix_acl *acl) +__ext4_set_acl(handle_t *handle, struct inode *inode, int type, + struct posix_acl *acl) { int name_index; void *value = NULL; @@ -197,13 +197,6 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type, switch (type) { case ACL_TYPE_ACCESS: name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS; - if (acl) { - error = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (error) - return error; - inode->i_ctime = ext4_current_time(inode); - ext4_mark_inode_dirty(handle, inode); - } break; case ACL_TYPE_DEFAULT: @@ -225,8 +218,34 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type, value, size, 0); kfree(value); - if (!error) + if (!error) { set_cached_acl(inode, type, acl); + } + + return error; +} + +static int +ext4_set_acl(handle_t *handle, struct inode *inode, int type, + struct posix_acl *acl) +{ + umode_t mode = inode->i_mode; + int update_mode = 0; + int error; + + if ((type == ACL_TYPE_ACCESS) && acl) { + error = posix_acl_update_mode(inode, &mode, &acl); + if (error) + return error; + update_mode = 1; + } + + error = __ext4_set_acl(handle, inode, type, acl); + if (!error && update_mode) { + inode->i_mode = mode; + inode->i_ctime = ext4_current_time(inode); + ext4_mark_inode_dirty(handle, inode); + } return error; } @@ -254,8 +273,8 @@ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir) } if (test_opt(inode->i_sb, POSIX_ACL) && acl) { if (S_ISDIR(inode->i_mode)) { - error = ext4_set_acl(handle, inode, - ACL_TYPE_DEFAULT, acl); + error = __ext4_set_acl(handle, inode, + ACL_TYPE_DEFAULT, acl); if (error) goto cleanup; } @@ -265,7 +284,7 @@ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir) if (error > 0) { /* This is an extended ACL */ - error = ext4_set_acl(handle, inode, ACL_TYPE_ACCESS, acl); + error = __ext4_set_acl(handle, inode, ACL_TYPE_ACCESS, acl); } } cleanup: diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index 1d4f4c74d2c3..81b8c07bfd4f 100644 --- 
a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c @@ -250,15 +250,9 @@ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type) return acl; } -/* - * Inode operation set_posix_acl(). - * - * inode->i_mutex: down - * BKL held [before 2.5.x] - */ static int -reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, - int type, struct posix_acl *acl) +__reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, + int type, struct posix_acl *acl) { char *name; void *value = NULL; @@ -271,11 +265,6 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, switch (type) { case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; - if (acl) { - error = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (error) - return error; - } break; case ACL_TYPE_DEFAULT: name = POSIX_ACL_XATTR_DEFAULT; @@ -316,6 +305,31 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, return error; } +/* + * Inode operation set_posix_acl(). + * + * inode->i_mutex: down + */ +static int +reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, + int type, struct posix_acl *acl) +{ + int error; + int update_mode = 0; + umode_t mode = inode->i_mode; + + if (type == ACL_TYPE_ACCESS && acl) { + error = posix_acl_update_mode(inode, &mode, &acl); + if (error) + return error; + update_mode = 1; + } + error = __reiserfs_set_acl(th, inode, type, acl); + if (!error && update_mode) + inode->i_mode = mode; + return error; +} + /* dir->i_mutex: locked, * inode is new and not released into the wild yet */ int @@ -350,8 +364,8 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th, if (acl) { /* Copy the default ACL to the default ACL of a new directory */ if (S_ISDIR(inode->i_mode)) { - err = reiserfs_set_acl(th, inode, ACL_TYPE_DEFAULT, - acl); + err = __reiserfs_set_acl(th, inode, ACL_TYPE_DEFAULT, + acl); if (err) goto cleanup; } @@ -364,7 +378,7 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th, /* If we need an ACL.. 
*/ if (err > 0) - err = reiserfs_set_acl(th, inode, ACL_TYPE_ACCESS, acl); + err = __reiserfs_set_acl(th, inode, ACL_TYPE_ACCESS, acl); cleanup: posix_acl_release(acl); } else { diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index cef0460f4c54..562e84d73c1e 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -573,7 +573,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, aligned_dlen = ALIGN(dlen, 8); aligned_ilen = ALIGN(ilen, 8); len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ; - dent = kmalloc(len, GFP_NOFS); + dent = kzalloc(len, GFP_NOFS); if (!dent) return -ENOMEM; @@ -959,7 +959,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8); if (old_dir != new_dir) len += plen; - dent = kmalloc(len, GFP_NOFS); + dent = kzalloc(len, GFP_NOFS); if (!dent) return -ENOMEM; @@ -1307,7 +1307,7 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, hlen = host_ui->data_len + UBIFS_INO_NODE_SZ; len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8); - xent = kmalloc(len, GFP_NOFS); + xent = kzalloc(len, GFP_NOFS); if (!xent) return -ENOMEM; @@ -1414,7 +1414,7 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode, aligned_len1 = ALIGN(len1, 8); aligned_len = aligned_len1 + ALIGN(len2, 8); - ino = kmalloc(aligned_len, GFP_NOFS); + ino = kzalloc(aligned_len, GFP_NOFS); if (!ino) return -ENOMEM; diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index f996cb52442b..e1498cc5cc8d 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -2730,6 +2730,10 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum) dbg_tnc("xent '%s', ino %lu", xent->name, (unsigned long)xattr_inum); +#ifdef CONFIG_UBIFS_FS_XATTR + ubifs_evict_xattr_inode(c, xattr_inum); +#endif + nm.name = xent->name; nm.len = le16_to_cpu(xent->nlen); err = ubifs_tnc_remove_nm(c, &key1, &nm); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 223dd4294d06..30a9b5cf3729 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1748,6 +1748,7 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf, size_t size); ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size); int ubifs_removexattr(struct dentry *dentry, const char *name); +void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum); /* super.c */ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum); diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 28b7ebf5d0d9..3962fa3be948 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -483,6 +483,28 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size) return written; } +/** + * ubifs_evict_xattr_inode - Evict an xattr inode. + * @c: UBIFS file-system description object + * @xattr_inum: xattr inode number + * + * When an inode that hosts xattrs is being removed we have to make sure + * that cached inodes of the xattrs also get removed from the inode cache + * otherwise we'd waste memory. This function looks up an inode from the + * inode cache and clears the link counter such that iput() will evict + * the inode. 
+ */ +void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum) +{ + struct inode *inode; + + inode = ilookup(c->vfs_sb, xattr_inum); + if (inode) { + clear_nlink(inode); + iput(inode); + } +} + static int remove_xattr(struct ubifs_info *c, struct inode *host, struct inode *inode, const struct qstr *nm) { diff --git a/fs/udf/file.c b/fs/udf/file.c index 26f9b5206a78..905bbe1b33c5 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -44,10 +44,15 @@ static void __udf_adinicb_readpage(struct page *page) struct inode *inode = page->mapping->host; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); + loff_t isize = i_size_read(inode); + /* + * We have to be careful here as truncate can change i_size under us. + * So just sample it once and use the same value everywhere. + */ kaddr = kmap(page); - memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size); - memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size); + memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, isize); + memset(kaddr + isize, 0, PAGE_CACHE_SIZE - isize); flush_dcache_page(page); SetPageUptodate(page); kunmap(page); diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 5d67e2023311..68de1c75237a 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1145,8 +1145,8 @@ int udf_setsize(struct inode *inode, loff_t newsize) up_write(&iinfo->i_data_sem); return err; } - truncate_setsize(inode, newsize); up_write(&iinfo->i_data_sem); + truncate_setsize(inode, newsize); } else { if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { down_write(&iinfo->i_data_sem); @@ -1162,8 +1162,8 @@ int udf_setsize(struct inode *inode, loff_t newsize) udf_get_block); if (err) return err; - down_write(&iinfo->i_data_sem); truncate_setsize(inode, newsize); + down_write(&iinfo->i_data_sem); udf_truncate_extents(inode); up_write(&iinfo->i_data_sem); } diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h index a3721633abc8..9a51bcf3208d 100644 --- a/fs/xfs/xfs_dinode.h +++ b/fs/xfs/xfs_dinode.h @@ -201,7 +201,14 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev) #define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT) #ifdef CONFIG_XFS_RT -#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) + +/* + * make sure we ignore the inode flag if the filesystem doesn't have a + * configured realtime device. + */ +#define XFS_IS_REALTIME_INODE(ip) \ + (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) && \ + (ip)->i_mount->m_rtdev_targp) #else #define XFS_IS_REALTIME_INODE(ip) (0) #endif diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 4f7a63237471..1b6ce8c2401a 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -216,6 +216,23 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); (cpu) = cpumask_next_zero((cpu), (mask)), \ (cpu) < nr_cpu_ids;) +extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); + +/** + * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location + * @cpu: the (optionally unsigned) integer iterator + * @mask: the cpumask pointer + * @start: the start location + * + * The implementation does not assume any bit in @mask is set (including @start). + * + * After the loop, cpu is >= nr_cpu_ids. 
+ */ +#define for_each_cpu_wrap(cpu, mask, start) \ + for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \ + (cpu) < nr_cpumask_bits; \ + (cpu) = cpumask_next_wrap((cpu), (mask), (start), true)) + /** * for_each_cpu_and - iterate over every cpu in both masks * @cpu: the (optionally unsigned) integer iterator diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 7a7e5fd2a277..1dc942705235 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -74,12 +74,12 @@ extern int cpuset_slab_spread_node(void); static inline int cpuset_do_page_mem_spread(void) { - return current->flags & PF_SPREAD_PAGE; + return task_spread_page(current); } static inline int cpuset_do_slab_mem_spread(void) { - return current->flags & PF_SPREAD_SLAB; + return task_spread_slab(current); } extern int current_cpuset_is_being_rebound(void); diff --git a/include/linux/sched.h b/include/linux/sched.h index fb76ee7dbd9d..2d4ab762d479 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1313,6 +1313,8 @@ struct task_struct { unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; + unsigned long atomic_flags; /* Flags needing atomic access. */ + pid_t pid; pid_t tgid; @@ -1810,8 +1812,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ -#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ -#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ #define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ @@ -1844,6 +1844,20 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) #define used_math() tsk_used_math(current) +/* Per-process atomic flags. */ +#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ +#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ + +#define TASK_PFA_TEST(name, func) \ + static inline bool task_##func(struct task_struct *p) \ + { return test_bit(PFA_##name, &p->atomic_flags); } +#define TASK_PFA_SET(name, func) \ + static inline void task_set_##func(struct task_struct *p) \ + { set_bit(PFA_##name, &p->atomic_flags); } +#define TASK_PFA_CLEAR(name, func) \ + static inline void task_clear_##func(struct task_struct *p) \ + { clear_bit(PFA_##name, &p->atomic_flags); } + /* * task->jobctl flags */ @@ -1936,6 +1950,14 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) } #endif +TASK_PFA_TEST(SPREAD_PAGE, spread_page) +TASK_PFA_SET(SPREAD_PAGE, spread_page) +TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) + +TASK_PFA_TEST(SPREAD_SLAB, spread_slab) +TASK_PFA_SET(SPREAD_SLAB, spread_slab) +TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) + /* * Do not use outside of architecture code which knows its limitations. 
* diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 4fc3e5dda0c4..088ecb020905 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -489,9 +489,9 @@ extern void usb_ep0_reinit(struct usb_device *); ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) #define EndpointRequest \ - ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) + ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) #define EndpointOutRequest \ - ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) + ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) /* class requests from the USB 2.0 hub spec, table 11-15 */ /* GetBusState and SetHubDescriptor are optional, omitted */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index bf5daafe8ecc..a6bb7eb8d7ed 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1308,6 +1308,40 @@ int ib_query_port(struct ib_device *device, enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num); +/** + * rdma_start_port - Return the first valid port number for the device + * specified + * + * @device: Device to be checked + * + * Return start port number + */ +static inline u8 rdma_start_port(const struct ib_device *device) +{ + return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; +} + +/** + * rdma_end_port - Return the last valid port number for the device + * specified + * + * @device: Device to be checked + * + * Return last port number + */ +static inline u8 rdma_end_port(const struct ib_device *device) +{ + return (device->node_type == RDMA_NODE_IB_SWITCH) ? + 0 : device->phys_port_cnt; +} + +static inline int rdma_is_port_valid(const struct ib_device *device, + unsigned int port) +{ + return (port >= rdma_start_port(device) && + port <= rdma_end_port(device)); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 4346f9a2c9dc..5cc0eece8e3a 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -326,13 +326,14 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs, struct task_struct *tsk) { if (is_spread_page(cs)) - tsk->flags |= PF_SPREAD_PAGE; + task_set_spread_page(tsk); else - tsk->flags &= ~PF_SPREAD_PAGE; + task_clear_spread_page(tsk); + if (is_spread_slab(cs)) - tsk->flags |= PF_SPREAD_SLAB; + task_set_spread_slab(tsk); else - tsk->flags &= ~PF_SPREAD_SLAB; + task_clear_spread_slab(tsk); } /* diff --git a/kernel/events/core.c b/kernel/events/core.c index bc94278e0b45..8b37d9553146 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4240,9 +4240,6 @@ static void perf_output_read_one(struct perf_output_handle *handle, __output_copy(handle, values, n * sizeof(u64)); } -/* - * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. - */ static void perf_output_read_group(struct perf_output_handle *handle, struct perf_event *event, u64 enabled, u64 running) @@ -4286,6 +4283,13 @@ static void perf_output_read_group(struct perf_output_handle *handle, #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ PERF_FORMAT_TOTAL_TIME_RUNNING) +/* + * XXX PERF_SAMPLE_READ vs inherited events seems difficult. + * + * The problem is that it's both hard and excessively expensive to iterate the + * child list, not to mention that it's impossible to IPI the children running + * on another CPU, from interrupt/NMI context. 
+ */ static void perf_output_read(struct perf_output_handle *handle, struct perf_event *event) { @@ -6284,9 +6288,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, local64_set(&hwc->period_left, hwc->sample_period); /* - * we currently do not support PERF_FORMAT_GROUP on inherited events + * We currently do not support PERF_SAMPLE_READ on inherited events. + * See perf_output_read(). */ - if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) + if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)) goto done; pmu = perf_init_event(event); diff --git a/kernel/sched.c b/kernel/sched.c index 4b3e12ec5a60..5cfb99789abf 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -7479,7 +7479,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) cpumask_clear(covered); - for_each_cpu(i, span) { + for_each_cpu_wrap(i, span, cpu) { struct cpumask *sg_span; if (cpumask_test_cpu(i, covered)) diff --git a/lib/cpumask.c b/lib/cpumask.c index af3e5817de98..70184596853e 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -75,6 +75,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) return i; } +/** + * cpumask_next_wrap - helper to implement for_each_cpu_wrap + * @n: the cpu prior to the place to search + * @mask: the cpumask pointer + * @start: the start point of the iteration + * @wrap: assume @n crossing @start terminates the iteration + * + * Returns >= nr_cpu_ids on completion + * + * Note: the @wrap argument is required for the start condition when + * we cannot assume @start is set in @mask. + */ +int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) +{ + int next; + +again: + next = cpumask_next(n, mask); + + if (wrap && n < start && next >= start) { + return nr_cpumask_bits; + + } else if (next >= nr_cpumask_bits) { + wrap = true; + n = -1; + goto again; + } + + return next; +} +EXPORT_SYMBOL(cpumask_next_wrap); + /* These are not inline because of header tangles. */ #ifdef CONFIG_CPUMASK_OFFSTACK /** diff --git a/mm/mmap.c b/mm/mmap.c index c7cbb405d5b3..ba6583bd1858 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1717,7 +1717,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) /* Guard against exceeding limits of the address space. */ address &= PAGE_MASK; - if (address >= TASK_SIZE) + if (address >= (TASK_SIZE & PAGE_MASK)) return -ENOMEM; address += PAGE_SIZE; @@ -1729,7 +1729,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) gap_addr = TASK_SIZE; next = vma->vm_next; - if (next && next->vm_start < gap_addr) { + if (next && next->vm_start < gap_addr && + (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (!(next->vm_flags & VM_GROWSUP)) return -ENOMEM; /* Check that both stack segments have the same anon_vma? */ @@ -1788,7 +1789,8 @@ int expand_downwards(struct vm_area_struct *vma, if (gap_addr > address) return -ENOMEM; prev = vma->vm_prev; - if (prev && prev->vm_end > gap_addr) { + if (prev && prev->vm_end > gap_addr && + (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { if (!(prev->vm_flags & VM_GROWSDOWN)) return -ENOMEM; /* Check that both stack segments have the same anon_vma? */ diff --git a/mm/slab.c b/mm/slab.c index aea5e42c67d8..bb39255d8b3f 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3255,7 +3255,7 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) #ifdef CONFIG_NUMA /* - * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY. + * Try allocating on another node if PFA_SPREAD_SLAB|PF_MEMPOLICY. 
* * If we are in_interrupt, then process context, including cpusets and * mempolicy, may not apply and should not be used for allocation policy. @@ -3496,7 +3496,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags) { void *objp; - if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) { + if (unlikely((current->flags & PF_MEMPOLICY) || cpuset_do_slab_mem_spread())) { objp = alternate_node_alloc(cache, flags); if (objp) goto out; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index dd7c0195aee6..01116f935fc9 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -68,7 +68,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, u8 ident, u16 dlen, void *data); static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data); -static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); +static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size); static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err); @@ -787,7 +787,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) set_bit(CONF_REQ_SENT, &chan->conf_state); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, - l2cap_build_conf_req(chan, buf), buf); + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } @@ -1825,12 +1825,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned return len; } -static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val) +static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size) { struct l2cap_conf_opt *opt = *ptr; BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val); + if (size < L2CAP_CONF_OPT_SIZE + len) + return; + opt->type = type; opt->len = len; @@ -1901,11 +1904,12 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) } } -static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) +static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) { struct l2cap_conf_req *req = data; struct l2cap_conf_rfc rfc = { .mode = chan->mode }; void *ptr = req->data; + void *endptr = data + data_size; BT_DBG("chan %p", chan); @@ -1926,7 +1930,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) done: if (chan->imtu != L2CAP_DEFAULT_MTU) - l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr); switch (chan->mode) { case L2CAP_MODE_BASIC: @@ -1942,7 +1946,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) rfc.max_pdu_size = 0; l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), - (unsigned long) &rfc); + (unsigned long) &rfc, endptr - ptr); break; case L2CAP_MODE_ERTM: @@ -1956,7 +1960,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), - (unsigned long) &rfc); + (unsigned long) &rfc, endptr - ptr); if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) break; @@ -1964,7 +1968,8 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) if (chan->fcs == L2CAP_FCS_NONE || test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) { chan->fcs = L2CAP_FCS_NONE; - l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs, + endptr - ptr); } break; @@ -1979,7 +1984,7 @@ static 
int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), - (unsigned long) &rfc); + (unsigned long) &rfc, endptr - ptr); if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) break; @@ -1987,7 +1992,8 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) if (chan->fcs == L2CAP_FCS_NONE || test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) { chan->fcs = L2CAP_FCS_NONE; - l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs, + endptr - ptr); } break; } @@ -1998,10 +2004,11 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) return ptr - data; } -static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) +static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) { struct l2cap_conf_rsp *rsp = data; void *ptr = rsp->data; + void *endptr = data + data_size; void *req = chan->conf_req; int len = chan->conf_len; int type, hint, olen; @@ -2077,8 +2084,8 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) if (chan->num_conf_rsp == 1) return -ECONNREFUSED; - l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, - sizeof(rfc), (unsigned long) &rfc); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), + (unsigned long) &rfc, endptr - ptr); } @@ -2092,7 +2099,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) chan->omtu = mtu; set_bit(CONF_MTU_DONE, &chan->conf_state); } - l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr); switch (rfc.mode) { case L2CAP_MODE_BASIC: @@ -2117,7 +2124,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) set_bit(CONF_MODE_DONE, &chan->conf_state); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, - sizeof(rfc), (unsigned long) &rfc); + sizeof(rfc), (unsigned long) &rfc, endptr - ptr); break; @@ -2129,8 +2136,8 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) set_bit(CONF_MODE_DONE, &chan->conf_state); - l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, - sizeof(rfc), (unsigned long) &rfc); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), + (unsigned long) &rfc, endptr - ptr); break; @@ -2151,10 +2158,12 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) return ptr - data; } -static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result) +static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, + void *data, size_t size, u16 *result) { struct l2cap_conf_req *req = data; void *ptr = req->data; + void *endptr = data + size; int type, olen; unsigned long val; struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; @@ -2171,13 +2180,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi chan->imtu = L2CAP_DEFAULT_MIN_MTU; } else chan->imtu = val; - l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr); break; case L2CAP_CONF_FLUSH_TO: chan->flush_to = val; l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, - 2, chan->flush_to); + 2, chan->flush_to, endptr - ptr); break; case L2CAP_CONF_RFC: @@ -2191,7 +2200,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi chan->fcs = 0; l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, - sizeof(rfc), (unsigned long) &rfc); + sizeof(rfc), (unsigned long) &rfc, 
endptr - ptr); break; } } @@ -2250,7 +2259,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) return; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, - l2cap_build_conf_req(chan, buf), buf); + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } @@ -2459,7 +2468,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn, u8 buf[128]; set_bit(CONF_REQ_SENT, &chan->conf_state); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, - l2cap_build_conf_req(chan, buf), buf); + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } @@ -2509,7 +2518,7 @@ static int l2cap_connect_rsp(struct l2cap_conn *conn, break; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, - l2cap_build_conf_req(chan, req), req); + l2cap_build_conf_req(chan, req, sizeof(req)), req); chan->num_conf_req++; break; @@ -2602,7 +2611,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr } /* Complete config. */ - len = l2cap_parse_conf_req(chan, rsp); + len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp)); if (len < 0) { l2cap_send_disconn_req(conn, chan, ECONNRESET); goto unlock; @@ -2635,7 +2644,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) { u8 buf[64]; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, - l2cap_build_conf_req(chan, buf), buf); + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } @@ -2687,7 +2696,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, /* throw out any old stored conf requests */ result = L2CAP_CONF_SUCCESS; len = l2cap_parse_conf_rsp(chan, rsp->data, len, - req, &result); + req, sizeof(req), &result); if (len < 0) { l2cap_send_disconn_req(conn, chan, ECONNRESET); goto done; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index cb1de47d8c2c..cd4f45bce336 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1382,18 +1382,11 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) { - if (ifp->flags&IFA_F_PERMANENT) { - spin_lock_bh(&ifp->lock); - addrconf_del_timer(ifp); - ifp->flags |= IFA_F_TENTATIVE; - if (dad_failed) - ifp->flags |= IFA_F_DADFAILED; - spin_unlock_bh(&ifp->lock); - if (dad_failed) - ipv6_ifa_notify(0, ifp); - in6_ifa_put(ifp); + if (dad_failed) + ifp->flags |= IFA_F_DADFAILED; + #ifdef CONFIG_IPV6_PRIVACY - } else if (ifp->flags&IFA_F_TEMPORARY) { + if (ifp->flags&IFA_F_TEMPORARY) { struct inet6_ifaddr *ifpub; spin_lock_bh(&ifp->lock); ifpub = ifp->ifpub; @@ -1406,7 +1399,16 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) spin_unlock_bh(&ifp->lock); } ipv6_del_addr(ifp); + } else #endif + if (ifp->flags&IFA_F_PERMANENT || !dad_failed) { + spin_lock_bh(&ifp->lock); + addrconf_del_timer(ifp); + ifp->flags |= IFA_F_TENTATIVE; + spin_unlock_bh(&ifp->lock); + if (dad_failed) + ipv6_ifa_notify(0, ifp); + in6_ifa_put(ifp); } else ipv6_del_addr(ifp); } diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index fc36accd487d..f95145c3ec49 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -705,10 +705,8 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, char uid[9]; /* Verify the input sockaddr */ - if (!addr || addr->sa_family != AF_IUCV) - return -EINVAL; - - if (addr_len < sizeof(struct sockaddr_iucv)) + if (addr_len < sizeof(struct sockaddr_iucv) || + 
addr->sa_family != AF_IUCV) return -EINVAL; lock_sock(sk); @@ -879,7 +877,7 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, struct iucv_sock *iucv = iucv_sk(sk); int err; - if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv)) + if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV) return -EINVAL; if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index a753c9b2336f..6f12fd694d29 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -432,7 +432,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) drop: IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); kfree_skb(skb); - return -1; + return 0; } /* Userspace will call sendmsg() on the tunnel socket to send L2TP diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 69ce23fc81eb..77a0cc183cc6 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -307,6 +307,21 @@ static void death_by_timeout(unsigned long ul_conntrack) nf_ct_put(ct); } +static inline bool +nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, + const struct nf_conntrack_tuple *tuple, + u16 zone) +{ + struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); + + /* A conntrack can be recreated with the equal tuple, + * so we need to check that the conntrack is confirmed + */ + return nf_ct_tuple_equal(tuple, &h->tuple) && + nf_ct_zone(ct) == zone && + nf_ct_is_confirmed(ct); +} + /* * Warning : * - Caller must take a reference on returned object @@ -328,8 +343,7 @@ ____nf_conntrack_find(struct net *net, u16 zone, local_bh_disable(); begin: hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { - if (nf_ct_tuple_equal(tuple, &h->tuple) && - nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) { + if (nf_ct_key_equal(h, tuple, zone)) { NF_CT_STAT_INC(net, found); local_bh_enable(); return h; @@ -376,8 +390,7 @@ __nf_conntrack_find_get(struct net *net, u16 zone, !atomic_inc_not_zero(&ct->ct_general.use))) h = NULL; else { - if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) || - nf_ct_zone(ct) != zone)) { + if (unlikely(!nf_ct_key_equal(h, tuple, zone))) { nf_ct_put(ct); goto begin; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index fecb51097fe5..d24106c1b609 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -310,7 +310,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) case RTM_DELTFILTER: err = tp->ops->delete(tp, fh); if (err == 0) - tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); + tfilter_notify(net, skb, n, tp, + t->tcm_handle, RTM_DELTFILTER); goto errout; case RTM_GETTFILTER: err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 446d22439492..41a0ebb5b554 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -158,8 +158,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 }, [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, - [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, - .len = WLAN_PMKID_LEN }, + [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN }, [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, @@ -3500,6 +3499,10 @@ static int validate_scan_freqs(struct nlattr *freqs) struct nlattr *attr1, *attr2; int n_channels 
= 0, tmp1, tmp2; + nla_for_each_nested(attr1, freqs, tmp1) + if (nla_len(attr1) != sizeof(u32)) + return 0; + nla_for_each_nested(attr1, freqs, tmp1) { n_channels++; /* @@ -5820,6 +5823,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) if (err) return err; + if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] || + !tb[NL80211_REKEY_DATA_KCK]) + return -EINVAL; if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) return -ERANGE; if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)