Message-ID: <20121002181756.GB9108@kroah.com>
Date: Tue, 2 Oct 2012 11:17:56 -0700
From: Greg KH <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
torvalds@...ux-foundation.org, stable@...r.kernel.org
Cc: lwn@....net, Jiri Slaby <jslaby@...e.cz>
Subject: Re: Linux 3.0.44
diff --git a/Makefile b/Makefile
index 4bd7aed..b49094b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 0
-SUBLEVEL = 43
+SUBLEVEL = 44
EXTRAVERSION =
NAME = Sneaky Weasel
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index e756d04..b15162f 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,8 +14,8 @@
*/
-#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
-#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
+#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic64_read(v) (*(volatile long *)&(v)->counter)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fad8e72..157781e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1234,6 +1234,32 @@ config ARM_ERRATA_754327
This workaround defines cpu_relax() as smp_mb(), preventing correctly
written polling loops from denying visibility of updates to memory.
+config ARM_ERRATA_764369
+ bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for erratum 764369
+ affecting Cortex-A9 MPCore with two or more processors (all
+ current revisions). Under certain timing circumstances, a data
+ cache line maintenance operation by MVA targeting an Inner
+ Shareable memory region may fail to proceed up to either the
+ Point of Coherency or to the Point of Unification of the
+ system. This workaround adds a DSB instruction before the
+ relevant cache maintenance functions and sets a specific bit
+ in the diagnostic control register of the SCU.
+
+config PL310_ERRATA_769419
+ bool "PL310 errata: no automatic Store Buffer drain"
+ depends on CACHE_L2X0
+ help
+ On revisions of the PL310 prior to r3p2, the Store Buffer does
+ not automatically drain. This can cause normal, non-cacheable
+ writes to be retained when the memory system is idle, leading
+ to suboptimal I/O performance for drivers using coherent DMA.
+ This option adds a write barrier to the cpu_idle loop so that,
+ on systems with an outer cache, the store buffer is drained
+ explicitly.
+
endmenu
source "arch/arm/common/Kconfig"
@@ -1298,32 +1324,6 @@ source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
-config ARM_ERRATA_764369
- bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
- depends on CPU_V7 && SMP
- help
- This option enables the workaround for erratum 764369
- affecting Cortex-A9 MPCore with two or more processors (all
- current revisions). Under certain timing circumstances, a data
- cache line maintenance operation by MVA targeting an Inner
- Shareable memory region may fail to proceed up to either the
- Point of Coherency or to the Point of Unification of the
- system. This workaround adds a DSB instruction before the
- relevant cache maintenance functions and sets a specific bit
- in the diagnostic control register of the SCU.
-
-config PL310_ERRATA_769419
- bool "PL310 errata: no automatic Store Buffer drain"
- depends on CACHE_L2X0
- help
- On revisions of the PL310 prior to r3p2, the Store Buffer does
- not automatically drain. This can cause normal, non-cacheable
- writes to be retained when the memory system is idle, leading
- to suboptimal I/O performance for drivers using coherent DMA.
- This option adds a write barrier to the cpu_idle loop so that,
- on systems with an outer cache, the store buffer is drained
- explicitly.
-
endmenu
menu "Kernel Features"
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 940b201..4d1f07d 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -539,6 +539,7 @@ __armv7_mmu_cache_on:
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ bic r0, r0, #1 << 28 @ clear SCTLR.TRE
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x003c @ write buffer
#ifdef CONFIG_MMU
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
index 93226cf..b1479fd 100644
--- a/arch/arm/include/asm/mutex.h
+++ b/arch/arm/include/asm/mutex.h
@@ -7,121 +7,10 @@
*/
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H
-
-#if __LINUX_ARM_ARCH__ < 6
-/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
-# include <asm-generic/mutex-xchg.h>
-#else
-
/*
- * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
- * atomic decrement (it is not a reliable atomic decrement but it satisfies
- * the defined semantics for our purpose, while being smaller and faster
- * than a real atomic decrement or atomic swap. The idea is to attempt
- * decrementing the lock value only once. If once decremented it isn't zero,
- * or if its store-back fails due to a dispute on the exclusive store, we
- * simply bail out immediately through the slow path where the lock will be
- * reattempted until it succeeds.
+ * On pre-ARMv6 hardware this results in a swp-based implementation,
+ * which is the most efficient. For ARMv6+, we emit a pair of exclusive
+ * accesses instead.
*/
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res;
-
- __asm__ (
-
- "ldrex %0, [%2] \n\t"
- "sub %0, %0, #1 \n\t"
- "strex %1, %0, [%2] "
-
- : "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __res |= __ex_flag;
- if (unlikely(__res != 0))
- fail_fn(count);
-}
-
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res;
-
- __asm__ (
-
- "ldrex %0, [%2] \n\t"
- "sub %0, %0, #1 \n\t"
- "strex %1, %0, [%2] "
-
- : "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __res |= __ex_flag;
- if (unlikely(__res != 0))
- __res = fail_fn(count);
- return __res;
-}
-
-/*
- * Same trick is used for the unlock fast path. However the original value,
- * rather than the result, is used to test for success in order to have
- * better generated assembly.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res, __orig;
-
- __asm__ (
-
- "ldrex %0, [%3] \n\t"
- "add %1, %0, #1 \n\t"
- "strex %2, %1, [%3] "
-
- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __orig |= __ex_flag;
- if (unlikely(__orig != 0))
- fail_fn(count);
-}
-
-/*
- * If the unlock was done on a contended lock, or if the unlock simply fails
- * then the mutex remains locked.
- */
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/*
- * For __mutex_fastpath_trylock we use another construct which could be
- * described as a "single value cmpxchg".
- *
- * This provides the needed trylock semantics like cmpxchg would, but it is
- * lighter and less generic than a true cmpxchg implementation.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res, __orig;
-
- __asm__ (
-
- "1: ldrex %0, [%3] \n\t"
- "subs %1, %0, #1 \n\t"
- "strexeq %2, %1, [%3] \n\t"
- "movlt %0, #0 \n\t"
- "cmpeq %2, #0 \n\t"
- "bgt 1b "
-
- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&count->counter)
- : "cc", "memory" );
-
- return __orig;
-}
-
-#endif
+#include <asm-generic/mutex-xchg.h>
#endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 942bda2..1396edf 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -451,6 +451,13 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
if (regs && (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV))) {
m->ip = regs->ip;
m->cs = regs->cs;
+ /*
+ * When in VM86 mode make the cs look like ring 3
+ * always. This is a lie, but it's better than passing
+ * the additional vm86 bit around everywhere.
+ */
+ if (v8086_mode(regs))
+ m->cs |= 3;
} else {
m->ip = 0;
m->cs = 0;
@@ -988,6 +995,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
*/
add_taint(TAINT_MACHINE_CHECK);
+ mce_get_rip(&m, regs);
severity = mce_severity(&m, tolerant, NULL);
/*
@@ -1026,7 +1034,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
mce_ring_add(m.addr >> PAGE_SHIFT);
- mce_get_rip(&m, regs);
mce_log(&m);
if (severity > worst) {
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 6dd8955..0951b81 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -521,3 +521,20 @@ static void sb600_disable_hpet_bar(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
+
+/*
+ * Twinhead H12Y needs us to block out a region otherwise we map devices
+ * there and any access kills the box.
+ *
+ * See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
+ *
+ * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
+ */
+static void __devinit twinhead_reserve_killing_zone(struct pci_dev *dev)
+{
+ if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
+ pr_info("Reserving memory on Twinhead H12Y\n");
+ request_mem_region(0xFFB00000, 0x100000, "twinhead");
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f8dcda4..5669564 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -15,6 +15,7 @@
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
+#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
@@ -463,4 +464,7 @@ void __init xen_arch_setup(void)
boot_option_idle_override = IDLE_HALT;
fiddle_vdso();
+#ifdef CONFIG_NUMA
+ numa_off = 1;
+#endif
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 0c0669f..1893506 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -188,10 +188,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
* }
*
- * Ignores apic_id and always return 0 for CPU0's handle.
+ * Ignores apic_id and always returns 0 for the processor
+ * handle with acpi id 0 if nr_cpu_ids is 1.
+ * This should be the case if SMP tables are not found.
* Return -1 for other CPU's handle.
*/
- if (acpi_id == 0)
+ if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
return apic_id;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index abf2f4e..8300250 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -394,6 +394,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */
{ PCI_DEVICE(0x1b4b, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
+ { PCI_DEVICE(0x1b4b, 0x9192),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
{ PCI_DEVICE(0x1b4b, 0x91a3),
.driver_data = board_ahci_yes_fbs },
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 000d03a..600ede0 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4138,6 +4138,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Devices which aren't very happy with higher link speeds */
{ "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
+ { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
/*
* Devices which choke on SETXFER. Applies only if both the
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1023392..13f7db6 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -360,7 +360,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto repeat;
}
- dev->power.deferred_resume = false;
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
@@ -420,6 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
wake_up_all(&dev->power.wait_queue);
if (dev->power.deferred_resume) {
+ dev->power.deferred_resume = false;
rpm_resume(dev, 0);
retval = -EAGAIN;
goto out;
@@ -533,6 +533,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
|| dev->parent->power.runtime_status == RPM_ACTIVE) {
atomic_inc(&dev->parent->power.child_count);
spin_unlock(&dev->parent->power.lock);
+ retval = 1;
goto no_callback; /* Assume success. */
}
spin_unlock(&dev->parent->power.lock);
@@ -610,7 +611,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
}
wake_up_all(&dev->power.wait_queue);
- if (!retval)
+ if (retval >= 0)
rpm_idle(dev, RPM_ASYNC);
out:
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 3796fcc..f01b3507 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -795,6 +795,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
}
break;
case CMD_PROTOCOL_ERR:
+ cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev,
"%p has protocol error\n", c);
break;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f4585b9..71346fe 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -60,6 +60,9 @@ static struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+ /* Apple-specific (Broadcom) devices */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
+
/* Broadcom SoftSailing reporting vendor specific */
{ USB_DEVICE(0x0a5c, 0x21e1) },
@@ -102,15 +105,14 @@ static struct usb_device_id btusb_table[] = {
/* Broadcom BCM20702A0 */
{ USB_DEVICE(0x0489, 0xe042) },
- { USB_DEVICE(0x0a5c, 0x21e3) },
- { USB_DEVICE(0x0a5c, 0x21e6) },
- { USB_DEVICE(0x0a5c, 0x21e8) },
- { USB_DEVICE(0x0a5c, 0x21f3) },
{ USB_DEVICE(0x413c, 0x8197) },
/* Foxconn - Hon Hai */
{ USB_DEVICE(0x0489, 0xe033) },
+ /*Broadcom devices with vendor specific id */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
+
{ } /* Terminating entry */
};
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index ad683ec..b7fe343 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpumask.h>
-#include <linux/sched.h> /* for current / set_cpus_allowed() */
#include <linux/io.h>
#include <linux/delay.h>
@@ -1132,16 +1131,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
return res;
}
-/* Driver entry point to switch to the target frequency */
-static int powernowk8_target(struct cpufreq_policy *pol,
- unsigned targfreq, unsigned relation)
+struct powernowk8_target_arg {
+ struct cpufreq_policy *pol;
+ unsigned targfreq;
+ unsigned relation;
+};
+
+static long powernowk8_target_fn(void *arg)
{
- cpumask_var_t oldmask;
+ struct powernowk8_target_arg *pta = arg;
+ struct cpufreq_policy *pol = pta->pol;
+ unsigned targfreq = pta->targfreq;
+ unsigned relation = pta->relation;
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
u32 checkfid;
u32 checkvid;
unsigned int newstate;
- int ret = -EIO;
+ int ret;
if (!data)
return -EINVAL;
@@ -1149,29 +1155,16 @@ static int powernowk8_target(struct cpufreq_policy *pol,
checkfid = data->currfid;
checkvid = data->currvid;
- /* only run on specific CPU from here on. */
- /* This is poor form: use a workqueue or smp_call_function_single */
- if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
- return -ENOMEM;
-
- cpumask_copy(oldmask, tsk_cpus_allowed(current));
- set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
-
- if (smp_processor_id() != pol->cpu) {
- printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
- goto err_out;
- }
-
if (pending_bit_stuck()) {
printk(KERN_ERR PFX "failing targ, change pending bit set\n");
- goto err_out;
+ return -EIO;
}
pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
pol->cpu, targfreq, pol->min, pol->max, relation);
if (query_current_values_with_pending_wait(data))
- goto err_out;
+ return -EIO;
if (cpu_family != CPU_HW_PSTATE) {
pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
@@ -1189,7 +1182,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
if (cpufreq_frequency_table_target(pol, data->powernow_table,
targfreq, relation, &newstate))
- goto err_out;
+ return -EIO;
mutex_lock(&fidvid_mutex);
@@ -1202,9 +1195,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
ret = transition_frequency_fidvid(data, newstate);
if (ret) {
printk(KERN_ERR PFX "transition frequency failed\n");
- ret = 1;
mutex_unlock(&fidvid_mutex);
- goto err_out;
+ return 1;
}
mutex_unlock(&fidvid_mutex);
@@ -1213,12 +1205,25 @@ static int powernowk8_target(struct cpufreq_policy *pol,
data->powernow_table[newstate].index);
else
pol->cur = find_khz_freq_from_fid(data->currfid);
- ret = 0;
-err_out:
- set_cpus_allowed_ptr(current, oldmask);
- free_cpumask_var(oldmask);
- return ret;
+ return 0;
+}
+
+/* Driver entry point to switch to the target frequency */
+static int powernowk8_target(struct cpufreq_policy *pol,
+ unsigned targfreq, unsigned relation)
+{
+ struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
+ .relation = relation };
+
+ /*
+ * Must run on @pol->cpu. cpufreq core is responsible for ensuring
+ * that we're bound to the current CPU and pol->cpu stays online.
+ */
+ if (smp_processor_id() == pol->cpu)
+ return powernowk8_target_fn(&pta);
+ else
+ return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
}
/* Driver entry point to verify the policy and range of frequencies */
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 10c6349..1357c3b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -674,7 +674,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
flags);
if (unlikely(!atslave || !sg_len)) {
- dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
+ dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
return NULL;
}
@@ -702,6 +702,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
+ if (unlikely(!len)) {
+ dev_dbg(chan2dev(chan),
+ "prep_slave_sg: sg(%d) data length is zero\n", i);
+ goto err;
+ }
mem_width = 2;
if (unlikely(mem & 3 || len & 3))
mem_width = 0;
@@ -736,6 +741,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
+ if (unlikely(!len)) {
+ dev_dbg(chan2dev(chan),
+ "prep_slave_sg: sg(%d) data length is zero\n", i);
+ goto err;
+ }
mem_width = 2;
if (unlikely(mem & 3 || len & 3))
mem_width = 0;
@@ -769,6 +779,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
err_desc_get:
dev_err(chan2dev(chan), "not enough descriptors available\n");
+err:
atc_desc_put(atchan, first);
return NULL;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 236bbe0..918bac8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -160,7 +160,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
u32 temp;
u32 enable_bits = SDVO_ENABLE;
- if (intel_hdmi->has_audio)
+ if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON)
enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->sdvox_reg);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index ef6b426..cee3184 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -488,7 +488,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
- if ((dev->pdev->device == 0x9802) &&
+ if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
(dev->pdev->subsystem_vendor == 0x1734) &&
(dev->pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 5231934..a5737a5 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -133,6 +133,12 @@ static ssize_t show_voltage(struct device *dev,
}
}
+static ssize_t ads7871_show_name(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
+}
+
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
@@ -142,6 +148,8 @@ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
+static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL);
+
static struct attribute *ads7871_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
@@ -151,6 +159,7 @@ static struct attribute *ads7871_attributes[] = {
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in7_input.dev_attr.attr,
+ &dev_attr_name.attr,
NULL
};
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index e8e18ca..ac2d6cb 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -128,12 +128,12 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
* counter saturations resulting in bogus power readings.
* We correct this value ourselves to cope with older BIOSes.
*/
-static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
+static const struct pci_device_id affected_device[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
{ 0 }
};
-static void __devinit tweak_runavg_range(struct pci_dev *pdev)
+static void tweak_runavg_range(struct pci_dev *pdev)
{
u32 val;
@@ -157,6 +157,16 @@ static void __devinit tweak_runavg_range(struct pci_dev *pdev)
REG_TDP_RUNNING_AVERAGE, val);
}
+#ifdef CONFIG_PM
+static int fam15h_power_resume(struct pci_dev *pdev)
+{
+ tweak_runavg_range(pdev);
+ return 0;
+}
+#else
+#define fam15h_power_resume NULL
+#endif
+
static void __devinit fam15h_power_init_data(struct pci_dev *f4,
struct fam15h_power_data *data)
{
@@ -255,6 +265,7 @@ static struct pci_driver fam15h_power_driver = {
.id_table = fam15h_power_id_table,
.probe = fam15h_power_probe,
.remove = __devexit_p(fam15h_power_remove),
+ .resume = fam15h_power_resume,
};
static int __init fam15h_power_init(void)
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 5724074..b6adfac 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -44,12 +44,13 @@ static ssize_t madc_read(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct twl4030_madc_request req;
+ struct twl4030_madc_request req = {
+ .channels = 1 << attr->index,
+ .method = TWL4030_MADC_SW2,
+ .type = TWL4030_MADC_WAIT,
+ };
long val;
- req.channels = (1 << attr->index);
- req.method = TWL4030_MADC_SW2;
- req.func_cb = NULL;
val = twl4030_madc_conversion(&req);
if (val < 0)
return val;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 15f53c5..e01fd4c 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -335,6 +335,12 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
{
.matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
+ },
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
},
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index d497db0..509135f 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -16,7 +16,6 @@
#include <linux/sched.h>
#include "isdnloop.h"
-static char *revision = "$Revision: 1.11.6.7 $";
static char *isdnloop_id = "loop0";
MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
@@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1)
static int __init
isdnloop_init(void)
{
- char *p;
- char rev[10];
-
- if ((p = strchr(revision, ':'))) {
- strcpy(rev, p + 1);
- p = strchr(rev, '$');
- *p = 0;
- } else
- strcpy(rev, " ??? ");
- printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
-
if (isdnloop_id)
return (isdnloop_addcard(isdnloop_id));
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8b04a02..98262e5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1096,8 +1096,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
ret = 0;
}
rdev->sectors = rdev->sb_start;
- /* Limit to 4TB as metadata cannot record more than that */
- if (rdev->sectors >= (2ULL << 32))
+ /* Limit to 4TB as metadata cannot record more than that.
+ * (not needed for Linear and RAID0 as metadata doesn't
+ * record this size)
+ */
+ if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
rdev->sectors = (2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
@@ -1379,7 +1382,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
- if (num_sectors >= (2ULL << 32))
+ if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (2ULL << 32) - 2;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 3186ac7..9cfb56d 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -772,10 +772,11 @@ static ssize_t show_protocols(struct device *device,
if (dev->driver_type == RC_DRIVER_SCANCODE) {
enabled = dev->rc_map.rc_type;
allowed = dev->allowed_protos;
- } else {
+ } else if (dev->raw) {
enabled = dev->raw->enabled_protocols;
allowed = ir_raw_get_allowed_protocols();
- }
+ } else
+ return -ENODEV;
IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
(long long)allowed,
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 17bbacb..cc2ae7e 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,6 +18,8 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/uv/uv_hub.h>
@@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
+static int xpc_mq_node = -1;
+
static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
@@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
#if defined CONFIG_X86_64
mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
UV_AFFINITY_CPU);
- if (mq->irq < 0) {
- dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
- -mq->irq);
+ if (mq->irq < 0)
return mq->irq;
- }
mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
@@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
mq->mmr_blade = uv_cpu_to_blade_id(cpu);
nid = cpu_to_node(cpu);
- page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
- pg_order);
+ page = alloc_pages_exact_node(nid,
+ GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
@@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};
+static int
+xpc_init_mq_node(int nid)
+{
+ int cpu;
+
+ get_online_cpus();
+
+ for_each_cpu(cpu, cpumask_of_node(nid)) {
+ xpc_activate_mq_uv =
+ xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
+ XPC_ACTIVATE_IRQ_NAME,
+ xpc_handle_activate_IRQ_uv);
+ if (!IS_ERR(xpc_activate_mq_uv))
+ break;
+ }
+ if (IS_ERR(xpc_activate_mq_uv)) {
+ put_online_cpus();
+ return PTR_ERR(xpc_activate_mq_uv);
+ }
+
+ for_each_cpu(cpu, cpumask_of_node(nid)) {
+ xpc_notify_mq_uv =
+ xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
+ XPC_NOTIFY_IRQ_NAME,
+ xpc_handle_notify_IRQ_uv);
+ if (!IS_ERR(xpc_notify_mq_uv))
+ break;
+ }
+ if (IS_ERR(xpc_notify_mq_uv)) {
+ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+ put_online_cpus();
+ return PTR_ERR(xpc_notify_mq_uv);
+ }
+
+ put_online_cpus();
+ return 0;
+}
+
int
xpc_init_uv(void)
{
+ int nid;
+ int ret = 0;
+
xpc_arch_ops = xpc_arch_ops_uv;
if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
@@ -1742,21 +1785,21 @@ xpc_init_uv(void)
return -E2BIG;
}
- xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
- XPC_ACTIVATE_IRQ_NAME,
- xpc_handle_activate_IRQ_uv);
- if (IS_ERR(xpc_activate_mq_uv))
- return PTR_ERR(xpc_activate_mq_uv);
+ if (xpc_mq_node < 0)
+ for_each_online_node(nid) {
+ ret = xpc_init_mq_node(nid);
- xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
- XPC_NOTIFY_IRQ_NAME,
- xpc_handle_notify_IRQ_uv);
- if (IS_ERR(xpc_notify_mq_uv)) {
- xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
- return PTR_ERR(xpc_notify_mq_uv);
- }
+ if (!ret)
+ break;
+ }
+ else
+ ret = xpc_init_mq_node(xpc_mq_node);
- return 0;
+ if (ret < 0)
+ dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
+ -ret);
+
+ return ret;
}
void
@@ -1765,3 +1808,6 @@ xpc_exit_uv(void)
xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}
+
+module_param(xpc_mq_node, int, 0);
+MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index bd8805c..08b59b8 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -306,6 +306,9 @@ static int mmc_read_switch(struct mmc_card *card)
goto out;
}
+ if (status[13] & UHS_SDR50_BUS_SPEED)
+ card->sw_caps.hs_max_dtr = 50000000;
+
if (card->scr.sda_spec3) {
card->sw_caps.sd3_bus_mode = status[13];
@@ -348,9 +351,6 @@ static int mmc_read_switch(struct mmc_card *card)
}
card->sw_caps.sd3_curr_limit = status[7];
- } else {
- if (status[13] & 0x02)
- card->sw_caps.hs_max_dtr = 50000000;
}
out:
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index d513d47..74160eb 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -278,11 +278,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
writel(stat & MXS_MMC_IRQ_BITS,
host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+ spin_unlock(&host->lock);
+
if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
mmc_signal_sdio_irq(host->mmc);
- spin_unlock(&host->lock);
-
if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
cmd->error = -ETIMEDOUT;
else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index c3b08f1..62ca03a 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
int div = 1;
u32 temp;
+ if (clock == 0)
+ goto out;
+
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
| ESDHC_CLOCK_MASK);
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- if (clock == 0)
- goto out;
-
while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
pre_div *= 2;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 153008f..8bcd5e9 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2515,8 +2515,9 @@ int sdhci_add_host(struct sdhci_host *host)
mmc_card_is_removable(mmc))
mmc->caps |= MMC_CAP_NEEDS_POLL;
- /* UHS-I mode(s) supported by the host controller. */
- if (host->version >= SDHCI_SPEC_300)
+ /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
+ if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
+ SDHCI_SUPPORT_DDR50))
mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
/* SDR104 supports also implies SDR50 support */
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index fd3bf77..326bd93 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -356,7 +356,7 @@ retry:
*/
err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
vid_hdr, 0);
- kfree(new_seb);
+ kmem_cache_free(si->scan_leb_slab, new_seb);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -369,7 +369,7 @@ write_error:
list_add(&new_seb->u.list, &si->erase);
goto retry;
}
- kfree(new_seb);
+ kmem_cache_free(si->scan_leb_slab, new_seb);
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index f1942ca..b4159a6 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1249,7 +1249,6 @@ static irqreturn_t ican3_irq(int irq, void *dev_id)
*/
static int ican3_reset_module(struct ican3_dev *mod)
{
- u8 val = 1 << mod->num;
unsigned long start;
u8 runold, runnew;
@@ -1263,8 +1262,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
runold = ioread8(mod->dpm + TARGET_RUNNING);
/* reset the module */
- iowrite8(val, &mod->ctrl->reset_assert);
- iowrite8(val, &mod->ctrl->reset_deassert);
+ iowrite8(0x00, &mod->dpmctrl->hwreset);
/* wait until the module has finished resetting and is running */
start = jiffies;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 330140e..9bcc39a 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -83,6 +83,11 @@
#define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n))
#define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 0x90 : 0x94)
#define INSTRUCTION_RESET 0xC0
+#define RTS_TXB0 0x01
+#define RTS_TXB1 0x02
+#define RTS_TXB2 0x04
+#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07))
+
/* MPC251x registers */
#define CANSTAT 0x0e
@@ -397,6 +402,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
int tx_buf_idx)
{
+ struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
u32 sid, eid, exide, rtr;
u8 buf[SPI_TRANSFER_BUF_LEN];
@@ -418,7 +424,10 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
- mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ);
+
+ /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
+ priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
+ mcp251x_spi_trans(priv->spi, 1);
}
static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f7bbde9..6ea2c09 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -969,12 +969,12 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct ti_hecc_priv *priv = netdev_priv(ndev);
+ unregister_candev(ndev);
clk_disable(priv->clk);
clk_put(priv->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap(priv->base);
release_mem_region(res->start, resource_size(res));
- unregister_candev(ndev);
free_candev(ndev);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 0d0ee55..8402d19 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -2088,7 +2088,8 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_CHECK_PHY_HANG
- | FLAG2_DISABLE_ASPM_L0S,
+ | FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_DISABLE_ASPM_L1,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4840ab7..4309296 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -652,7 +652,6 @@ static int netconsole_netdev_event(struct notifier_block *this,
flags);
dev_put(nt->np.dev);
nt->np.dev = NULL;
- netconsole_target_put(nt);
}
nt->enabled = 0;
stopped = true;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 7d1651b..be3cade 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1383,6 +1383,11 @@ static int efx_probe_all(struct efx_nic *efx)
goto fail2;
}
+ BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
+ if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
+ rc = -EINVAL;
+ goto fail3;
+ }
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
rc = efx_probe_channels(efx);
if (rc)
@@ -1942,6 +1947,7 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+ net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
/* Clear MAC statistics */
efx->mac_op->update_stats(efx);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index b0d1209..a5d1c60d 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -38,6 +38,7 @@ extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
/* RX */
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -60,10 +61,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL
-/* The smallest [rt]xq_entries that the driver supports. Callers of
- * efx_wake_queue() assume that they can subsequently send at least one
- * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
-#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EFX_TSO_MAX_SEGS 100
+
+/* The smallest [rt]xq_entries that the driver supports. RX minimum
+ * is a bit arbitrary. For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EFX_RXQ_MIN_ENT 128U
+#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
/* Filters */
extern int efx_probe_filters(struct efx_nic *efx);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index d229027..cfaf801 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -677,21 +677,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ u32 txq_entries;
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
ring->tx_pending > EFX_MAX_DMAQ_SIZE)
return -EINVAL;
- if (ring->rx_pending < EFX_MIN_RING_SIZE ||
- ring->tx_pending < EFX_MIN_RING_SIZE) {
+ if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
netif_err(efx, drv, efx->net_dev,
- "TX and RX queues cannot be smaller than %ld\n",
- EFX_MIN_RING_SIZE);
+ "RX queues cannot be smaller than %u\n",
+ EFX_RXQ_MIN_ENT);
return -EINVAL;
}
- return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+ txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
+ if (txq_entries != ring->tx_pending)
+ netif_warn(efx, drv, efx->net_dev,
+ "increasing TX queue size to minimum of %u\n",
+ txq_entries);
+
+ return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
}
static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 7443f99..d2405ce 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -65,6 +65,11 @@ enum {
#define FALCON_GMAC_LOOPBACKS \
(1 << LOOPBACK_GMAC)
+/* Alignment of PCIe DMA boundaries (4KB) */
+#define EFX_PAGE_SIZE 4096
+/* Size and alignment of buffer table entries (same) */
+#define EFX_BUF_SIZE EFX_PAGE_SIZE
+
/**
* struct falcon_board_type - board operations and type information
* @id: Board type id, as found in NVRAM
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 84eb99e..6d3b68a 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -115,6 +115,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
return len;
}
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+ /* Header and payload descriptor for each output segment, plus
+ * one for every input fragment boundary within a segment
+ */
+ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+ /* Possibly one more per segment for the alignment workaround */
+ if (EFX_WORKAROUND_5391(efx))
+ max_descs += EFX_TSO_MAX_SEGS;
+
+ /* Possibly more for PCIe page boundaries within input fragments */
+ if (PAGE_SIZE > EFX_PAGE_SIZE)
+ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+ DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+ return max_descs;
+}
+
/*
* Add a socket buffer to a TX queue
*
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index c44e0e4..e2a988c 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1537,6 +1537,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x2001, 0x3c05),
.driver_info = (unsigned long) &ax88772_info,
}, {
+ // DLink DUB-E100 H/W Ver C1
+ USB_DEVICE (0x2001, 0x1a02),
+ .driver_info = (unsigned long) &ax88772_info,
+}, {
// Linksys USB1000
USB_DEVICE (0x1737, 0x0039),
.driver_info = (unsigned long) &ax88178_info,
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 937f9e8..1493171 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1618,6 +1618,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
+ u32 reg;
/*
* Allocate eeprom data.
@@ -1631,6 +1632,14 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
return retval;
/*
+ * Enable rfkill polling by setting GPIO direction of the
+ * rfkill switch GPIO pin correctly.
+ */
+ rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
+ rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1);
+ rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
+
+ /*
* Initialize hw specifications.
*/
retval = rt2400pci_probe_hw_mode(rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index d3a4a68..7564ae9 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -670,6 +670,7 @@
#define GPIOCSR_BIT5 FIELD32(0x00000020)
#define GPIOCSR_BIT6 FIELD32(0x00000040)
#define GPIOCSR_BIT7 FIELD32(0x00000080)
+#define GPIOCSR_BIT8 FIELD32(0x00000100)
/*
* BBPPCSR: BBP Pin control register.
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index d27d7b8..cdd480f 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1936,6 +1936,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
+ u32 reg;
/*
* Allocate eeprom data.
@@ -1949,6 +1950,14 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
return retval;
/*
+ * Enable rfkill polling by setting GPIO direction of the
+ * rfkill switch GPIO pin correctly.
+ */
+ rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
+ rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
+ rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
+
+ /*
* Initialize hw specifications.
*/
retval = rt2500pci_probe_hw_mode(rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 15237c2..f124a1b 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
u16 reg;
rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
- return rt2x00_get_field32(reg, MAC_CSR19_BIT7);
+ return rt2x00_get_field16(reg, MAC_CSR19_BIT7);
}
#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1768,6 +1768,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
+ u16 reg;
/*
* Allocate eeprom data.
@@ -1781,6 +1782,14 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
return retval;
/*
+ * Enable rfkill polling by setting GPIO direction of the
+ * rfkill switch GPIO pin correctly.
+ */
+ rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
+ rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0);
+ rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
+
+ /*
* Initialize hw specifications.
*/
retval = rt2500usb_probe_hw_mode(rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index b493306..196bd51 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -189,14 +189,15 @@
* MAC_CSR19: GPIO control register.
*/
#define MAC_CSR19 0x0426
-#define MAC_CSR19_BIT0 FIELD32(0x0001)
-#define MAC_CSR19_BIT1 FIELD32(0x0002)
-#define MAC_CSR19_BIT2 FIELD32(0x0004)
-#define MAC_CSR19_BIT3 FIELD32(0x0008)
-#define MAC_CSR19_BIT4 FIELD32(0x0010)
-#define MAC_CSR19_BIT5 FIELD32(0x0020)
-#define MAC_CSR19_BIT6 FIELD32(0x0040)
-#define MAC_CSR19_BIT7 FIELD32(0x0080)
+#define MAC_CSR19_BIT0 FIELD16(0x0001)
+#define MAC_CSR19_BIT1 FIELD16(0x0002)
+#define MAC_CSR19_BIT2 FIELD16(0x0004)
+#define MAC_CSR19_BIT3 FIELD16(0x0008)
+#define MAC_CSR19_BIT4 FIELD16(0x0010)
+#define MAC_CSR19_BIT5 FIELD16(0x0020)
+#define MAC_CSR19_BIT6 FIELD16(0x0040)
+#define MAC_CSR19_BIT7 FIELD16(0x0080)
+#define MAC_CSR19_BIT8 FIELD16(0x0100)
/*
* MAC_CSR20: LED control register.
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index dab7dc1..e947d3a 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -941,6 +941,7 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
+ u32 reg;
/*
* Allocate eeprom data.
@@ -954,6 +955,14 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
return retval;
/*
+ * Enable rfkill polling by setting GPIO direction of the
+ * rfkill switch GPIO pin correctly.
+ */
+ rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
+ rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+
+ /*
* Initialize hw specifications.
*/
retval = rt2800_probe_hw_mode(rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 39ebf9f..9366ef0 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -600,6 +600,7 @@ static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
+ u32 reg;
/*
* Allocate eeprom data.
@@ -613,6 +614,14 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
return retval;
/*
+ * Enable rfkill polling by setting GPIO direction of the
+ * rfkill switch GPIO pin correctly.
+ */
+ rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
+ rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+
+ /*
* Initialize hw specifications.
*/
retval = rt2800_probe_hw_mode(rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 9e5fd45..17de24e 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2840,6 +2840,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
+ u32 reg;
/*
* Disable power saving.
@@ -2858,6 +2859,14 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
return retval;
/*
+ * Enable rfkill polling by setting GPIO direction of the
+ * rfkill switch GPIO pin correctly.
+ */
+ rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
+ rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1);
+ rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
+
+ /*
* Initialize hw specifications.
*/
retval = rt61pci_probe_hw_mode(rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index e3cd6db..8f3da5a 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -372,6 +372,7 @@ struct hw_pairwise_ta_entry {
#define MAC_CSR13_BIT10 FIELD32(0x00000400)
#define MAC_CSR13_BIT11 FIELD32(0x00000800)
#define MAC_CSR13_BIT12 FIELD32(0x00001000)
+#define MAC_CSR13_BIT13 FIELD32(0x00002000)
/*
* MAC_CSR14: LED control register.
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index ad20953..1a06231 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2177,6 +2177,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
+ u32 reg;
/*
* Allocate eeprom data.
@@ -2190,6 +2191,14 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
return retval;
/*
+ * Enable rfkill polling by setting GPIO direction of the
+ * rfkill switch GPIO pin correctly.
+ */
+ rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
+ rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0);
+ rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
+
+ /*
* Initialize hw specifications.
*/
retval = rt73usb_probe_hw_mode(rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 9f6b470..df1cc11 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -282,6 +282,9 @@ struct hw_pairwise_ta_entry {
#define MAC_CSR13_BIT10 FIELD32(0x00000400)
#define MAC_CSR13_BIT11 FIELD32(0x00000800)
#define MAC_CSR13_BIT12 FIELD32(0x00001000)
+#define MAC_CSR13_BIT13 FIELD32(0x00002000)
+#define MAC_CSR13_BIT14 FIELD32(0x00004000)
+#define MAC_CSR13_BIT15 FIELD32(0x00008000)
/*
* MAC_CSR14: LED control register.
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d65df92..26f7f01 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -646,9 +646,9 @@ static ssize_t show_infos(struct device *dev,
* The significance of others is yet to be found.
* If we don't find the method, we assume the device are present.
*/
- rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp);
+ rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
if (!ACPI_FAILURE(rv))
- len += sprintf(page + len, "HRWS value : %#x\n",
+ len += sprintf(page + len, "HWRS value : %#x\n",
(uint) temp);
/*
* Another value for userspace: the ASYM method returns 0x02 for
@@ -1340,9 +1340,9 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
* The significance of others is yet to be found.
*/
status =
- acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result);
+ acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
if (!ACPI_FAILURE(status))
- pr_notice(" HRWS returned %x", (int)hwrs_result);
+ pr_notice(" HWRS returned %x", (int)hwrs_result);
if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
asus->have_rsts = true;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 0580d99..9827fe9 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -68,6 +68,10 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x8A, { KEY_PROG1 } },
{ KE_KEY, 0x95, { KEY_MEDIA } },
{ KE_KEY, 0x99, { KEY_PHONE } },
+ { KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
+ { KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
+ { KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
+ { KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
{ KE_KEY, 0xb5, { KEY_CALC } },
{ KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 368d0e6..15e6d5c 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -121,9 +121,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
if (!pdata->rtc_24h) {
- tm->tm_hour %= 12;
- if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM)
+ if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) {
+ tm->tm_hour -= 20;
+ tm->tm_hour %= 12;
tm->tm_hour += 12;
+ } else
+ tm->tm_hour %= 12;
}
tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index f9a2799..5e4e725 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -490,6 +490,11 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
goto out2;
}
+ /* ensure interrupts are disabled, bootloaders can be strange */
+ ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "unable to disable interrupt\n");
+
/* init cached IRQ enable bits */
ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
if (ret < 0)
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5c54a2d..ca397f8 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1260,6 +1260,9 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
int rc = 0;
u64 mask64;
+ memset(&iscsi_init, 0x00, sizeof(struct iscsi_kwqe_init1));
+ memset(&iscsi_init2, 0x00, sizeof(struct iscsi_kwqe_init2));
+
bnx2i_adjust_qp_size(hba);
iscsi_init.flags =
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 58f99f4..a3f856e 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1209,8 +1209,9 @@ static void complete_scsi_command(struct CommandList *cp)
}
break;
case CMD_PROTOCOL_ERR:
+ cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p has "
- "protocol error \n", cp);
+ "protocol error\n", cp);
break;
case CMD_HARDWARE_ERR:
cmd->result = DID_ERROR << 16;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 7d6e476..679fe6a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1097,6 +1097,13 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
u16 message_control;
+ /* Check whether controller SAS2008 B0 controller,
+ if it is SAS2008 B0 controller use IO-APIC instead of MSIX */
+ if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
+ ioc->pdev->revision == 0x01) {
+ return -EINVAL;
+ }
+
base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
if (!base) {
dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
diff --git a/drivers/spi/spi_fsl_spi.c b/drivers/spi/spi_fsl_spi.c
index 7963c9b..a725b07 100644
--- a/drivers/spi/spi_fsl_spi.c
+++ b/drivers/spi/spi_fsl_spi.c
@@ -139,10 +139,12 @@ static void fsl_spi_change_mode(struct spi_device *spi)
static void fsl_spi_chipselect(struct spi_device *spi, int value)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
+ struct fsl_spi_platform_data *pdata;
bool pol = spi->mode & SPI_CS_HIGH;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
+ pdata = spi->dev.parent->parent->platform_data;
+
if (value == BITBANG_CS_INACTIVE) {
if (pdata->cs_control)
pdata->cs_control(spi, !pol);
@@ -934,7 +936,7 @@ err:
static void fsl_spi_cs_control(struct spi_device *spi, bool on)
{
- struct device *dev = spi->dev.parent;
+ struct device *dev = spi->dev.parent->parent;
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
u16 cs = spi->chip_select;
int gpio = pinfo->gpios[cs];
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 3141dc8..966b693 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -655,7 +655,7 @@ static int das08jr_ao_winsn(struct comedi_device *dev,
int chan;
lsb = data[0] & 0xff;
- msb = (data[0] >> 8) & 0xf;
+ msb = (data[0] >> 8) & 0xff;
chan = CR_CHAN(insn->chanspec);
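The das08 hunk above widens the mask on the high byte of the analog-output sample; keeping only four bits silently truncates anything wider than 12 bits. A minimal, self-contained sketch of the split (the 16-bit width and names are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Split a DAC sample into the two byte-wide registers the board expects.
 * Masking the high part with 0xf would clip everything above 12 bits. */
static void split_sample(uint16_t sample, uint8_t *lsb, uint8_t *msb)
{
	*lsb = sample & 0xff;
	*msb = (sample >> 8) & 0xff;	/* keep the full high byte, not 0xf */
}

int main(void)
{
	uint8_t lsb, msb;

	split_sample(0xabc0, &lsb, &msb);
	printf("lsb=%02x msb=%02x\n", lsb, msb);	/* lsb=c0 msb=ab */
	return 0;
}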
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index 0d38645..39bb66b 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -53,6 +53,7 @@
#include <linux/io.h>
#include <asm/irq.h>
#include <linux/fcntl.h>
+#include <linux/platform_device.h>
#ifdef LIRC_ON_SA1100
#include <asm/hardware.h>
#ifdef CONFIG_SA1100_COLLIE
@@ -488,9 +489,11 @@ static struct lirc_driver driver = {
.owner = THIS_MODULE,
};
+static struct platform_device *lirc_sir_dev;
static int init_chrdev(void)
{
+ driver.dev = &lirc_sir_dev->dev;
driver.minor = lirc_register_driver(&driver);
if (driver.minor < 0) {
printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n");
@@ -1216,20 +1219,71 @@ static int init_lirc_sir(void)
return 0;
}
+static int __devinit lirc_sir_probe(struct platform_device *dev)
+{
+ return 0;
+}
+
+static int __devexit lirc_sir_remove(struct platform_device *dev)
+{
+ return 0;
+}
+
+static struct platform_driver lirc_sir_driver = {
+ .probe = lirc_sir_probe,
+ .remove = __devexit_p(lirc_sir_remove),
+ .driver = {
+ .name = "lirc_sir",
+ .owner = THIS_MODULE,
+ },
+};
static int __init lirc_sir_init(void)
{
int retval;
+ retval = platform_driver_register(&lirc_sir_driver);
+ if (retval) {
+ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register "
+ "failed!\n");
+ return -ENODEV;
+ }
+
+ lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
+ if (!lirc_sir_dev) {
+ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc "
+ "failed!\n");
+ retval = -ENOMEM;
+ goto pdev_alloc_fail;
+ }
+
+ retval = platform_device_add(lirc_sir_dev);
+ if (retval) {
+ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add "
+ "failed!\n");
+ retval = -ENODEV;
+ goto pdev_add_fail;
+ }
+
retval = init_chrdev();
if (retval < 0)
- return retval;
+ goto fail;
+
retval = init_lirc_sir();
if (retval) {
drop_chrdev();
- return retval;
+ goto fail;
}
+
return 0;
+
+fail:
+ platform_device_del(lirc_sir_dev);
+pdev_add_fail:
+ platform_device_put(lirc_sir_dev);
+pdev_alloc_fail:
+ platform_driver_unregister(&lirc_sir_driver);
+ return retval;
}
static void __exit lirc_sir_exit(void)
@@ -1237,6 +1291,8 @@ static void __exit lirc_sir_exit(void)
drop_hardware();
drop_chrdev();
drop_port();
+ platform_device_unregister(lirc_sir_dev);
+ platform_driver_unregister(&lirc_sir_driver);
printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n");
}
diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
index 1f0949e..30a9c62 100644
--- a/drivers/staging/rtl8712/recv_linux.c
+++ b/drivers/staging/rtl8712/recv_linux.c
@@ -113,13 +113,8 @@ void r8712_recv_indicatepkt(struct _adapter *padapter,
if (skb == NULL)
goto _recv_indicatepkt_drop;
skb->data = precv_frame->u.hdr.rx_data;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- skb->tail = (sk_buff_data_t)(precv_frame->u.hdr.rx_tail -
- precv_frame->u.hdr.rx_head);
-#else
- skb->tail = (sk_buff_data_t)precv_frame->u.hdr.rx_tail;
-#endif
skb->len = precv_frame->u.hdr.len;
+ skb_set_tail_pointer(skb, skb->len);
if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 42fcf7e..59a6d4d 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -1855,7 +1855,7 @@ static void speakup_bits(struct vc_data *vc)
static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
{
- static u_char *goto_buf = "\0\0\0\0\0\0";
+ static u_char goto_buf[8];
static int num;
int maxlen, go_pos;
char *cp;
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index cb817ce..921dae5 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -200,7 +200,7 @@ s_vProcessRxMACHeader (
} else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
cbHeaderSize += 6;
pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
- if ((*pwType == cpu_to_le16(ETH_P_IPX)) ||
+ if ((*pwType == cpu_to_be16(ETH_P_IPX)) ||
(*pwType == cpu_to_le16(0xF380))) {
cbHeaderSize -= 8;
pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 9b64b10..fe21868 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -1701,7 +1701,7 @@ s_bPacketToWirelessUsb(
// 802.1H
if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
if (pDevice->dwDiagRefCount == 0) {
- if ((psEthHeader->wType == cpu_to_le16(ETH_P_IPX)) ||
+ if ((psEthHeader->wType == cpu_to_be16(ETH_P_IPX)) ||
(psEthHeader->wType == cpu_to_le16(0xF380))) {
memcpy((PBYTE) (pbyPayloadHead),
abySNAP_Bridgetunnel, 6);
@@ -2840,10 +2840,10 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
Packet_Type = skb->data[ETH_HLEN+1];
Descriptor_type = skb->data[ETH_HLEN+1+1+2];
Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
- if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
- /* 802.1x OR eapol-key challenge frame transfer */
- if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
- (Packet_Type == 3)) {
+ if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
+ /* 802.1x OR eapol-key challenge frame transfer */
+ if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
+ (Packet_Type == 3)) {
bTxeapol_key = TRUE;
if(!(Key_info & BIT3) && //WPA or RSN group-key challenge
(Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key
@@ -2989,19 +2989,19 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
}
}
- if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
- if (pDevice->byBBType != BB_TYPE_11A) {
- pDevice->wCurrentRate = RATE_1M;
- pDevice->byACKRate = RATE_1M;
- pDevice->byTopCCKBasicRate = RATE_1M;
- pDevice->byTopOFDMBasicRate = RATE_6M;
- } else {
- pDevice->wCurrentRate = RATE_6M;
- pDevice->byACKRate = RATE_6M;
- pDevice->byTopCCKBasicRate = RATE_1M;
- pDevice->byTopOFDMBasicRate = RATE_6M;
- }
- }
+ if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
+ if (pDevice->byBBType != BB_TYPE_11A) {
+ pDevice->wCurrentRate = RATE_1M;
+ pDevice->byACKRate = RATE_1M;
+ pDevice->byTopCCKBasicRate = RATE_1M;
+ pDevice->byTopOFDMBasicRate = RATE_6M;
+ } else {
+ pDevice->wCurrentRate = RATE_6M;
+ pDevice->byACKRate = RATE_6M;
+ pDevice->byTopCCKBasicRate = RATE_1M;
+ pDevice->byTopOFDMBasicRate = RATE_6M;
+ }
+ }
DBG_PRT(MSG_LEVEL_DEBUG,
KERN_INFO "dma_tx: pDevice->wCurrentRate = %d\n",
@@ -3017,7 +3017,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
if (bNeedEncryption == TRUE) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
- if ((pDevice->sTxEthHeader.wType) == cpu_to_le16(ETH_P_PAE)) {
+ if ((pDevice->sTxEthHeader.wType) == cpu_to_be16(ETH_P_PAE)) {
bNeedEncryption = FALSE;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType));
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d3a7342..910c8b0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3672,15 +3672,20 @@ static int transport_generic_cmd_sequencer(
/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
goto out_invalid_cdb_field;
}
-
+ /*
+ * For the overflow case keep the existing fabric provided
+ * ->data_length. Otherwise for the underflow case, reset
+ * ->data_length to the smaller SCSI expected data transfer
+ * length.
+ */
if (size > cmd->data_length) {
cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
cmd->residual_count = (size - cmd->data_length);
} else {
cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
cmd->residual_count = (cmd->data_length - size);
+ cmd->data_length = size;
}
- cmd->data_length = size;
}
transport_set_supported_SAM_opcode(cmd);
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 101eda9..73038ba 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -658,7 +658,8 @@ static void pch_dma_rx_complete(void *arg)
tty_flip_buffer_push(tty);
tty_kref_put(tty);
async_tx_ack(priv->desc_rx);
- pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
+ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
}
static void pch_dma_tx_complete(void *arg)
@@ -713,7 +714,8 @@ static int handle_rx_to(struct eg20t_port *priv)
int rx_size;
int ret;
if (!priv->start_rx) {
- pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
return 0;
}
buf = &priv->rxbuf;
@@ -975,11 +977,13 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
case PCH_UART_IID_RDR: /* Received Data Ready */
if (priv->use_dma) {
pch_uart_hal_disable_interrupt(priv,
- PCH_UART_HAL_RX_INT);
+ PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
ret = dma_handle_rx(priv);
if (!ret)
pch_uart_hal_enable_interrupt(priv,
- PCH_UART_HAL_RX_INT);
+ PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
} else {
ret = handle_rx(priv);
}
@@ -1105,7 +1109,8 @@ static void pch_uart_stop_rx(struct uart_port *port)
struct eg20t_port *priv;
priv = container_of(port, struct eg20t_port, port);
priv->start_rx = 0;
- pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
priv->int_dis_flag = 1;
}
@@ -1161,6 +1166,7 @@ static int pch_uart_startup(struct uart_port *port)
break;
case 16:
fifo_size = PCH_UART_HAL_FIFO16;
+ break;
case 1:
default:
fifo_size = PCH_UART_HAL_FIFO_DIS;
@@ -1198,7 +1204,8 @@ static int pch_uart_startup(struct uart_port *port)
pch_request_dma(port);
priv->start_rx = 1;
- pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
+ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
uart_update_timeout(port, CS8, default_baud);
return 0;
@@ -1256,7 +1263,7 @@ static void pch_uart_set_termios(struct uart_port *port,
stb = PCH_UART_HAL_STB1;
if (termios->c_cflag & PARENB) {
- if (!(termios->c_cflag & PARODD))
+ if (termios->c_cflag & PARODD)
parity = PCH_UART_HAL_PARITY_ODD;
else
parity = PCH_UART_HAL_PARITY_EVEN;
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 0149c09..ca98341 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
/* print devices for all busses */
list_for_each_entry(bus, &usb_bus_list, bus_list) {
/* recurse through all children of the root hub */
- if (!bus->root_hub)
+ if (!bus_to_hcd(bus)->rh_registered)
continue;
usb_lock_device(bus->root_hub);
ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 45e0908..9d5af9b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -977,10 +977,7 @@ static int register_root_hub(struct usb_hcd *hcd)
if (retval) {
dev_err (parent_dev, "can't register root hub for %s, %d\n",
dev_name(&usb_dev->dev), retval);
- }
- mutex_unlock(&usb_bus_list_lock);
-
- if (retval == 0) {
+ } else {
spin_lock_irq (&hcd_root_hub_lock);
hcd->rh_registered = 1;
spin_unlock_irq (&hcd_root_hub_lock);
@@ -989,6 +986,7 @@ static int register_root_hub(struct usb_hcd *hcd)
if (HCD_DEAD(hcd))
usb_hc_died (hcd); /* This time clean up */
}
+ mutex_unlock(&usb_bus_list_lock);
return retval;
}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 32d3adc..8b2a9d8 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -96,6 +96,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Microchip Joss Optical infrared touchboard device */
+ { USB_DEVICE(0x04d8, 0x000c), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Samsung Android phone modem - ID conflict with SPH-I500 */
{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 2499b3b..923153c 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -130,9 +130,17 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
else {
qtd = list_entry (qh->qtd_list.next,
struct ehci_qtd, qtd_list);
- /* first qtd may already be partially processed */
- if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
+ /*
+ * first qtd may already be partially processed.
+ * If we come here during unlink, the QH overlay region
+ * might have reference to the just unlinked qtd. The
+ * qtd is updated in qh_completions(). Update the QH
+ * overlay here.
+ */
+ if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) {
+ qh->hw->hw_qtd_next = qtd->hw_next;
qtd = NULL;
+ }
}
if (qtd)
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index efba0188..3b82c81 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -73,7 +73,9 @@
#define NB_PIF0_PWRDOWN_1 0x01100013
#define USB_INTEL_XUSB2PR 0xD0
+#define USB_INTEL_USB2PRM 0xD4
#define USB_INTEL_USB3_PSSEN 0xD8
+#define USB_INTEL_USB3PRM 0xDC
static struct amd_chipset_info {
struct pci_dev *nb_dev;
@@ -758,10 +760,18 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
#if defined(CONFIG_USB_XHCI_HCD) || defined(CONFIG_USB_XHCI_HCD_MODULE)
u32 ports_available;
- ports_available = 0xffffffff;
+ /* Read USB3PRM, the USB 3.0 Port Routing Mask Register, which
+ * indicates the ports whose routing the OS may change.
+ */
+ pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
+ &ports_available);
+
+ dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
+ ports_available);
+
/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
- * Register, to turn on SuperSpeed terminations for all
- * available ports.
+ * Register, to turn on SuperSpeed terminations for the
+ * switchable ports.
*/
pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
cpu_to_le32(ports_available));
@@ -771,7 +781,16 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
"under xHCI: 0x%x\n", ports_available);
- ports_available = 0xffffffff;
+ /* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask Register,
+ * which indicates the USB 2.0 ports to be handed to the xHCI host.
+ */
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
+ &ports_available);
+
+ dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xCHI: 0x%x\n",
+ ports_available);
+
/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
* switch the USB 2.0 power and data lines over to the xHCI
* host.
@@ -820,12 +839,12 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
void __iomem *op_reg_base;
u32 val;
int timeout;
+ int len = pci_resource_len(pdev, 0);
if (!mmio_resource_enabled(pdev, 0))
return;
- base = ioremap_nocache(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ base = ioremap_nocache(pci_resource_start(pdev, 0), len);
if (base == NULL)
return;
@@ -835,9 +854,17 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
*/
ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
do {
+ if ((ext_cap_offset + sizeof(val)) > len) {
+ /* We're reading garbage from the controller */
+ dev_warn(&pdev->dev,
+ "xHCI controller failing to respond");
+ return;
+ }
+
if (!ext_cap_offset)
/* We've reached the end of the extended capabilities */
goto hc_init;
+
val = readl(base + ext_cap_offset);
if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
break;
@@ -868,9 +895,10 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
/* Disable any BIOS SMIs and clear all SMI events*/
writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+hc_init:
if (usb_is_intel_switchable_xhci(pdev))
usb_enable_xhci_ports(pdev);
-hc_init:
+
op_reg_base = base + XHCI_HC_LENGTH(readl(base));
/* Wait for the host controller to be ready before writing any
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index ef004a5..7f69a39 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -15,6 +15,7 @@ void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
static inline void usb_amd_dev_put(void) {}
+static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
#endif /* CONFIG_PCI */
#endif /* __LINUX_USB_PCI_QUIRKS_H */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index de3c151..6456bf8 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -147,25 +147,34 @@ static void next_trb(struct xhci_hcd *xhci,
*/
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
- union xhci_trb *next = ++(ring->dequeue);
unsigned long long addr;
ring->deq_updates++;
- /* Update the dequeue pointer further if that was a link TRB or we're at
- * the end of an event ring segment (which doesn't have link TRBS)
- */
- while (last_trb(xhci, ring, ring->deq_seg, next)) {
- if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
- ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt())
- xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
- ring,
- (unsigned int) ring->cycle_state);
+
+ do {
+ /*
+ * Update the dequeue pointer further if that was a link TRB or
+ * we're at the end of an event ring segment (which doesn't have
+ * link TRBS)
+ */
+ if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
+ if (consumer && last_trb_on_last_seg(xhci, ring,
+ ring->deq_seg, ring->dequeue)) {
+ if (!in_interrupt())
+ xhci_dbg(xhci, "Toggle cycle state "
+ "for ring %p = %i\n",
+ ring,
+ (unsigned int)
+ ring->cycle_state);
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ }
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ } else {
+ ring->dequeue++;
}
- ring->deq_seg = ring->deq_seg->next;
- ring->dequeue = ring->deq_seg->trbs;
- next = ring->dequeue;
- }
+ } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
+
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1f9602a..afafd20 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -594,7 +594,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- if (xhci->quirks && XHCI_SPURIOUS_REBOOT)
+ if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
spin_lock_irq(&xhci->lock);
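The one-character change above is the whole fix: with '&&' the right-hand operand is a non-zero constant, so the condition collapses to "any quirk set at all" and the port-disable path runs on controllers that never needed it. A small standalone sketch of the difference (the flag value is made up for illustration):

#include <stdio.h>

#define QUIRK_SPURIOUS_REBOOT	(1 << 13)	/* illustrative bit, not the kernel's value */

int main(void)
{
	unsigned int quirks = 1 << 2;		/* some unrelated quirk is set */

	/* '&&' treats both operands as booleans: true whenever any quirk is set */
	printf("&& test: %d\n", quirks && QUIRK_SPURIOUS_REBOOT);		/* prints 1 */
	/* '&' tests the specific bit, which is what the fix restores */
	printf("&  test: %d\n", (quirks & QUIRK_SPURIOUS_REBOOT) != 0);	/* prints 0 */
	return 0;
}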
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 535a307..7b83689 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -702,6 +702,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
@@ -802,13 +803,32 @@ static struct usb_device_id id_table_combined [] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
- { USB_DEVICE(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC, 0x00) },
{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
{ USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
+ { USB_DEVICE(FTDI_VID, PI_C865_PID) },
+ { USB_DEVICE(FTDI_VID, PI_C857_PID) },
+ { USB_DEVICE(PI_VID, PI_C866_PID) },
+ { USB_DEVICE(PI_VID, PI_C663_PID) },
+ { USB_DEVICE(PI_VID, PI_C725_PID) },
+ { USB_DEVICE(PI_VID, PI_E517_PID) },
+ { USB_DEVICE(PI_VID, PI_C863_PID) },
{ USB_DEVICE(PI_VID, PI_E861_PID) },
+ { USB_DEVICE(PI_VID, PI_C867_PID) },
+ { USB_DEVICE(PI_VID, PI_E609_PID) },
+ { USB_DEVICE(PI_VID, PI_E709_PID) },
+ { USB_DEVICE(PI_VID, PI_100F_PID) },
+ { USB_DEVICE(PI_VID, PI_1011_PID) },
+ { USB_DEVICE(PI_VID, PI_1012_PID) },
+ { USB_DEVICE(PI_VID, PI_1013_PID) },
+ { USB_DEVICE(PI_VID, PI_1014_PID) },
+ { USB_DEVICE(PI_VID, PI_1015_PID) },
+ { USB_DEVICE(PI_VID, PI_1016_PID) },
{ USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
{ USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 54b4258..06f6fd2 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -75,6 +75,9 @@
#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
#define FTDI_OPENDCC_GBM_PID 0xBFDC
+/* NZR SEM 16+ USB (http://www.nzr.de) */
+#define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */
+
/*
* RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
*/
@@ -539,7 +542,10 @@
/*
* Microchip Technology, Inc.
*
- * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are also used by:
+ * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
+ * used by single-function CDC ACM class firmware demo
+ * applications. The same VID/PID has also been used in firmware
+ * emulating FTDI serial chips by:
* Hornby Elite - Digital Command Control Console
* http://www.hornby.com/hornby-dcc/controllers/
*/
@@ -791,8 +797,27 @@
* Physik Instrumente
* http://www.physikinstrumente.com/en/products/
*/
+/* These two devices use the VID of FTDI */
+#define PI_C865_PID 0xe0a0 /* PI C-865 Piezomotor Controller */
+#define PI_C857_PID 0xe0a1 /* PI Encoder Trigger Box */
+
#define PI_VID 0x1a72 /* Vendor ID */
-#define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */
+#define PI_C866_PID 0x1000 /* PI C-866 Piezomotor Controller */
+#define PI_C663_PID 0x1001 /* PI C-663 Mercury-Step */
+#define PI_C725_PID 0x1002 /* PI C-725 Piezomotor Controller */
+#define PI_E517_PID 0x1005 /* PI E-517 Digital Piezo Controller Operation Module */
+#define PI_C863_PID 0x1007 /* PI C-863 */
+#define PI_E861_PID 0x1008 /* PI E-861 Piezomotor Controller */
+#define PI_C867_PID 0x1009 /* PI C-867 Piezomotor Controller */
+#define PI_E609_PID 0x100D /* PI E-609 Digital Piezo Controller */
+#define PI_E709_PID 0x100E /* PI E-709 Digital Piezo Controller */
+#define PI_100F_PID 0x100F /* PI Digital Piezo Controller */
+#define PI_1011_PID 0x1011 /* PI Digital Piezo Controller */
+#define PI_1012_PID 0x1012 /* PI Motion Controller */
+#define PI_1013_PID 0x1013 /* PI Motion Controller */
+#define PI_1014_PID 0x1014 /* PI Device */
+#define PI_1015_PID 0x1015 /* PI Device */
+#define PI_1016_PID 0x1016 /* PI Digital Servo Module */
/*
* Kondo Kagaku Co.Ltd.
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d8851d0..ca72c75 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -882,8 +882,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1088,6 +1086,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
{ USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 8745637..bf9a9b7 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -373,8 +373,15 @@ static void fb_flashcursor(struct work_struct *work)
struct vc_data *vc = NULL;
int c;
int mode;
+ int ret;
+
+ /* FIXME: we should sort out the unbind locking instead */
+ /* for now, just skip flashing the cursor if we can't take
+ * the lock, rather than blocking fbcon deinit */
+ ret = console_trylock();
+ if (ret == 0)
+ return;
- console_lock();
if (ops && ops->currcon != -1)
vc = vc_cons[ops->currcon].d;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index d4ab797..479484f 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -773,6 +773,9 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
hpwdt_timer_reg = pci_mem_addr + 0x70;
hpwdt_timer_con = pci_mem_addr + 0x72;
+ /* Make sure that timer is disabled until /dev/watchdog is opened */
+ hpwdt_stop();
+
/* Make sure that we have a valid soft_margin */
if (hpwdt_change_timer(soft_margin))
hpwdt_change_timer(DEFAULT_MARGIN);
diff --git a/fs/dcache.c b/fs/dcache.c
index 0b51cfc9..bd8aaf6 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -290,7 +290,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
* Inform try_to_ascend() that we are no longer attached to the
* dentry tree
*/
- dentry->d_flags |= DCACHE_DISCONNECTED;
+ dentry->d_flags |= DCACHE_DENTRY_KILLED;
if (parent)
spin_unlock(&parent->d_lock);
dentry_iput(dentry);
@@ -1015,7 +1015,7 @@ static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq
* or deletion
*/
if (new != old->d_parent ||
- (old->d_flags & DCACHE_DISCONNECTED) ||
+ (old->d_flags & DCACHE_DENTRY_KILLED) ||
(!locked && read_seqretry(&rename_lock, seq))) {
spin_unlock(&new->d_lock);
new = NULL;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 2717329..4a91a05 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -653,6 +653,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
struct dentry *trap = NULL;
+ struct inode *target_inode;
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
@@ -660,6 +661,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
dget(lower_new_dentry);
lower_old_dir_dentry = dget_parent(lower_old_dentry);
lower_new_dir_dentry = dget_parent(lower_new_dentry);
+ target_inode = new_dentry->d_inode;
trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
/* source should not be ancestor of target */
if (trap == lower_old_dentry) {
@@ -675,6 +677,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
lower_new_dir_dentry->d_inode, lower_new_dentry);
if (rc)
goto out_lock;
+ if (target_inode)
+ fsstack_copy_attr_all(target_inode,
+ ecryptfs_inode_to_lower(target_inode));
fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
if (new_dir != old_dir)
fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c48f9f6..873c6f2 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
nfsi->attrtimeo_timestamp = jiffies;
- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
+ memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
else
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index edfca53..f0a6990 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -633,7 +633,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct inode *dir = dentry->d_inode;
- __be32 *verf = NFS_COOKIEVERF(dir);
+ __be32 *verf = NFS_I(dir)->cookieverf;
struct nfs3_readdirargs arg = {
.fh = NFS_FH(dir),
.cookie = cookie,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 3da1166..c722905 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3018,11 +3018,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
dentry->d_parent->d_name.name,
dentry->d_name.name,
(unsigned long long)cookie);
- nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
+ nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
res.pgbase = args.pgbase;
status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
if (status >= 0) {
- memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
+ memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
status += args.pgbase;
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index fc97fd5..5fcc67b 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5745,7 +5745,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_open(xdr, res);
if (status)
goto out;
- if (decode_getfh(xdr, &res->fh) != 0)
+ status = decode_getfh(xdr, &res->fh);
+ if (status)
goto out;
if (decode_getfattr(xdr, res->f_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index d709112..a1f3d6e 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1815,6 +1815,7 @@ static int nfs_validate_mount_data(void *options,
memcpy(sap, &data->addr, sizeof(data->addr));
args->nfs_server.addrlen = sizeof(data->addr);
+ args->nfs_server.port = ntohs(data->addr.sin_port);
if (!nfs_verify_server_address(sap))
goto out_no_address;
@@ -2528,6 +2529,7 @@ static int nfs4_validate_mount_data(void *options,
return -EFAULT;
if (!nfs_verify_server_address(sap))
goto out_no_address;
+ args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
if (data->auth_flavourlen) {
if (data->auth_flavourlen > 1)
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 7438850..b5a8636 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -290,7 +290,7 @@ handle_fragments:
check_directory_table:
/* Sanity check directory_table */
- if (msblk->directory_table >= next_table) {
+ if (msblk->directory_table > next_table) {
err = -EINVAL;
goto failed_mount;
}
diff --git a/fs/stat.c b/fs/stat.c
index 02a6061..aec24ec 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -57,7 +57,7 @@ EXPORT_SYMBOL(vfs_getattr);
int vfs_fstat(unsigned int fd, struct kstat *stat)
{
- struct file *f = fget(fd);
+ struct file *f = fget_raw(fd);
int error = -EBADF;
if (f) {
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 580a6d3..c04e0db 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -26,7 +26,13 @@ static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
- fail_fn(count);
+ /*
+ * We failed to acquire the lock, so mark it contended
+ * to ensure that any waiting tasks are woken up by the
+ * unlock slow path.
+ */
+ if (likely(atomic_xchg(count, -1) != 1))
+ fail_fn(count);
}
/**
@@ -43,7 +49,8 @@ static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
- return fail_fn(count);
+ if (likely(atomic_xchg(count, -1) != 1))
+ return fail_fn(count);
return 0;
}
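For background, the xchg-based mutex fastpath keeps the lock word at 1 (unlocked), 0 (locked, no waiters) or -1 (locked, possibly contended); the hunks above make a failed fast-path locker re-xchg the word to -1 so the eventual unlock is forced through the wake-up slow path. A compressed C11 sketch of that idea, with the slow-path calls stubbed out (names here are placeholders, not the kernel's):

#include <stdatomic.h>

/* Lock word: 1 = unlocked, 0 = locked uncontended, -1 = locked contended. */
static void slowpath_wait(atomic_int *count) { (void)count; /* block until woken */ }
static void slowpath_wake(atomic_int *count) { (void)count; /* wake one waiter */ }

static void mutex_lock_sketch(atomic_int *count)
{
	if (atomic_exchange(count, 0) != 1) {
		/* The lock was not free.  Mark it contended so the holder's
		 * unlock runs the wake-up slow path.  If the holder released
		 * it between the two xchgs (old value 1), we now own it and
		 * skip the wait. */
		if (atomic_exchange(count, -1) != 1)
			slowpath_wait(count);
	}
}

static void mutex_unlock_sketch(atomic_int *count)
{
	if (atomic_exchange(count, 1) != 0)	/* was -1: someone may be waiting */
		slowpath_wake(count);
}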
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index f13bb6d..7d6a6b8 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -220,6 +220,8 @@ struct dentry_operations {
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
+#define DCACHE_DENTRY_KILLED 0x100000
+
extern seqlock_t rename_lock;
static inline int dname_external(struct dentry *dentry)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 5e41a8e..921336f 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -124,8 +124,17 @@ extern struct group_info init_groups;
extern struct cred init_cred;
+extern struct task_group root_task_group;
+
+#ifdef CONFIG_CGROUP_SCHED
+# define INIT_CGROUP_SCHED(tsk) \
+ .sched_task_group = &root_task_group,
+#else
+# define INIT_CGROUP_SCHED(tsk)
+#endif
+
#ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk) \
+# define INIT_PERF_EVENTS(tsk) \
.perf_event_mutex = \
__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
@@ -160,6 +169,7 @@ extern struct cred init_cred;
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
INIT_PUSHABLE_TASKS(tsk) \
+ INIT_CGROUP_SCHED(tsk) \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 9229b64..b557c78 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -228,7 +228,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
static inline __attribute__((format(printf, 2, 3)))
int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
-{ return 0; }
+{ return -ENOMEM; }
static inline int kobject_action_type(const char *buf, size_t count,
enum kobject_action *type)
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 603bec2..06177ba10 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -58,13 +58,6 @@ union ktime {
typedef union ktime ktime_t; /* Kill this */
-#define KTIME_MAX ((s64)~((u64)1 << 63))
-#if (BITS_PER_LONG == 64)
-# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
-#else
-# define KTIME_SEC_MAX LONG_MAX
-#endif
-
/*
* ktime_t definitions when using the 64-bit scalar representation:
*/
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 935699b..6bea2c2 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -20,7 +20,7 @@
#include <linux/compiler.h>
#include <linux/mutex.h>
-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
+#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
struct memory_block {
unsigned long start_section_nr;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c6d6d48..c037215 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1329,6 +1329,8 @@ struct net_device {
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
+#define GSO_MAX_SEGS 65535
+ u16 gso_max_segs;
#ifdef CONFIG_DCB
/* Data Center Bridging netlink ops */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index acdc370..af625d8 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -261,11 +261,6 @@ static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
return NFS_SERVER(inode)->nfs_client->rpc_ops;
}
-static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
-{
- return NFS_I(inode)->cookieverf;
-}
-
static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
{
struct nfs_server *nfss = NFS_SERVER(inode);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e0786e3..effadd6 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -807,7 +807,7 @@ struct perf_event {
struct hw_perf_event hw;
struct perf_event_context *ctx;
- struct file *filp;
+ atomic_long_t refcount;
/*
* These accumulate total time (in nanoseconds) that children
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 443ec43..0dae42e7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1235,6 +1235,9 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+ struct task_group *sched_task_group;
+#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
@@ -2613,7 +2616,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
-#endif
+#endif /* CONFIG_CGROUP_SCHED */
extern int task_can_switch_user(struct user_struct *up,
struct task_struct *tsk);
diff --git a/include/linux/time.h b/include/linux/time.h
index b306178..8c0216e 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -107,11 +107,36 @@ static inline struct timespec timespec_sub(struct timespec lhs,
return ts_delta;
}
+#define KTIME_MAX ((s64)~((u64)1 << 63))
+#if (BITS_PER_LONG == 64)
+# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+#else
+# define KTIME_SEC_MAX LONG_MAX
+#endif
+
/*
* Returns true if the timespec is norm, false if denorm:
*/
-#define timespec_valid(ts) \
- (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
+static inline bool timespec_valid(const struct timespec *ts)
+{
+ /* Dates before 1970 are bogus */
+ if (ts->tv_sec < 0)
+ return false;
+ /* Can't have more nanoseconds than a second */
+ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ return false;
+ return true;
+}
+
+static inline bool timespec_valid_strict(const struct timespec *ts)
+{
+ if (!timespec_valid(ts))
+ return false;
+ /* Disallow values that could overflow ktime_t */
+ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+ return false;
+ return true;
+}
extern void read_persistent_clock(struct timespec *ts);
extern void read_boot_clock(struct timespec *ts);
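The two helpers above separate "well-formed" from "also representable as a 64-bit nanosecond count"; the timekeeping hunks later in this patch switch the clock-setting paths to the strict variant. A userspace-style sketch of the same guards, with the constants redefined locally so it compiles on its own:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC	1000000000L
#define KTIME_MAX	((int64_t)~((uint64_t)1 << 63))
#define KTIME_SEC_MAX	(KTIME_MAX / NSEC_PER_SEC)

static bool ts_valid(const struct timespec *ts)
{
	/* dates before 1970 and nanosecond counts >= 1s are malformed */
	return ts->tv_sec >= 0 && (unsigned long)ts->tv_nsec < NSEC_PER_SEC;
}

static bool ts_valid_strict(const struct timespec *ts)
{
	/* additionally reject values that would overflow ktime_t */
	return ts_valid(ts) &&
	       (unsigned long long)ts->tv_sec < (unsigned long long)KTIME_SEC_MAX;
}

A caller that sets the clock would then bail out early, e.g. "if (!ts_valid_strict(&tv)) return -EINVAL;", which mirrors the do_settimeofday() change further down.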
diff --git a/include/net/sock.h b/include/net/sock.h
index c0b938c..b2deeab 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -194,6 +194,7 @@ struct sock_common {
* @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
+ * @sk_gso_max_segs: Maximum number of GSO segments
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
@@ -310,6 +311,7 @@ struct sock {
int sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
+ u16 sk_gso_max_segs;
int sk_rcvlowat;
unsigned long sk_lingertime;
struct sk_buff_head sk_error_queue;
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index a9c87ad..a9536da 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -214,7 +214,7 @@ TRACE_EVENT(mm_page_alloc,
TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
__entry->page,
- page_to_pfn(__entry->page),
+ __entry->page ? page_to_pfn(__entry->page) : 0,
__entry->order,
__entry->migratetype,
show_gfp_flags(__entry->gfp_flags))
@@ -240,7 +240,7 @@ DECLARE_EVENT_CLASS(mm_page,
TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
__entry->page,
- page_to_pfn(__entry->page),
+ __entry->page ? page_to_pfn(__entry->page) : 0,
__entry->order,
__entry->migratetype,
__entry->order == 0)
diff --git a/kernel/async.c b/kernel/async.c
index cd9dbb9..04f66e3 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -87,6 +87,13 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
{
struct async_entry *entry;
+ if (!running) { /* just check the entry count */
+ if (atomic_read(&entry_count))
+ return 0; /* smaller than any cookie */
+ else
+ return next_cookie;
+ }
+
if (!list_empty(running)) {
entry = list_first_entry(running,
struct async_entry, list);
@@ -236,9 +243,7 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
*/
void async_synchronize_full(void)
{
- do {
- async_synchronize_cookie(next_cookie);
- } while (!list_empty(&async_running) || !list_empty(&async_pending));
+ async_synchronize_cookie_domain(next_cookie, NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
@@ -258,7 +263,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
/**
* async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
* @cookie: async_cookie_t to use as checkpoint
- * @running: running list to synchronize on
+ * @running: running list to synchronize on, NULL indicates all lists
*
* This function waits until all asynchronous function calls for the
* synchronization domain specified by the running list @list submitted
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 32a6151..7b344be 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2969,12 +2969,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
/*
* Called when the last reference to the file is gone.
*/
-static int perf_release(struct inode *inode, struct file *file)
+static void put_event(struct perf_event *event)
{
- struct perf_event *event = file->private_data;
struct task_struct *owner;
- file->private_data = NULL;
+ if (!atomic_long_dec_and_test(&event->refcount))
+ return;
rcu_read_lock();
owner = ACCESS_ONCE(event->owner);
@@ -3009,7 +3009,13 @@ static int perf_release(struct inode *inode, struct file *file)
put_task_struct(owner);
}
- return perf_event_release_kernel(event);
+ perf_event_release_kernel(event);
+}
+
+static int perf_release(struct inode *inode, struct file *file)
+{
+ put_event(file->private_data);
+ return 0;
}
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
@@ -3241,7 +3247,7 @@ unlock:
static const struct file_operations perf_fops;
-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+static struct file *perf_fget_light(int fd, int *fput_needed)
{
struct file *file;
@@ -3255,7 +3261,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
return ERR_PTR(-EBADF);
}
- return file->private_data;
+ return file;
}
static int perf_event_set_output(struct perf_event *event,
@@ -3287,19 +3293,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PERF_EVENT_IOC_SET_OUTPUT:
{
+ struct file *output_file = NULL;
struct perf_event *output_event = NULL;
int fput_needed = 0;
int ret;
if (arg != -1) {
- output_event = perf_fget_light(arg, &fput_needed);
- if (IS_ERR(output_event))
- return PTR_ERR(output_event);
+ output_file = perf_fget_light(arg, &fput_needed);
+ if (IS_ERR(output_file))
+ return PTR_ERR(output_file);
+ output_event = output_file->private_data;
}
ret = perf_event_set_output(event, output_event);
if (output_event)
- fput_light(output_event->filp, fput_needed);
+ fput_light(output_file, fput_needed);
return ret;
}
@@ -6181,6 +6189,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
mutex_init(&event->mmap_mutex);
+ atomic_long_set(&event->refcount, 1);
event->cpu = cpu;
event->attr = *attr;
event->group_leader = group_leader;
@@ -6455,12 +6464,12 @@ SYSCALL_DEFINE5(perf_event_open,
return event_fd;
if (group_fd != -1) {
- group_leader = perf_fget_light(group_fd, &fput_needed);
- if (IS_ERR(group_leader)) {
- err = PTR_ERR(group_leader);
+ group_file = perf_fget_light(group_fd, &fput_needed);
+ if (IS_ERR(group_file)) {
+ err = PTR_ERR(group_file);
goto err_fd;
}
- group_file = group_leader->filp;
+ group_leader = group_file->private_data;
if (flags & PERF_FLAG_FD_OUTPUT)
output_event = group_leader;
if (flags & PERF_FLAG_FD_NO_GROUP)
@@ -6594,7 +6603,6 @@ SYSCALL_DEFINE5(perf_event_open,
put_ctx(gctx);
}
- event->filp = event_file;
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
@@ -6682,7 +6690,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
goto err_free;
}
- event->filp = NULL;
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
perf_install_in_context(ctx, event, cpu);
@@ -6731,7 +6738,7 @@ static void sync_child_event(struct perf_event *child_event,
* Release the parent event, if this was the last
* reference to it.
*/
- fput(parent_event->filp);
+ put_event(parent_event);
}
static void
@@ -6807,9 +6814,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
*
* __perf_event_exit_task()
* sync_child_event()
- * fput(parent_event->filp)
- * perf_release()
- * mutex_lock(&ctx->mutex)
+ * put_event()
+ * mutex_lock(&ctx->mutex)
*
* But since its the parent context it won't be the same instance.
*/
@@ -6877,7 +6883,7 @@ static void perf_free_event(struct perf_event *event,
list_del_init(&event->child_list);
mutex_unlock(&parent->child_mutex);
- fput(parent->filp);
+ put_event(parent);
perf_group_detach(event);
list_del_event(event, ctx);
@@ -6957,6 +6963,12 @@ inherit_event(struct perf_event *parent_event,
NULL);
if (IS_ERR(child_event))
return child_event;
+
+ if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
+ free_event(child_event);
+ return NULL;
+ }
+
get_ctx(child_ctx);
/*
@@ -6996,14 +7008,6 @@ inherit_event(struct perf_event *parent_event,
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
- * Get a reference to the parent filp - we will fput it
- * when the child event exits. This is safe to do because
- * we are in the parent and we know that the filp still
- * exists and has a nonzero count:
- */
- atomic_long_inc(&parent_event->filp->f_count);
-
- /*
* Link this into the parent event's child list
*/
WARN_ON_ONCE(parent_event->ctx->parent_ctx);
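The perf changes above replace "pin the parent's struct file" with a per-event refcount: lookups succeed only if they can still bump a non-zero count, and the final put frees the event. Reduced to a toy object so it stands alone (obj, free_obj and the field names are placeholders, not perf identifiers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_long refcount;		/* set to 1 when the object is created */
};

static void free_obj(struct obj *o)
{
	free(o);
}

/* Take a reference only if the object is still live (count > 0). */
static bool obj_tryget(struct obj *o)
{
	long c = atomic_load(&o->refcount);

	while (c > 0)
		if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
			return true;
	return false;			/* already on its way to being freed */
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free_obj(o);		/* we dropped the last reference */
}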
diff --git a/kernel/exit.c b/kernel/exit.c
index 303bed2..97dd317 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1049,6 +1049,22 @@ NORET_TYPE void do_exit(long code)
preempt_disable();
exit_rcu();
+
+ /*
+ * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
+ * when the following two conditions become true.
+ * - there is a race on mmap_sem (it is acquired by
+ * exit_mm()), and
+ * - an SMI occurs before TASK_RUNNING is set
+ * (or the hypervisor of a virtual machine switches to another guest).
+ * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
+ *
+ * To avoid this, wait for tsk->pi_lock, which is held by
+ * try_to_wake_up(), to be released.
+ */
+ smp_mb();
+ raw_spin_unlock_wait(&tsk->pi_lock);
+
/* causes final put_task_struct in finish_task_switch(). */
tsk->state = TASK_DEAD;
schedule();
diff --git a/kernel/sched.c b/kernel/sched.c
index 8ef48f0..7484c92 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -605,22 +605,19 @@ static inline int cpu_of(struct rq *rq)
/*
* Return the group to which this tasks belongs.
*
- * We use task_subsys_state_check() and extend the RCU verification with
- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
- * task it moves into the cgroup. Therefore by holding either of those locks,
- * we pin the task to the current cgroup.
+ * We cannot use task_subsys_state() and friends because the cgroup
+ * subsystem changes that value before the cgroup_subsys::attach() method
+ * is called, therefore we cannot pin it and might observe the wrong value.
+ *
+ * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
+ * core changes this before calling sched_move_task().
+ *
+ * Instead we use a 'copy' which is updated from sched_move_task() while
+ * holding both task_struct::pi_lock and rq::lock.
*/
static inline struct task_group *task_group(struct task_struct *p)
{
- struct task_group *tg;
- struct cgroup_subsys_state *css;
-
- css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
- lockdep_is_held(&p->pi_lock) ||
- lockdep_is_held(&task_rq(p)->lock));
- tg = container_of(css, struct task_group, css);
-
- return autogroup_task_group(p, tg);
+ return p->sched_task_group;
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
@@ -2206,7 +2203,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
* a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
*
* sched_move_task() holds both and thus holding either pins the cgroup,
- * see set_task_rq().
+ * see task_group().
*
* Furthermore, all task_rq users should acquire both locks, see
* task_rq_lock().
@@ -8545,6 +8542,7 @@ void sched_destroy_group(struct task_group *tg)
*/
void sched_move_task(struct task_struct *tsk)
{
+ struct task_group *tg;
int on_rq, running;
unsigned long flags;
struct rq *rq;
@@ -8559,6 +8557,12 @@ void sched_move_task(struct task_struct *tsk)
if (unlikely(running))
tsk->sched_class->put_prev_task(rq, tsk);
+ tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+ lockdep_is_held(&tsk->sighand->siglock)),
+ struct task_group, css);
+ tg = autogroup_task_group(tsk, tg);
+ tsk->sched_task_group = tg;
+
#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->task_move_group)
tsk->sched_class->task_move_group(tsk, on_rq);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 678ae31..c3cbd8c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -382,7 +382,7 @@ int do_settimeofday(const struct timespec *tv)
struct timespec ts_delta;
unsigned long flags;
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ if (!timespec_valid_strict(tv))
return -EINVAL;
write_seqlock_irqsave(&xtime_lock, flags);
@@ -417,6 +417,8 @@ EXPORT_SYMBOL(do_settimeofday);
int timekeeping_inject_offset(struct timespec *ts)
{
unsigned long flags;
+ struct timespec tmp;
+ int ret = 0;
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
@@ -425,9 +427,16 @@ int timekeeping_inject_offset(struct timespec *ts)
timekeeping_forward_now();
+ tmp = timespec_add(xtime, *ts);
+ if (!timespec_valid_strict(&tmp)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
xtime = timespec_add(xtime, *ts);
wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
+error: /* even if we error out, we forwarded the time, so call update */
timekeeping_update(true);
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -435,7 +444,7 @@ int timekeeping_inject_offset(struct timespec *ts)
/* signal hrtimers about time change */
clock_was_set();
- return 0;
+ return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);
@@ -582,7 +591,20 @@ void __init timekeeping_init(void)
struct timespec now, boot;
read_persistent_clock(&now);
+ if (!timespec_valid_strict(&now)) {
+ pr_warn("WARNING: Persistent clock returned invalid value!\n"
+ " Check your CMOS/BIOS settings.\n");
+ now.tv_sec = 0;
+ now.tv_nsec = 0;
+ }
+
read_boot_clock(&boot);
+ if (!timespec_valid_strict(&boot)) {
+ pr_warn("WARNING: Boot clock returned invalid value!\n"
+ " Check your CMOS/BIOS settings.\n");
+ boot.tv_sec = 0;
+ boot.tv_nsec = 0;
+ }
write_seqlock_irqsave(&xtime_lock, flags);
@@ -627,6 +649,12 @@ static void update_sleep_time(struct timespec t)
*/
static void __timekeeping_inject_sleeptime(struct timespec *delta)
{
+ if (!timespec_valid_strict(delta)) {
+ printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
+ "sleep delta value!\n");
+ return;
+ }
+
xtime = timespec_add(xtime, *delta);
wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
update_sleep_time(timespec_add(total_sleep_time, *delta));
@@ -897,6 +925,10 @@ static void update_wall_time(void)
#else
offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
+ /* Check if there's really nothing to do */
+ if (offset < timekeeper.cycle_interval)
+ return;
+
timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
/*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e88c924..00c0bad 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3412,14 +3412,17 @@ static int __cpuinit trustee_thread(void *__gcwq)
for_each_busy_worker(worker, i, pos, gcwq) {
struct work_struct *rebind_work = &worker->rebind_work;
+ unsigned long worker_flags = worker->flags;
/*
* Rebind_work may race with future cpu hotplug
* operations. Use a separate flag to mark that
- * rebinding is scheduled.
+ * rebinding is scheduled. The morphing should
+ * be atomic.
*/
- worker->flags |= WORKER_REBIND;
- worker->flags &= ~WORKER_ROGUE;
+ worker_flags |= WORKER_REBIND;
+ worker_flags &= ~WORKER_ROGUE;
+ ACCESS_ONCE(worker->flags) = worker_flags;
/* queue rebind_work, wq doesn't matter, use the default one */
if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
@@ -3599,18 +3602,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
#ifdef CONFIG_SMP
struct work_for_cpu {
- struct completion completion;
+ struct work_struct work;
long (*fn)(void *);
void *arg;
long ret;
};
-static int do_work_for_cpu(void *_wfc)
+static void work_for_cpu_fn(struct work_struct *work)
{
- struct work_for_cpu *wfc = _wfc;
+ struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
+
wfc->ret = wfc->fn(wfc->arg);
- complete(&wfc->completion);
- return 0;
}
/**
@@ -3625,19 +3627,11 @@ static int do_work_for_cpu(void *_wfc)
*/
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
- struct task_struct *sub_thread;
- struct work_for_cpu wfc = {
- .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
- .fn = fn,
- .arg = arg,
- };
+ struct work_for_cpu wfc = { .fn = fn, .arg = arg };
- sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
- if (IS_ERR(sub_thread))
- return PTR_ERR(sub_thread);
- kthread_bind(sub_thread, cpu);
- wake_up_process(sub_thread);
- wait_for_completion(&wfc.completion);
+ INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
+ schedule_work_on(cpu, &wfc.work);
+ flush_work(&wfc.work);
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
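The rewrite above drops the throwaway kthread: work_on_cpu() now queues an on-stack work item on the target CPU's workqueue and flushes it, so it must still be called from process context (flush_work() sleeps). A hedged usage sketch; the callback and its payload are invented for illustration, only the work_on_cpu() call itself comes from the patch:

#include <linux/workqueue.h>

/* Run a short, sleepable routine on CPU 2 and collect its result. */
static long read_remote_value(void *arg)
{
	unsigned int reg = *(unsigned int *)arg;	/* hypothetical payload */

	/* ... per-CPU access (e.g. an MSR read) would go here ... */
	return (long)reg;
}

static long probe_on_cpu2(void)
{
	unsigned int reg = 0x1a0;			/* hypothetical register */

	return work_on_cpu(2, read_remote_value, &reg);
}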
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ae5a3f2..e0a3e51 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -116,9 +116,6 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
struct mem_section *ms;
struct page *page, *memmap;
- if (!pfn_valid(start_pfn))
- return;
-
section_nr = pfn_to_section_nr(start_pfn);
ms = __nr_to_section(section_nr);
@@ -177,9 +174,16 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
end_pfn = pfn + pgdat->node_spanned_pages;
/* register_section info */
- for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
- register_page_bootmem_info_section(pfn);
-
+ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ /*
+ * Some platforms can assign the same pfn to multiple nodes - on
+ * node0 as well as nodeN. To avoid registering a pfn against
+ * multiple nodes we check that this pfn does not already
+ * reside in some other node.
+ */
+ if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
+ register_page_bootmem_info_section(pfn);
+ }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9177aa3..eb6b3fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -540,7 +540,7 @@ static inline void __free_one_page(struct page *page,
combined_idx = buddy_idx & page_idx;
higher_page = page + (combined_idx - page_idx);
buddy_idx = __find_buddy_index(combined_idx, order + 1);
- higher_buddy = page + (buddy_idx - combined_idx);
+ higher_buddy = higher_page + (buddy_idx - combined_idx);
if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
list_add_tail(&page->lru,
&zone->free_area[order].free_list[migratetype]);
diff --git a/net/atm/common.c b/net/atm/common.c
index 22b963d..cc859ad 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -784,6 +784,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
return -ENOTCONN;
+ memset(&pvc, 0, sizeof(pvc));
pvc.sap_family = AF_ATMPVC;
pvc.sap_addr.itf = vcc->dev->number;
pvc.sap_addr.vpi = vcc->vpi;
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 437ee70..db0dd47 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -94,6 +94,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
return -ENOTCONN;
*sockaddr_len = sizeof(struct sockaddr_atmpvc);
addr = (struct sockaddr_atmpvc *)sockaddr;
+ memset(addr, 0, sizeof(*addr));
addr->sap_family = AF_ATMPVC;
addr->sap_addr.itf = vcc->dev->number;
addr->sap_addr.vpi = vcc->vpi;
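This and the Bluetooth hunks that follow are the same class of fix: a structure that is later copied to userspace is only partially filled, so padding and unused fields leak stale kernel bytes. Zeroing the whole thing before filling it closes the leak. A generic standalone sketch of the pattern (the struct layout and AF value are illustrative stand-ins for sockaddr_atmpvc and friends):

#include <string.h>

struct sockaddr_sketch {		/* stand-in for sockaddr_atmpvc etc. */
	unsigned short	family;
	short		itf;
	short		vpi;
	int		vci;
	char		pad[4];		/* padding that would otherwise leak */
};

static void fill_addr(struct sockaddr_sketch *sa, short itf, short vpi, int vci)
{
	memset(sa, 0, sizeof(*sa));	/* zero first: no stale bytes escape to userspace */
	sa->family = 8;			/* AF_ATMPVC; the value is illustrative */
	sa->itf = itf;
	sa->vpi = vpi;
	sa->vci = vci;
}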
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 295e4a8..eb5cb6f 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -432,6 +432,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
*addr_len = sizeof(*haddr);
haddr->hci_family = AF_BLUETOOTH;
haddr->hci_dev = hdev->id;
+ haddr->hci_channel= 0;
release_sock(sk);
return 0;
@@ -644,6 +645,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
{
struct hci_filter *f = &hci_pi(sk)->filter;
+ memset(&uf, 0, sizeof(uf));
uf.type_mask = f->type_mask;
uf.opcode = f->opcode;
uf.event_mask[0] = *((u32 *) f->event_mask + 0);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 8248303..9810d45 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -329,6 +329,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
BT_DBG("sock %p, sk %p", sock, sk);
+ memset(la, 0, sizeof(struct sockaddr_l2));
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_l2);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 1b10727..9dfe702 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -544,6 +544,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
BT_DBG("sock %p, sk %p", sock, sk);
+ memset(sa, 0, sizeof(*sa));
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index c258796..bc1eb56 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -471,7 +471,7 @@ static int rfcomm_get_dev_list(void __user *arg)
size = sizeof(*dl) + dev_num * sizeof(*di);
- dl = kmalloc(size, GFP_KERNEL);
+ dl = kzalloc(size, GFP_KERNEL);
if (!dl)
return -ENOMEM;
diff --git a/net/core/dev.c b/net/core/dev.c
index 8235b81..d8bc889 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1045,6 +1045,8 @@ rollback:
*/
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
+ char *new_ifalias;
+
ASSERT_RTNL();
if (len >= IFALIASZ)
@@ -1058,9 +1060,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
return 0;
}
- dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
- if (!dev->ifalias)
+ new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
+ if (!new_ifalias)
return -ENOMEM;
+ dev->ifalias = new_ifalias;
strlcpy(dev->ifalias, alias, len+1);
return len;
@@ -2050,6 +2053,9 @@ u32 netif_skb_features(struct sk_buff *skb)
__be16 protocol = skb->protocol;
u32 features = skb->dev->features;
+ if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+ features &= ~NETIF_F_GSO_MASK;
+
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
@@ -5870,6 +5876,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev_net_set(dev, &init_net);
dev->gso_max_size = GSO_MAX_SIZE;
+ dev->gso_max_segs = GSO_MAX_SEGS;
INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
dev->ethtool_ntuple_list.count = 0;
@@ -6253,7 +6260,8 @@ static struct hlist_head *netdev_create_hash(void)
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
- INIT_LIST_HEAD(&net->dev_base_head);
+ if (net != &init_net)
+ INIT_LIST_HEAD(&net->dev_base_head);
net->dev_name_head = netdev_create_hash();
if (net->dev_name_head == NULL)
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7f36b38..b856f87 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -33,22 +33,19 @@
#define TRACE_ON 1
#define TRACE_OFF 0
-static void send_dm_alert(struct work_struct *unused);
-
-
/*
* Globals, our netlink socket pointer
* and the work handle that will send up
* netlink alerts
*/
static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
struct per_cpu_dm_data {
- struct work_struct dm_alert_work;
- struct sk_buff *skb;
- atomic_t dm_hit_count;
- struct timer_list send_timer;
+ spinlock_t lock;
+ struct sk_buff *skb;
+ struct work_struct dm_alert_work;
+ struct timer_list send_timer;
};
struct dm_hw_stat_delta {
@@ -74,56 +71,59 @@ static int dm_delay = 1;
static unsigned long dm_hw_check_delta = 2*HZ;
static LIST_HEAD(hw_stats_list);
-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
+static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
{
size_t al;
struct net_dm_alert_msg *msg;
struct nlattr *nla;
+ struct sk_buff *skb;
+ unsigned long flags;
al = sizeof(struct net_dm_alert_msg);
al += dm_hit_limit * sizeof(struct net_dm_drop_point);
al += sizeof(struct nlattr);
- data->skb = genlmsg_new(al, GFP_KERNEL);
- genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
- 0, NET_DM_CMD_ALERT);
- nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
- msg = nla_data(nla);
- memset(msg, 0, al);
- atomic_set(&data->dm_hit_count, dm_hit_limit);
+ skb = genlmsg_new(al, GFP_KERNEL);
+
+ if (skb) {
+ genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+ 0, NET_DM_CMD_ALERT);
+ nla = nla_reserve(skb, NLA_UNSPEC,
+ sizeof(struct net_dm_alert_msg));
+ msg = nla_data(nla);
+ memset(msg, 0, al);
+ } else {
+ mod_timer(&data->send_timer, jiffies + HZ / 10);
+ }
+
+ spin_lock_irqsave(&data->lock, flags);
+ swap(data->skb, skb);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return skb;
}
-static void send_dm_alert(struct work_struct *unused)
+static void send_dm_alert(struct work_struct *work)
{
struct sk_buff *skb;
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct per_cpu_dm_data *data;
- /*
- * Grab the skb we're about to send
- */
- skb = data->skb;
+ data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
- /*
- * Replace it with a new one
- */
- reset_per_cpu_data(data);
-
- /*
- * Ship it!
- */
- genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+ skb = reset_per_cpu_data(data);
+ if (skb)
+ genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
}
/*
* This is the timer function to delay the sending of an alert
* in the event that more drops will arrive during the
- * hysteresis period. Note that it operates under the timer interrupt
- * so we don't need to disable preemption here
+ * hysteresis period.
*/
-static void sched_send_work(unsigned long unused)
+static void sched_send_work(unsigned long _data)
{
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
schedule_work(&data->dm_alert_work);
}
@@ -134,17 +134,19 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
struct nlmsghdr *nlh;
struct nlattr *nla;
int i;
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct sk_buff *dskb;
+ struct per_cpu_dm_data *data;
+ unsigned long flags;
+ local_irq_save(flags);
+ data = &__get_cpu_var(dm_cpu_data);
+ spin_lock(&data->lock);
+ dskb = data->skb;
- if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
- /*
- * we're already at zero, discard this hit
- */
+ if (!dskb)
goto out;
- }
- nlh = (struct nlmsghdr *)data->skb->data;
+ nlh = (struct nlmsghdr *)dskb->data;
nla = genlmsg_data(nlmsg_data(nlh));
msg = nla_data(nla);
for (i = 0; i < msg->entries; i++) {
@@ -153,11 +155,12 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
goto out;
}
}
-
+ if (msg->entries == dm_hit_limit)
+ goto out;
/*
* We need to create a new entry
*/
- __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
msg->points[msg->entries].count = 1;
@@ -165,11 +168,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
if (!timer_pending(&data->send_timer)) {
data->send_timer.expires = jiffies + dm_delay * HZ;
- add_timer_on(&data->send_timer, smp_processor_id());
+ add_timer(&data->send_timer);
}
out:
- return;
+ spin_unlock_irqrestore(&data->lock, flags);
}
static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
@@ -213,7 +216,7 @@ static int set_all_monitor_traces(int state)
struct dm_hw_stat_delta *new_stat = NULL;
struct dm_hw_stat_delta *temp;
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
if (state == trace_state) {
rc = -EAGAIN;
@@ -252,7 +255,7 @@ static int set_all_monitor_traces(int state)
rc = -EINPROGRESS;
out_unlock:
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
return rc;
}
@@ -295,12 +298,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
new_stat->dev = dev;
new_stat->last_rx = jiffies;
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
list_add_rcu(&new_stat->list, &hw_stats_list);
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
break;
case NETDEV_UNREGISTER:
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
if (new_stat->dev == dev) {
new_stat->dev = NULL;
@@ -311,7 +314,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
}
}
}
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
break;
}
out:
@@ -367,13 +370,15 @@ static int __init init_net_drop_monitor(void)
for_each_present_cpu(cpu) {
data = &per_cpu(dm_cpu_data, cpu);
- reset_per_cpu_data(data);
INIT_WORK(&data->dm_alert_work, send_dm_alert);
init_timer(&data->send_timer);
- data->send_timer.data = cpu;
+ data->send_timer.data = (unsigned long)data;
data->send_timer.function = sched_send_work;
+ spin_lock_init(&data->lock);
+ reset_per_cpu_data(data);
}
+
goto out;
out_unreg:
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 2772ed1..1642c30 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -24,7 +24,9 @@ static DEFINE_MUTEX(net_mutex);
LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);
-struct net init_net;
+struct net init_net = {
+ .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
+};
EXPORT_SYMBOL(init_net);
#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
diff --git a/net/core/sock.c b/net/core/sock.c
index b4bb59a..56623ad 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1312,6 +1312,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
} else {
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
sk->sk_gso_max_size = dst->dev->gso_max_size;
+ sk->sk_gso_max_segs = dst->dev->gso_max_segs;
}
}
}
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 3d604e1..4caf63f 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -532,6 +532,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
case DCCP_SOCKOPT_CCID_TX_INFO:
if (len < sizeof(tfrc))
return -EINVAL;
+ memset(&tfrc, 0, sizeof(tfrc));
tfrc.tfrctx_x = hc->tx_x;
tfrc.tfrctx_x_recv = hc->tx_x_recv;
tfrc.tfrctx_x_calc = hc->tx_x_calc;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index f81af8d..ec7d8e7 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
struct sk_buff *skb, struct mfc_cache *cache,
int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
list_del(&mrt->list);
- kfree(mrt);
+ ipmr_free_table(mrt);
}
fib_rules_unregister(net->ipv4.mr_rules_ops);
}
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
static void __net_exit ipmr_rules_exit(struct net *net)
{
- kfree(net->ipv4.mrt);
+ ipmr_free_table(net->ipv4.mrt);
}
#endif
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
return mrt;
}
+static void ipmr_free_table(struct mr_table *mrt)
+{
+ del_timer_sync(&mrt->ipmr_expire_timer);
+ mroute_clean_tables(mrt);
+ kfree(mrt);
+}
+
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b6ec23c..e57df66 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -739,7 +739,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
old_size_goal + mss_now > xmit_size_goal)) {
xmit_size_goal = old_size_goal;
} else {
- tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+ tp->xmit_size_goal_segs =
+ min_t(u16, xmit_size_goal / mss_now,
+ sk->sk_gso_max_segs);
xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
}
}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 850c737..6cebfd2 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -290,7 +290,8 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
left = tp->snd_cwnd - in_flight;
if (sk_can_gso(sk) &&
left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
- left * tp->mss_cache < sk->sk_gso_max_size)
+ left * tp->mss_cache < sk->sk_gso_max_size &&
+ left < sk->sk_gso_max_segs)
return 1;
return left <= tcp_max_burst(tp);
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index faf257b..e0b8bd1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1310,21 +1310,21 @@ static void tcp_cwnd_validate(struct sock *sk)
* when we would be allowed to send the split-due-to-Nagle skb fully.
*/
static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
- unsigned int mss_now, unsigned int cwnd)
+ unsigned int mss_now, unsigned int max_segs)
{
struct tcp_sock *tp = tcp_sk(sk);
- u32 needed, window, cwnd_len;
+ u32 needed, window, max_len;
window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
- cwnd_len = mss_now * cwnd;
+ max_len = mss_now * max_segs;
- if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
- return cwnd_len;
+ if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+ return max_len;
needed = min(skb->len, window);
- if (cwnd_len <= needed)
- return cwnd_len;
+ if (max_len <= needed)
+ return max_len;
return needed - needed % mss_now;
}
@@ -1551,7 +1551,8 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
limit = min(send_win, cong_win);
/* If a full-sized TSO skb can be sent, do it. */
- if (limit >= sk->sk_gso_max_size)
+ if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+ sk->sk_gso_max_segs * tp->mss_cache))
goto send_now;
/* Middle in queue won't get any more data, full sendable already? */
@@ -1777,7 +1778,9 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
limit = mss_now;
if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
- cwnd_quota);
+ min_t(unsigned int,
+ cwnd_quota,
+ sk->sk_gso_max_segs));
if (skb->len > limit &&
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index be29337..70d6a7f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -492,8 +492,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
struct net_device *dev;
struct inet6_dev *idev;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.forwarding) ^ (!newf);
@@ -502,7 +501,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
dev_forward_change(idev);
}
}
- rcu_read_unlock();
}
static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 71c292e..6a3d680 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1252,11 +1252,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
/* Remove from tunnel list */
spin_lock_bh(&pn->l2tp_tunnel_list_lock);
list_del_rcu(&tunnel->list);
+ kfree_rcu(tunnel, rcu);
spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
- synchronize_rcu();
atomic_dec(&l2tp_tunnel_count);
- kfree(tunnel);
}
/* Create a socket for the tunnel, if one isn't set up by
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a16a48e..4393794 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -157,6 +157,7 @@ struct l2tp_tunnel_cfg {
struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
+ struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */
struct hlist_head session_hlist[L2TP_HASH_SIZE];
/* hashed list of sessions,
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index a18e6c3..99a60d5 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -966,14 +966,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
struct sockaddr_llc sllc;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
- int rc = 0;
+ int rc = -EBADF;
memset(&sllc, 0, sizeof(sllc));
lock_sock(sk);
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
*uaddrlen = sizeof(sllc);
- memset(uaddr, 0, *uaddrlen);
if (peer) {
rc = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index a178cb3..9528ea0 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2675,6 +2675,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
struct ip_vs_timeout_user t;
+ memset(&t, 0, sizeof(t));
__ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 24bc620..16a94a3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1345,7 +1345,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
dst_pid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
err = -EPERM;
- if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+ if ((dst_group || dst_pid) &&
+ !netlink_capable(sock, NL_NONROOT_SEND))
goto out;
} else {
dst_pid = nlk->dst_pid;
@@ -2098,6 +2099,7 @@ static void __init netlink_add_usersock_entry(void)
rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
nl_table[NETLINK_USERSOCK].registered = 1;
+ nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
netlink_table_ungrab();
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index fafb968..1ab5a02 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -866,7 +866,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
if (likely(po->tx_ring.pg_vec)) {
ph = skb_shinfo(skb)->destructor_arg;
- BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
atomic_dec(&po->tx_ring.pending);
__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 596689e..51a8f8e 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -409,6 +409,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
+ msg->msg_namelen = 0;
+
if (msg_flags & MSG_OOB)
goto out;
@@ -484,6 +486,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
sin->sin_port = inc->i_hdr.h_sport;
sin->sin_addr.s_addr = inc->i_saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ msg->msg_namelen = sizeof(*sin);
}
break;
}
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 2b4ab4b..89ddcb5 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
struct tcf_common *pc;
int ret = 0;
int err;
+#ifdef CONFIG_GACT_PROB
+ struct tc_gact_p *p_parm = NULL;
+#endif
if (nla == NULL)
return -EINVAL;
@@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
#ifndef CONFIG_GACT_PROB
if (tb[TCA_GACT_PROB] != NULL)
return -EOPNOTSUPP;
+#else
+ if (tb[TCA_GACT_PROB]) {
+ p_parm = nla_data(tb[TCA_GACT_PROB]);
+ if (p_parm->ptype >= MAX_RAND)
+ return -EINVAL;
+ }
#endif
pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
@@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
spin_lock_bh(&gact->tcf_lock);
gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
- if (tb[TCA_GACT_PROB] != NULL) {
- struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
+ if (p_parm) {
gact->tcfg_paction = p_parm->paction;
gact->tcfg_pval = p_parm->pval;
gact->tcfg_ptype = p_parm->ptype;
@@ -132,7 +140,7 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result
spin_lock(&gact->tcf_lock);
#ifdef CONFIG_GACT_PROB
- if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
+ if (gact->tcfg_ptype)
action = gact_rand[gact->tcfg_ptype](gact);
else
action = gact->tcf_action;
diff --git a/net/socket.c b/net/socket.c
index cf41afc..1b0f0fc 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2645,6 +2645,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
return -EFAULT;
+ memset(&ifc, 0, sizeof(ifc));
if (ifc32.ifcbuf == 0) {
ifc32.ifc_len = 0;
ifc.ifc_len = 0;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 90b73d1..d85a149 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -331,6 +331,9 @@ static void reg_regdb_search(struct work_struct *work)
struct reg_regdb_search_request *request;
const struct ieee80211_regdomain *curdom, *regdom;
int i, r;
+ bool set_reg = false;
+
+ mutex_lock(&cfg80211_mutex);
mutex_lock(&reg_regdb_search_mutex);
while (!list_empty(&reg_regdb_search_list)) {
@@ -346,9 +349,7 @@ static void reg_regdb_search(struct work_struct *work)
r = reg_copy_regd(&regdom, curdom);
if (r)
break;
- mutex_lock(&cfg80211_mutex);
- set_regdom(regdom);
- mutex_unlock(&cfg80211_mutex);
+ set_reg = true;
break;
}
}
@@ -356,6 +357,11 @@ static void reg_regdb_search(struct work_struct *work)
kfree(request);
}
mutex_unlock(&reg_regdb_search_mutex);
+
+ if (set_reg)
+ set_regdom(regdom);
+
+ mutex_unlock(&cfg80211_mutex);
}
static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index 764cc93..075d5aa 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -297,6 +297,7 @@ static int ak4396_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
}
static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1);
+static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
{
@@ -307,7 +308,7 @@ static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
.info = ak4396_dac_vol_info,
.get = ak4396_dac_vol_get,
.put = ak4396_dac_vol_put,
- .tlv = { .p = db_scale_wm_dac },
+ .tlv = { .p = ak4396_db_scale },
},
};
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/