Message-ID: <20121028180312.GA30602@kroah.com>
Date: Sun, 28 Oct 2012 11:03:12 -0700
From: Greg KH <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
torvalds@...ux-foundation.org, stable@...r.kernel.org
Cc: lwn@....net, Jiri Slaby <jslaby@...e.cz>
Subject: Re: Linux 3.0.49
diff --git a/Makefile b/Makefile
index 8dc65e0..1c962a1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 0
-SUBLEVEL = 48
+SUBLEVEL = 49
EXTRAVERSION =
NAME = Sneaky Weasel
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 6860d40..904ed63 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -513,11 +513,13 @@ static u64 nop_for_index(int idx)
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
- u64 val, mask = mask_for_index(idx);
+ u64 enc, val, mask = mask_for_index(idx);
+
+ enc = perf_event_get_enc(cpuc->events[idx]);
val = cpuc->pcr;
val &= ~mask;
- val |= hwc->config;
+ val |= event_encoding(enc, idx);
cpuc->pcr = val;
pcr_ops->write(cpuc->pcr);
@@ -1380,8 +1382,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
{
unsigned long ufp;
- perf_callchain_store(entry, regs->tpc);
-
ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
do {
struct sparc_stackf *usf, sf;
@@ -1402,8 +1402,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
{
unsigned long ufp;
- perf_callchain_store(entry, regs->tpc);
-
ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
do {
struct sparc_stackf32 *usf, sf;
@@ -1422,6 +1420,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
+ perf_callchain_store(entry, regs->tpc);
+
+ if (!current->mm)
+ return;
+
flushw_user();
if (test_thread_flag(TIF_32BIT))
perf_callchain_user_32(entry, regs);
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 908b47a..10c9b36 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -519,12 +519,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
int ret;
- if (current->personality == PER_LINUX32 &&
- personality == PER_LINUX)
- personality = PER_LINUX32;
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
+ personality |= PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret &= ~PER_LINUX32;
return ret;
}
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 1d7e274..7f5f65d 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -212,24 +212,20 @@ linux_sparc_syscall:
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
- ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2
- /* Check if force_successful_syscall_return()
- * was invoked.
- */
- ldub [%g6 + TI_SYS_NOERROR], %l2
- brnz,a,pn %l2, 80f
- stb %g0, [%g6 + TI_SYS_NOERROR]
-
cmp %o0, -ERESTART_RESTARTBLOCK
bgeu,pn %xcc, 1f
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
-80:
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+
+2:
+ stb %g0, [%g6 + TI_SYS_NOERROR]
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
+3:
stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
bne,pn %icc, linux_syscall_trace2
add %l1, 0x4, %l2 ! npc = npc+4
@@ -238,20 +234,20 @@ ret_sys_call:
stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
1:
+ /* Check if force_successful_syscall_return()
+ * was invoked.
+ */
+ ldub [%g6 + TI_SYS_NOERROR], %l2
+ brnz,pn %l2, 2b
+ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
/* System call failure, set Carry condition code.
* Also, get abs(errno) to return to the process.
*/
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
sub %g0, %o0, %o0
- or %g3, %g2, %g3
stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
- stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
- bne,pn %icc, linux_syscall_trace2
- add %l1, 0x4, %l2 ! npc = npc+4
- stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+ ba,pt %xcc, 3b
+ or %g3, %g2, %g3
- b,pt %xcc, rtrap
- stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
linux_syscall_trace2:
call syscall_trace_leave
add %sp, PTREGS_OFF, %o0
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 8e073d8..6ff4d78 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2118,6 +2118,9 @@ EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];
+static long __meminitdata addr_start, addr_end;
+static int __meminitdata node_start;
+
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
unsigned long vstart = (unsigned long) start;
@@ -2148,15 +2151,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
*vmem_pp = pte_base | __pa(block);
- printk(KERN_INFO "[%p-%p] page_structs=%lu "
- "node=%d entry=%lu/%lu\n", start, block, nr,
- node,
- addr >> VMEMMAP_CHUNK_SHIFT,
- VMEMMAP_SIZE);
+ /* check to see if we have contiguous blocks */
+ if (addr_end != addr || node_start != node) {
+ if (addr_start)
+ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+ addr_start, addr_end-1, node_start);
+ addr_start = addr;
+ node_start = node;
+ }
+ addr_end = addr + VMEMMAP_CHUNK;
}
}
return 0;
}
+
+void __meminit vmemmap_populate_print_last(void)
+{
+ if (addr_start) {
+ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+ addr_start, addr_end-1, node_start);
+ addr_start = 0;
+ addr_end = 0;
+ node_start = 0;
+ }
+}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void prot_init_common(unsigned long page_none,
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 17acce7..04c637c 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -26,6 +26,10 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
endif
endif
+# The tile compiler may emit .eh_frame information for backtracing.
+# In kernel modules, this causes load failures due to unsupported relocations.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+
ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
endif
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index edb3d46..268b40d 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1029,7 +1029,7 @@ ENTRY(xen_sysenter_target)
ENTRY(xen_hypervisor_callback)
CFI_STARTPROC
- pushl_cfi $0
+ pushl_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
TRACE_IRQS_OFF
@@ -1071,14 +1071,16 @@ ENTRY(xen_failsafe_callback)
2: mov 8(%esp),%es
3: mov 12(%esp),%fs
4: mov 16(%esp),%gs
+ /* EAX == 0 => Category 1 (Bad segment)
+ EAX != 0 => Category 2 (Bad IRET) */
testl %eax,%eax
popl_cfi %eax
lea 16(%esp),%esp
CFI_ADJUST_CFA_OFFSET -16
jz 5f
addl $16,%esp
- jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
-5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment)
+ jmp iret_exc
+5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
jmp ret_from_exception
CFI_ENDPROC
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 8a445a0..dd4dba4 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1308,7 +1308,7 @@ ENTRY(xen_failsafe_callback)
CFI_RESTORE r11
addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30
- pushq_cfi $0
+ pushq_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
jmp error_exit
CFI_ENDPROC
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index afaf384..af19a61 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -937,8 +937,21 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_X86_64
if (max_pfn > max_low_pfn) {
- max_pfn_mapped = init_memory_mapping(1UL<<32,
- max_pfn<<PAGE_SHIFT);
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+
+ if (ei->addr + ei->size <= 1UL << 32)
+ continue;
+
+ if (ei->type == E820_RESERVED)
+ continue;
+
+ max_pfn_mapped = init_memory_mapping(
+ ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
+ ei->addr + ei->size);
+ }
+
/* can we preseve max_low_pfn ?*/
max_low_pfn = max_pfn;
}
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 68894fd..a00c588 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
val |= counter_config->extra;
event &= model->event_mask ? model->event_mask : 0xFF;
val |= event & 0xFF;
- val |= (event & 0x0F00) << 24;
+ val |= (u64)(event & 0x0F00) << 24;
return val;
}
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9a8bebc..feb2d10 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -161,8 +161,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
* memory controller and apply to register. Search for the first
* bandwidth entry that is greater or equal than the setting requested
* and program that. If at last entry, turn off DRAM scrubbing.
+ *
+ * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
+ * by falling back to the last element in scrubrates[].
*/
- for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
+ for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
/*
* skip scrub rates which aren't recommended
* (see F10 BKDG, F3x58)
@@ -172,12 +175,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
if (scrubrates[i].bandwidth <= new_bw)
break;
-
- /*
- * if no suitable bandwidth found, turn off DRAM scrubbing
- * entirely by falling back to the last element in the
- * scrubrates array.
- */
}
scrubval = scrubrates[i].scrubval;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 387b2b3..557e007 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3113,6 +3113,11 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
+#define _TRANSA_CHICKEN2 0xf0064
+#define _TRANSB_CHICKEN2 0xf1064
+#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
+
#define SOUTH_CHICKEN2 0xc2004
#define DPLS_EDP_PPS_FIX_DIS (1<<0)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4b8e235..36d76989 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7584,6 +7584,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -7593,6 +7594,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
+ /* Without this, mode sets may fail silently on FDI */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
static void ironlake_teardown_rc6(struct drm_device *dev)
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index c03eb29..9945aaf 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -1697,14 +1697,18 @@ static int vidioc_streamoff(struct file *file, void *priv,
(AUVI_INPUT(i).audio_setup)(dev, 0);
}
- videobuf_streamoff(&fh->vb_vidq);
- res_free(fh, AU0828_RESOURCE_VIDEO);
+ if (res_check(fh, AU0828_RESOURCE_VIDEO)) {
+ videobuf_streamoff(&fh->vb_vidq);
+ res_free(fh, AU0828_RESOURCE_VIDEO);
+ }
} else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
dev->vbi_timeout_running = 0;
del_timer_sync(&dev->vbi_timeout);
- videobuf_streamoff(&fh->vb_vbiq);
- res_free(fh, AU0828_RESOURCE_VBI);
+ if (res_check(fh, AU0828_RESOURCE_VBI)) {
+ videobuf_streamoff(&fh->vb_vbiq);
+ res_free(fh, AU0828_RESOURCE_VBI);
+ }
}
return 0;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index f4be5c7..b446e7e 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -4097,6 +4097,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
DMI_MATCH(DMI_BOARD_NAME, "nForce"),
},
},
+ {
+ .ident = "ASUS P5NSLI",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
+ },
+ },
{}
};
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index 81af2b3..097fa00 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -222,7 +222,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
sharpsl_pcmcia_init_reset(skt);
}
-static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = {
+static struct pcmcia_low_level sharpsl_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = sharpsl_pcmcia_hw_init,
.hw_shutdown = sharpsl_pcmcia_hw_shutdown,
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 3b2ce7d..b107339 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -760,10 +760,6 @@ static const __u32 acm_tty_speed[] = {
2500000, 3000000, 3500000, 4000000
};
-static const __u8 acm_tty_size[] = {
- 5, 6, 7, 8
-};
-
static void acm_tty_set_termios(struct tty_struct *tty,
struct ktermios *termios_old)
{
@@ -780,7 +776,21 @@ static void acm_tty_set_termios(struct tty_struct *tty,
newline.bParityType = termios->c_cflag & PARENB ?
(termios->c_cflag & PARODD ? 1 : 2) +
(termios->c_cflag & CMSPAR ? 2 : 0) : 0;
- newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ newline.bDataBits = 5;
+ break;
+ case CS6:
+ newline.bDataBits = 6;
+ break;
+ case CS7:
+ newline.bDataBits = 7;
+ break;
+ case CS8:
+ default:
+ newline.bDataBits = 8;
+ break;
+ }
/* FIXME: Needs to clear unsupported bits in the termios */
acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
@@ -1172,7 +1182,7 @@ made_compressed_probe:
if (usb_endpoint_xfer_int(epwrite))
usb_fill_int_urb(snd->urb, usb_dev,
- usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
+ usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
else
usb_fill_bulk_urb(snd->urb, usb_dev,
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index b455f4c..a44f2d4 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1505,6 +1505,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct xhci_cd *cur_cd, *next_cd;
int size;
int i;
@@ -1525,6 +1526,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
xhci_dbg(xhci, "Freed command ring\n");
+ list_for_each_entry_safe(cur_cd, next_cd,
+ &xhci->cancel_cmd_list, cancel_cmd_list) {
+ list_del(&cur_cd->cancel_cmd_list);
+ kfree(cur_cd);
+ }
for (i = 1; i < MAX_HC_SLOTS; ++i)
xhci_free_virt_device(xhci, i);
@@ -2014,6 +2020,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
if (!xhci->cmd_ring)
goto fail;
+ INIT_LIST_HEAD(&xhci->cancel_cmd_list);
xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
(unsigned long long)xhci->cmd_ring->first_seg->dma);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 152daca..fd56407 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -311,12 +311,123 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
+ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
+ return;
+
xhci_dbg(xhci, "// Ding dong!\n");
xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
xhci_readl(xhci, &xhci->dba->doorbell[0]);
}
+static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
+{
+ u64 temp_64;
+ int ret;
+
+ xhci_dbg(xhci, "Abort command ring\n");
+
+ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
+ xhci_dbg(xhci, "The command ring isn't running, "
+ "Have the command ring been stopped?\n");
+ return 0;
+ }
+
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ if (!(temp_64 & CMD_RING_RUNNING)) {
+ xhci_dbg(xhci, "Command ring had been stopped\n");
+ return 0;
+ }
+ xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+ &xhci->op_regs->cmd_ring);
+
+ /* Section 4.6.1.2 of the xHCI 1.0 spec says software should
+ * time the completion of all xHCI commands, including
+ * the Command Abort operation. If software doesn't see
+ * CRR negated in a timely manner (e.g. longer than 5
+ * seconds), then it should assume that there are
+ * larger problems with the xHC and assert HCRST.
+ */
+ ret = handshake(xhci, &xhci->op_regs->cmd_ring,
+ CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
+ if (ret < 0) {
+ xhci_err(xhci, "Stopped the command ring failed, "
+ "maybe the host is dead\n");
+ xhci->xhc_state |= XHCI_STATE_DYING;
+ xhci_quiesce(xhci);
+ xhci_halt(xhci);
+ return -ESHUTDOWN;
+ }
+
+ return 0;
+}
+
+static int xhci_queue_cd(struct xhci_hcd *xhci,
+ struct xhci_command *command,
+ union xhci_trb *cmd_trb)
+{
+ struct xhci_cd *cd;
+ cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
+ if (!cd)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&cd->cancel_cmd_list);
+
+ cd->command = command;
+ cd->cmd_trb = cmd_trb;
+ list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
+
+ return 0;
+}
+
+/*
+ * Cancel a command that has failed to complete.
+ *
+ * Some commands may hang while waiting for an acknowledgement from
+ * the USB device. That is outside of the xHC's ability to control and
+ * will leave the command ring blocked. When this occurs, software
+ * should intervene to recover the command ring.
+ * See Sections 4.6.1.1 and 4.6.1.2
+ */
+int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
+ union xhci_trb *cmd_trb)
+{
+ int retval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
+ xhci_warn(xhci, "Abort the command ring,"
+ " but the xHCI is dead.\n");
+ retval = -ESHUTDOWN;
+ goto fail;
+ }
+
+ /* queue the command descriptor on cancel_cmd_list */
+ retval = xhci_queue_cd(xhci, command, cmd_trb);
+ if (retval) {
+ xhci_warn(xhci, "Queuing command descriptor failed.\n");
+ goto fail;
+ }
+
+ /* abort command ring */
+ retval = xhci_abort_cmd_ring(xhci);
+ if (retval) {
+ xhci_err(xhci, "Abort command ring failed\n");
+ if (unlikely(retval == -ESHUTDOWN)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+ xhci_dbg(xhci, "xHCI host controller is dead.\n");
+ return retval;
+ }
+ }
+
+fail:
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return retval;
+}
+
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index,
@@ -1046,6 +1157,20 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
}
}
+/* Complete the command and delete it from the device's command queue.
+ */
+static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
+ struct xhci_command *command, u32 status)
+{
+ command->status = status;
+ list_del(&command->cmd_list);
+ if (command->completion)
+ complete(command->completion);
+ else
+ xhci_free_command(xhci, command);
+}
+
+
/* Check to see if a command in the device's command queue matches this one.
* Signal the completion or free the command, and return 1. Return 0 if the
* completed command isn't at the head of the command list.
@@ -1064,15 +1189,144 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
if (xhci->cmd_ring->dequeue != command->command_trb)
return 0;
- command->status = GET_COMP_CODE(le32_to_cpu(event->status));
- list_del(&command->cmd_list);
- if (command->completion)
- complete(command->completion);
- else
- xhci_free_command(xhci, command);
+ xhci_complete_cmd_in_cmd_wait_list(xhci, command,
+ GET_COMP_CODE(le32_to_cpu(event->status)));
return 1;
}
+/*
+ * Find the command TRB that needs to be cancelled and turn it into a
+ * No Op command. If the command is on the device's command wait
+ * list, finish and free it as well.
+ *
+ * If we can't find the command TRB, assume it has already been
+ * executed.
+ */
+static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
+{
+ struct xhci_segment *cur_seg;
+ union xhci_trb *cmd_trb;
+ u32 cycle_state;
+
+ if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+ return;
+
+ /* find the current segment of command ring */
+ cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
+ xhci->cmd_ring->dequeue, &cycle_state);
+
+ /* find the command trb matched by cd from command ring */
+ for (cmd_trb = xhci->cmd_ring->dequeue;
+ cmd_trb != xhci->cmd_ring->enqueue;
+ next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
+ /* If the trb is link trb, continue */
+ if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
+ continue;
+
+ if (cur_cd->cmd_trb == cmd_trb) {
+
+ /* If the command is on the device's command list, we should
+ * finish it and free the command structure.
+ */
+ if (cur_cd->command)
+ xhci_complete_cmd_in_cmd_wait_list(xhci,
+ cur_cd->command, COMP_CMD_STOP);
+
+ /* get cycle state from the origin command trb */
+ cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
+ & TRB_CYCLE;
+
+ /* modify the command trb to NO OP command */
+ cmd_trb->generic.field[0] = 0;
+ cmd_trb->generic.field[1] = 0;
+ cmd_trb->generic.field[2] = 0;
+ cmd_trb->generic.field[3] = cpu_to_le32(
+ TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+ break;
+ }
+ }
+}
+
+static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
+{
+ struct xhci_cd *cur_cd, *next_cd;
+
+ if (list_empty(&xhci->cancel_cmd_list))
+ return;
+
+ list_for_each_entry_safe(cur_cd, next_cd,
+ &xhci->cancel_cmd_list, cancel_cmd_list) {
+ xhci_cmd_to_noop(xhci, cur_cd);
+ list_del(&cur_cd->cancel_cmd_list);
+ kfree(cur_cd);
+ }
+}
+
+/*
+ * Traverse the cancel_cmd_list. If the command descriptor matching
+ * cmd_trb is found, the function frees it and returns 1; otherwise it
+ * returns 0.
+ */
+static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
+ union xhci_trb *cmd_trb)
+{
+ struct xhci_cd *cur_cd, *next_cd;
+
+ if (list_empty(&xhci->cancel_cmd_list))
+ return 0;
+
+ list_for_each_entry_safe(cur_cd, next_cd,
+ &xhci->cancel_cmd_list, cancel_cmd_list) {
+ if (cur_cd->cmd_trb == cmd_trb) {
+ if (cur_cd->command)
+ xhci_complete_cmd_in_cmd_wait_list(xhci,
+ cur_cd->command, COMP_CMD_STOP);
+ list_del(&cur_cd->cancel_cmd_list);
+ kfree(cur_cd);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
+ * TRB pointed to by the command ring dequeue pointer is the TRB we want
+ * to cancel or not. If the cmd_trb_comp_code is COMP_CMD_STOP, we also
+ * traverse the cancel_cmd_list to turn every command that has a command
+ * descriptor into a NO-OP TRB.
+ */
+static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+ int cmd_trb_comp_code)
+{
+ int cur_trb_is_good = 0;
+
+ /* Search the command descriptor list for the command TRB pointed
+ * to by the command ring dequeue pointer. If it is found, free it.
+ */
+ cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
+ xhci->cmd_ring->dequeue);
+
+ if (cmd_trb_comp_code == COMP_CMD_ABORT)
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+ else if (cmd_trb_comp_code == COMP_CMD_STOP) {
+ /* traverse the cancel_cmd_list and cancel each command
+ * that has a matching command descriptor
+ */
+ xhci_cancel_cmd_in_cd_list(xhci);
+
+ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+ /*
+ * ring command ring doorbell again to restart the
+ * command ring
+ */
+ if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
+ xhci_ring_cmd_db(xhci);
+ }
+ return cur_trb_is_good;
+}
+
static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
@@ -1098,6 +1352,22 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci->error_bitmask |= 1 << 5;
return;
}
+
+ if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
+ (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
+ /* If the return value is 0, the TRB pointed to by the
+ * command ring dequeue pointer is a good TRB: one we don't
+ * want to cancel, which has merely been stopped by the
+ * host, so we should handle it normally.
+ * Otherwise, the driver should invoke inc_deq() and return.
+ */
+ if (handle_stopped_cmd_ring(xhci,
+ GET_COMP_CODE(le32_to_cpu(event->status)))) {
+ inc_deq(xhci, xhci->cmd_ring, false);
+ return;
+ }
+ }
+
switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
& TRB_TYPE_BITMASK) {
case TRB_TYPE(TRB_ENABLE_SLOT):
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index c39ab20..4864b25 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -51,7 +51,7 @@ MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
* handshake done). There are two failure modes: "usec" have passed (major
* hardware flakeout), or the register reads as all-ones (hardware removed).
*/
-static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
u32 result;
@@ -104,8 +104,10 @@ int xhci_halt(struct xhci_hcd *xhci)
ret = handshake(xhci, &xhci->op_regs->status,
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
- if (!ret)
+ if (!ret) {
xhci->xhc_state |= XHCI_STATE_HALTED;
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+ }
return ret;
}
@@ -390,6 +392,7 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
return -ENODEV;
}
xhci->shared_hcd->state = HC_STATE_RUNNING;
+ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
if (xhci->quirks & XHCI_NEC_HOST)
xhci_ring_cmd_db(xhci);
@@ -1775,6 +1778,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
struct completion *cmd_completion;
u32 *cmd_status;
struct xhci_virt_device *virt_dev;
+ union xhci_trb *cmd_trb;
spin_lock_irqsave(&xhci->lock, flags);
virt_dev = xhci->devs[udev->slot_id];
@@ -1817,6 +1821,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
}
init_completion(cmd_completion);
+ cmd_trb = xhci->cmd_ring->dequeue;
if (!ctx_change)
ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
udev->slot_id, must_succeed);
@@ -1838,14 +1843,17 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
/* Wait for the configure endpoint command to complete */
timeleft = wait_for_completion_interruptible_timeout(
cmd_completion,
- USB_CTRL_SET_TIMEOUT);
+ XHCI_CMD_DEFAULT_TIMEOUT);
if (timeleft <= 0) {
xhci_warn(xhci, "%s while waiting for %s command\n",
timeleft == 0 ? "Timeout" : "Signal",
ctx_change == 0 ?
"configure endpoint" :
"evaluate context");
- /* FIXME cancel the configure endpoint command */
+ /* cancel the configure endpoint command */
+ ret = xhci_cancel_cmd(xhci, command, cmd_trb);
+ if (ret < 0)
+ return ret;
return -ETIME;
}
@@ -2778,8 +2786,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
unsigned long flags;
int timeleft;
int ret;
+ union xhci_trb *cmd_trb;
spin_lock_irqsave(&xhci->lock, flags);
+ cmd_trb = xhci->cmd_ring->dequeue;
ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -2791,12 +2801,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
/* XXX: how much time for xHC slot assignment? */
timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
- USB_CTRL_SET_TIMEOUT);
+ XHCI_CMD_DEFAULT_TIMEOUT);
if (timeleft <= 0) {
xhci_warn(xhci, "%s while waiting for a slot\n",
timeleft == 0 ? "Timeout" : "Signal");
- /* FIXME cancel the enable slot request */
- return 0;
+ /* cancel the enable slot request */
+ return xhci_cancel_cmd(xhci, NULL, cmd_trb);
}
if (!xhci->slot_id) {
@@ -2857,6 +2867,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_slot_ctx *slot_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
u64 temp_64;
+ union xhci_trb *cmd_trb;
if (!udev->slot_id) {
xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
@@ -2895,6 +2906,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
spin_lock_irqsave(&xhci->lock, flags);
+ cmd_trb = xhci->cmd_ring->dequeue;
ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
udev->slot_id);
if (ret) {
@@ -2907,7 +2919,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
- USB_CTRL_SET_TIMEOUT);
+ XHCI_CMD_DEFAULT_TIMEOUT);
/* FIXME: From section 4.3.4: "Software shall be responsible for timing
* the SetAddress() "recovery interval" required by USB and aborting the
* command on a timeout.
@@ -2915,7 +2927,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
if (timeleft <= 0) {
xhci_warn(xhci, "%s while waiting for a slot\n",
timeleft == 0 ? "Timeout" : "Signal");
- /* FIXME cancel the address device command */
+ /* cancel the address device command */
+ ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
+ if (ret < 0)
+ return ret;
return -ETIME;
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 21482df..1d72895 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1070,6 +1070,9 @@ union xhci_trb {
#define TRB_MFINDEX_WRAP 39
/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
+#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+ cpu_to_le32(TRB_TYPE(TRB_LINK)))
+
/* Nec vendor-specific command completion event. */
#define TRB_NEC_CMD_COMP 48
/* Get NEC firmware revision. */
@@ -1111,6 +1114,16 @@ struct xhci_td {
union xhci_trb *last_trb;
};
+/* xHCI command default timeout value */
+#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ)
+
+/* command descriptor */
+struct xhci_cd {
+ struct list_head cancel_cmd_list;
+ struct xhci_command *command;
+ union xhci_trb *cmd_trb;
+};
+
struct xhci_dequeue_state {
struct xhci_segment *new_deq_seg;
union xhci_trb *new_deq_ptr;
@@ -1252,6 +1265,11 @@ struct xhci_hcd {
/* data structures */
struct xhci_device_context_array *dcbaa;
struct xhci_ring *cmd_ring;
+ unsigned int cmd_ring_state;
+#define CMD_RING_STATE_RUNNING (1 << 0)
+#define CMD_RING_STATE_ABORTED (1 << 1)
+#define CMD_RING_STATE_STOPPED (1 << 2)
+ struct list_head cancel_cmd_list;
unsigned int cmd_ring_reserved_trbs;
struct xhci_ring *event_ring;
struct xhci_erst erst;
@@ -1486,6 +1504,8 @@ void xhci_unregister_pci(void);
#endif
/* xHCI host controller glue */
+int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int usec);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_reset(struct xhci_hcd *xhci);
@@ -1568,6 +1588,8 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
void xhci_stop_endpoint_command_watchdog(unsigned long arg);
+int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
+ union xhci_trb *cmd_trb);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 4e0c118..c334670 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -503,11 +503,19 @@ static const struct option_blacklist_info net_intf5_blacklist = {
.reserved = BIT(5),
};
+static const struct option_blacklist_info net_intf6_blacklist = {
+ .reserved = BIT(6),
+};
+
static const struct option_blacklist_info zte_mf626_blacklist = {
.sendsetup = BIT(0) | BIT(1),
.reserved = BIT(4),
};
+static const struct option_blacklist_info zte_1255_blacklist = {
+ .reserved = BIT(3) | BIT(4),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -853,13 +861,19 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
@@ -872,7 +886,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
@@ -880,9 +895,22 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -998,18 +1026,24 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
@@ -1054,8 +1088,16 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1067,15 +1109,21 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
index 36057ce..6e2a2d5 100644
--- a/fs/lockd/clntxdr.c
+++ b/fs/lockd/clntxdr.c
@@ -223,7 +223,7 @@ static void encode_nlm_stat(struct xdr_stream *xdr,
{
__be32 *p;
- BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
+ WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
p = xdr_reserve_space(xdr, 4);
*p = stat;
}
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index df753a1..23d7451 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -40,7 +40,6 @@ struct nsm_args {
u32 proc;
char *mon_name;
- char *nodename;
};
struct nsm_res {
@@ -94,7 +93,6 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
.vers = 3,
.proc = NLMPROC_NSM_NOTIFY,
.mon_name = nsm->sm_mon_name,
- .nodename = utsname()->nodename,
};
struct rpc_message msg = {
.rpc_argp = &args,
@@ -431,7 +429,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
{
__be32 *p;
- encode_nsm_string(xdr, argp->nodename);
+ encode_nsm_string(xdr, utsname()->nodename);
p = xdr_reserve_space(xdr, 4 + 4 + 4);
*p++ = cpu_to_be32(argp->prog);
*p++ = cpu_to_be32(argp->vers);
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index d27aab1..d413af3 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -67,7 +67,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
/* Obtain file pointer. Not used by FREE_ALL call. */
if (filp != NULL) {
- if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
+ error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh));
+ if (error != 0)
goto no_locks;
*filp = file;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2efce77..69158d5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1800,9 +1800,8 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
* trading it for newcg is protected by cgroup_mutex, we're safe to drop
* it here; it will be freed under RCU.
*/
- put_css_set(oldcg);
-
set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
+ put_css_set(oldcg);
return 0;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index dd29555..84e353b1 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1133,15 +1133,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1151,8 +1152,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = clamp_t(size_t, len, 1, sizeof(buf));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 96bb0a3..eb8857a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1313,8 +1313,6 @@ int neigh_resolve_output(struct sk_buff *skb)
if (!dst)
goto discard;
- __skb_pull(skb, skb_network_offset(skb));
-
if (!neigh_event_send(neigh, skb)) {
int err;
struct net_device *dev = neigh->dev;
@@ -1326,6 +1324,7 @@ int neigh_resolve_output(struct sk_buff *skb)
neigh_hh_init(neigh, dst, dst->ops->protocol);
do {
+ __skb_pull(skb, skb_network_offset(skb));
seq = read_seqbegin(&neigh->ha_lock);
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
neigh->ha, NULL, skb->len);
@@ -1358,9 +1357,8 @@ int neigh_connected_output(struct sk_buff *skb)
struct net_device *dev = neigh->dev;
unsigned int seq;
- __skb_pull(skb, skb_network_offset(skb));
-
do {
+ __skb_pull(skb, skb_network_offset(skb));
seq = read_seqbegin(&neigh->ha_lock);
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
neigh->ha, NULL, skb->len);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 53a5af6..d645c6f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -651,10 +651,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
/* When socket is gone, all binding information is lost.
- * routing might fail in this case. using iif for oif to
- * make sure we can deliver it
+ * routing might fail in this case. No choice here: if we force the
+ * input interface, we will misroute in the case of an asymmetric route.
*/
- arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
+ if (sk)
+ arg.bound_dev_if = sk->sk_bound_dev_if;
net = dev_net(skb_dst(skb)->dev);
ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 848f963..a6d5850 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1060,7 +1060,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
fl6.flowi6_proto = IPPROTO_TCP;
- fl6.flowi6_oif = inet6_iif(skb);
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = inet6_iif(skb);
fl6.fl6_dport = t1->dest;
fl6.fl6_sport = t1->source;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
diff --git a/net/rds/send.c b/net/rds/send.c
index c803341..f6bdfb0 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1121,7 +1121,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
rds_stats_inc(s_send_pong);
if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
- rds_send_xmit(conn);
+ queue_delayed_work(rds_wq, &conn->c_send_w, 0);
rds_message_put(rm);
return 0;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 4530a91..237a2ee 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1404,11 +1404,11 @@ static ssize_t read_flush(struct file *file, char __user *buf,
size_t count, loff_t *ppos,
struct cache_detail *cd)
{
- char tbuf[20];
+ char tbuf[22];
unsigned long p = *ppos;
size_t len;
- sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
+ snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
len = strlen(tbuf);
if (p >= len)
return 0;
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/