Message-ID: <20121028180256.GB24247@kroah.com>
Date: Sun, 28 Oct 2012 11:02:56 -0700
From: Greg KH <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
torvalds@...ux-foundation.org, stable@...r.kernel.org
Cc: lwn@....net, Jiri Slaby <jslaby@...e.cz>
Subject: Re: Linux 3.4.16
diff --git a/Makefile b/Makefile
index fe9ea67..470d96b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 15
+SUBLEVEL = 16
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index d80f79d..8e1fb82 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -5,7 +5,7 @@ OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
#else
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
-OUTPUT_ARCH(s390)
+OUTPUT_ARCH(s390:31-bit)
#endif
ENTRY(startup)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 21109c6..1343d7c 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -8,7 +8,7 @@
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
-OUTPUT_ARCH(s390)
+OUTPUT_ARCH(s390:31-bit)
ENTRY(startup)
jiffies = jiffies_64 + 4;
#else
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 28559ce..602eca8 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -557,11 +557,13 @@ static u64 nop_for_index(int idx)
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
- u64 val, mask = mask_for_index(idx);
+ u64 enc, val, mask = mask_for_index(idx);
+
+ enc = perf_event_get_enc(cpuc->events[idx]);
val = cpuc->pcr;
val &= ~mask;
- val |= hwc->config;
+ val |= event_encoding(enc, idx);
cpuc->pcr = val;
pcr_ops->write(cpuc->pcr);
@@ -1428,8 +1430,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
{
unsigned long ufp;
- perf_callchain_store(entry, regs->tpc);
-
ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
do {
struct sparc_stackf *usf, sf;
@@ -1450,8 +1450,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
{
unsigned long ufp;
- perf_callchain_store(entry, regs->tpc);
-
ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
do {
struct sparc_stackf32 *usf, sf;
@@ -1470,6 +1468,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
+ perf_callchain_store(entry, regs->tpc);
+
+ if (!current->mm)
+ return;
+
flushw_user();
if (test_thread_flag(TIF_32BIT))
perf_callchain_user_32(entry, regs);
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 3ee51f1..57b7cab 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -519,12 +519,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
int ret;
- if (current->personality == PER_LINUX32 &&
- personality == PER_LINUX)
- personality = PER_LINUX32;
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
+ personality |= PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret &= ~PER_LINUX32;
return ret;
}
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 1d7e274..7f5f65d 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -212,24 +212,20 @@ linux_sparc_syscall:
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
- ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2
- /* Check if force_successful_syscall_return()
- * was invoked.
- */
- ldub [%g6 + TI_SYS_NOERROR], %l2
- brnz,a,pn %l2, 80f
- stb %g0, [%g6 + TI_SYS_NOERROR]
-
cmp %o0, -ERESTART_RESTARTBLOCK
bgeu,pn %xcc, 1f
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
-80:
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+
+2:
+ stb %g0, [%g6 + TI_SYS_NOERROR]
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
+3:
stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
bne,pn %icc, linux_syscall_trace2
add %l1, 0x4, %l2 ! npc = npc+4
@@ -238,20 +234,20 @@ ret_sys_call:
stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
1:
+ /* Check if force_successful_syscall_return()
+ * was invoked.
+ */
+ ldub [%g6 + TI_SYS_NOERROR], %l2
+ brnz,pn %l2, 2b
+ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
/* System call failure, set Carry condition code.
* Also, get abs(errno) to return to the process.
*/
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
sub %g0, %o0, %o0
- or %g3, %g2, %g3
stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
- stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
- bne,pn %icc, linux_syscall_trace2
- add %l1, 0x4, %l2 ! npc = npc+4
- stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+ ba,pt %xcc, 3b
+ or %g3, %g2, %g3
- b,pt %xcc, rtrap
- stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
linux_syscall_trace2:
call syscall_trace_leave
add %sp, PTREGS_OFF, %o0
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 21faaee..0791618 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2099,6 +2099,9 @@ EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];
+static long __meminitdata addr_start, addr_end;
+static int __meminitdata node_start;
+
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
unsigned long vstart = (unsigned long) start;
@@ -2129,15 +2132,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
*vmem_pp = pte_base | __pa(block);
- printk(KERN_INFO "[%p-%p] page_structs=%lu "
- "node=%d entry=%lu/%lu\n", start, block, nr,
- node,
- addr >> VMEMMAP_CHUNK_SHIFT,
- VMEMMAP_SIZE);
+ /* check to see if we have contiguous blocks */
+ if (addr_end != addr || node_start != node) {
+ if (addr_start)
+ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+ addr_start, addr_end-1, node_start);
+ addr_start = addr;
+ node_start = node;
+ }
+ addr_end = addr + VMEMMAP_CHUNK;
}
}
return 0;
}
+
+void __meminit vmemmap_populate_print_last(void)
+{
+ if (addr_start) {
+ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+ addr_start, addr_end-1, node_start);
+ addr_start = 0;
+ addr_end = 0;
+ node_start = 0;
+ }
+}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void prot_init_common(unsigned long page_none,
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 9520bc5..99f461d 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -26,6 +26,10 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
endif
endif
+# The tile compiler may emit .eh_frame information for backtracing.
+# In kernel modules, this causes load failures due to unsupported relocations.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+
ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
endif
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 7b784f4..6d4f7ba 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1025,7 +1025,7 @@ ENTRY(xen_sysenter_target)
ENTRY(xen_hypervisor_callback)
CFI_STARTPROC
- pushl_cfi $0
+ pushl_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
TRACE_IRQS_OFF
@@ -1067,14 +1067,16 @@ ENTRY(xen_failsafe_callback)
2: mov 8(%esp),%es
3: mov 12(%esp),%fs
4: mov 16(%esp),%gs
+ /* EAX == 0 => Category 1 (Bad segment)
+ EAX != 0 => Category 2 (Bad IRET) */
testl %eax,%eax
popl_cfi %eax
lea 16(%esp),%esp
CFI_ADJUST_CFA_OFFSET -16
jz 5f
addl $16,%esp
- jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
-5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment)
+ jmp iret_exc
+5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
jmp ret_from_exception
CFI_ENDPROC
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index cdc79b5..bd6f592 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1351,7 +1351,7 @@ ENTRY(xen_failsafe_callback)
CFI_RESTORE r11
addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30
- pushq_cfi $0
+ pushq_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
jmp error_exit
CFI_ENDPROC
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 1a29015..fbbd1eb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -927,8 +927,21 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_X86_64
if (max_pfn > max_low_pfn) {
- max_pfn_mapped = init_memory_mapping(1UL<<32,
- max_pfn<<PAGE_SHIFT);
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+
+ if (ei->addr + ei->size <= 1UL << 32)
+ continue;
+
+ if (ei->type == E820_RESERVED)
+ continue;
+
+ max_pfn_mapped = init_memory_mapping(
+ ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
+ ei->addr + ei->size);
+ }
+
/* can we preseve max_low_pfn ?*/
max_low_pfn = max_pfn;
}
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 26b8a85..48768df 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
val |= counter_config->extra;
event &= model->event_mask ? model->event_mask : 0xFF;
val |= event & 0xFF;
- val |= (event & 0x0F00) << 24;
+ val |= (u64)(event & 0x0F00) << 24;
return val;
}
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 7ef73c9..a8bfe1c 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -170,8 +170,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
* memory controller and apply to register. Search for the first
* bandwidth entry that is greater or equal than the setting requested
* and program that. If at last entry, turn off DRAM scrubbing.
+ *
+ * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
+ * by falling back to the last element in scrubrates[].
*/
- for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
+ for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
/*
* skip scrub rates which aren't recommended
* (see F10 BKDG, F3x58)
@@ -181,12 +184,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
if (scrubrates[i].bandwidth <= new_bw)
break;
-
- /*
- * if no suitable bandwidth found, turn off DRAM scrubbing
- * entirely by falling back to the last element in the
- * scrubrates array.
- */
}
scrubval = scrubrates[i].scrubval;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e3e470f..67432e2 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3451,7 +3451,8 @@ out:
}
static const struct ibnl_client_cbs cma_cb_table[] = {
- [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
+ [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
+ .module = THIS_MODULE },
};
static int __init cma_init(void)
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 396e293..8c3b08a 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -151,6 +151,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct netlink_dump_control c = {
.dump = client->cb_table[op].dump,
+ .module = client->cb_table[op].module,
};
return netlink_dump_start(nls, skb, nlh, &c);
}
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 17ef6c4..035f28e 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -148,7 +148,7 @@
#define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12)
#define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22)
-#define SMMU_PDN_TO_ADDR(addr) ((pdn) << 22)
+#define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22)
#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 0b3e481..eab0641 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -1692,14 +1692,18 @@ static int vidioc_streamoff(struct file *file, void *priv,
(AUVI_INPUT(i).audio_setup)(dev, 0);
}
- videobuf_streamoff(&fh->vb_vidq);
- res_free(fh, AU0828_RESOURCE_VIDEO);
+ if (res_check(fh, AU0828_RESOURCE_VIDEO)) {
+ videobuf_streamoff(&fh->vb_vidq);
+ res_free(fh, AU0828_RESOURCE_VIDEO);
+ }
} else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
dev->vbi_timeout_running = 0;
del_timer_sync(&dev->vbi_timeout);
- videobuf_streamoff(&fh->vb_vbiq);
- res_free(fh, AU0828_RESOURCE_VBI);
+ if (res_check(fh, AU0828_RESOURCE_VBI)) {
+ videobuf_streamoff(&fh->vb_vbiq);
+ res_free(fh, AU0828_RESOURCE_VBI);
+ }
}
return 0;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 47b19c0..eb9f5fb 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2897,9 +2897,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
if (le16_to_cpu(p->features) & 1)
*busw = NAND_BUSWIDTH_16;
- chip->options &= ~NAND_CHIPOPTIONS_MSK;
- chip->options |= (NAND_NO_READRDY |
- NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
+ chip->options |= NAND_NO_READRDY | NAND_NO_AUTOINCR;
pr_info("ONFI flash detected\n");
return 1;
@@ -3064,9 +3062,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
}
}
- /* Get chip options, preserve non chip based options */
- chip->options &= ~NAND_CHIPOPTIONS_MSK;
- chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
+ /* Get chip options */
+ chip->options |= type->options;
/*
* Check if chip is not a Samsung device. Do not clear the
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 5a30bf8..f4be8f7 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4153,6 +4153,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
DMI_MATCH(DMI_BOARD_NAME, "nForce"),
},
},
+ {
+ .ident = "ASUS P5NSLI",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
+ },
+ },
{}
};
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index b066273..7dd879c 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -194,7 +194,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
sharpsl_pcmcia_init_reset(skt);
}
-static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = {
+static struct pcmcia_low_level sharpsl_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = sharpsl_pcmcia_hw_init,
.socket_state = sharpsl_pcmcia_socket_state,
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index 9b32968..2a32a2f 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -259,7 +259,7 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->lpmd_bit;
- *width = 1;
+ *width = 2;
break;
case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH:
*bank = g->drv_bank;
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
index 4d7571d..636d96c 100644
--- a/drivers/pinctrl/pinctrl-tegra30.c
+++ b/drivers/pinctrl/pinctrl-tegra30.c
@@ -3343,10 +3343,10 @@ static const struct tegra_function tegra30_functions[] = {
FUNCTION(vi_alt3),
};
-#define MUXCTL_REG_A 0x3000
-#define PINGROUP_REG_A 0x868
+#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
+#define PINGROUP_REG_A 0x3000 /* bank 1 */
-#define PINGROUP_REG_Y(r) ((r) - MUXCTL_REG_A)
+#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
#define PINGROUP_REG_N(r) -1
#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior) \
@@ -3362,25 +3362,25 @@ static const struct tegra_function tegra30_functions[] = {
}, \
.func_safe = TEGRA_MUX_ ## f_safe, \
.mux_reg = PINGROUP_REG_Y(r), \
- .mux_bank = 0, \
+ .mux_bank = 1, \
.mux_bit = 0, \
.pupd_reg = PINGROUP_REG_Y(r), \
- .pupd_bank = 0, \
+ .pupd_bank = 1, \
.pupd_bit = 2, \
.tri_reg = PINGROUP_REG_Y(r), \
- .tri_bank = 0, \
+ .tri_bank = 1, \
.tri_bit = 4, \
.einput_reg = PINGROUP_REG_Y(r), \
- .einput_bank = 0, \
+ .einput_bank = 1, \
.einput_bit = 5, \
.odrain_reg = PINGROUP_REG_##od(r), \
- .odrain_bank = 0, \
+ .odrain_bank = 1, \
.odrain_bit = 6, \
.lock_reg = PINGROUP_REG_Y(r), \
- .lock_bank = 0, \
+ .lock_bank = 1, \
.lock_bit = 7, \
.ioreset_reg = PINGROUP_REG_##ior(r), \
- .ioreset_bank = 0, \
+ .ioreset_bank = 1, \
.ioreset_bit = 8, \
.drv_reg = -1, \
}
@@ -3399,8 +3399,8 @@ static const struct tegra_function tegra30_functions[] = {
.odrain_reg = -1, \
.lock_reg = -1, \
.ioreset_reg = -1, \
- .drv_reg = ((r) - PINGROUP_REG_A), \
- .drv_bank = 1, \
+ .drv_reg = ((r) - DRV_PINGROUP_REG_A), \
+ .drv_bank = 0, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
.lpmd_bit = lpmd_b, \
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 7f2fac1..c5f7eae 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -818,10 +818,6 @@ static const __u32 acm_tty_speed[] = {
2500000, 3000000, 3500000, 4000000
};
-static const __u8 acm_tty_size[] = {
- 5, 6, 7, 8
-};
-
static void acm_tty_set_termios(struct tty_struct *tty,
struct ktermios *termios_old)
{
@@ -835,7 +831,21 @@ static void acm_tty_set_termios(struct tty_struct *tty,
newline.bParityType = termios->c_cflag & PARENB ?
(termios->c_cflag & PARODD ? 1 : 2) +
(termios->c_cflag & CMSPAR ? 2 : 0) : 0;
- newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ newline.bDataBits = 5;
+ break;
+ case CS6:
+ newline.bDataBits = 6;
+ break;
+ case CS7:
+ newline.bDataBits = 7;
+ break;
+ case CS8:
+ default:
+ newline.bDataBits = 8;
+ break;
+ }
/* FIXME: Needs to clear unsupported bits in the termios */
acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
@@ -1234,7 +1244,7 @@ made_compressed_probe:
if (usb_endpoint_xfer_int(epwrite))
usb_fill_int_urb(snd->urb, usb_dev,
- usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
+ usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
else
usb_fill_bulk_urb(snd->urb, usb_dev,
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index b8d4697..cee0c3e 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1777,6 +1777,7 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
WARN_ON_ONCE(ret);
dep->res_trans_idx = 0;
+ dep->flags &= ~DWC3_EP_BUSY;
}
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f756231..e52ddfe 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -479,7 +479,8 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
if (strstr(dmi_product_name, "Z420") ||
strstr(dmi_product_name, "Z620") ||
- strstr(dmi_product_name, "Z820"))
+ strstr(dmi_product_name, "Z820") ||
+ strstr(dmi_product_name, "Z1"))
return true;
return false;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 57de734..17ec21e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -503,11 +503,19 @@ static const struct option_blacklist_info net_intf5_blacklist = {
.reserved = BIT(5),
};
+static const struct option_blacklist_info net_intf6_blacklist = {
+ .reserved = BIT(6),
+};
+
static const struct option_blacklist_info zte_mf626_blacklist = {
.sendsetup = BIT(0) | BIT(1),
.reserved = BIT(4),
};
+static const struct option_blacklist_info zte_1255_blacklist = {
+ .reserved = BIT(3) | BIT(4),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -853,13 +861,19 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
@@ -872,7 +886,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
@@ -880,13 +895,22 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1002,18 +1026,24 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
@@ -1058,8 +1088,16 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1071,15 +1109,21 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 8b384cc..c215985 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -52,6 +52,9 @@
#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
+#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
+#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
+
static int ext4_split_extent(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path,
@@ -2829,6 +2832,9 @@ static int ext4_split_extent_at(handle_t *handle,
unsigned int ee_len, depth;
int err = 0;
+ BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
+ (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
+
ext_debug("ext4_split_extents_at: inode %lu, logical"
"block %llu\n", inode->i_ino, (unsigned long long)split);
@@ -2887,7 +2893,14 @@ static int ext4_split_extent_at(handle_t *handle,
err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
- err = ext4_ext_zeroout(inode, &orig_ex);
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+ if (split_flag & EXT4_EXT_DATA_VALID1)
+ err = ext4_ext_zeroout(inode, ex2);
+ else
+ err = ext4_ext_zeroout(inode, ex);
+ } else
+ err = ext4_ext_zeroout(inode, &orig_ex);
+
if (err)
goto fix_extent_len;
/* update the extent length and mark as initialized */
@@ -2940,12 +2953,13 @@ static int ext4_split_extent(handle_t *handle,
uninitialized = ext4_ext_is_uninitialized(ex);
if (map->m_lblk + map->m_len < ee_block + ee_len) {
- split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
- EXT4_EXT_MAY_ZEROOUT : 0;
+ split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
if (uninitialized)
split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
EXT4_EXT_MARK_UNINIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+ split_flag1 |= EXT4_EXT_DATA_VALID1;
err = ext4_split_extent_at(handle, inode, path,
map->m_lblk + map->m_len, split_flag1, flags1);
if (err)
@@ -2958,8 +2972,8 @@ static int ext4_split_extent(handle_t *handle,
return PTR_ERR(path);
if (map->m_lblk >= ee_block) {
- split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
- EXT4_EXT_MAY_ZEROOUT : 0;
+ split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
+ EXT4_EXT_DATA_VALID2);
if (uninitialized)
split_flag1 |= EXT4_EXT_MARK_UNINIT1;
if (split_flag & EXT4_EXT_MARK_UNINIT2)
@@ -3237,26 +3251,47 @@ static int ext4_split_unwritten_extents(handle_t *handle,
split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
split_flag |= EXT4_EXT_MARK_UNINIT2;
-
+ if (flags & EXT4_GET_BLOCKS_CONVERT)
+ split_flag |= EXT4_EXT_DATA_VALID2;
flags |= EXT4_GET_BLOCKS_PRE_IO;
return ext4_split_extent(handle, inode, path, map, split_flag, flags);
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path *path)
+ struct inode *inode,
+ struct ext4_map_blocks *map,
+ struct ext4_ext_path *path)
{
struct ext4_extent *ex;
+ ext4_lblk_t ee_block;
+ unsigned int ee_len;
int depth;
int err = 0;
depth = ext_depth(inode);
ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_len = ext4_ext_get_actual_len(ex);
ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
- (unsigned long long)le32_to_cpu(ex->ee_block),
- ext4_ext_get_actual_len(ex));
+ (unsigned long long)ee_block, ee_len);
+
+ /* If extent is larger than requested then split is required */
+ if (ee_block != map->m_lblk || ee_len > map->m_len) {
+ err = ext4_split_unwritten_extents(handle, inode, map, path,
+ EXT4_GET_BLOCKS_CONVERT);
+ if (err < 0)
+ goto out;
+ ext4_ext_drop_refs(path);
+ path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ goto out;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ }
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
@@ -3564,7 +3599,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
}
/* IO end_io complete, convert the filled extent to written */
if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
- ret = ext4_convert_unwritten_extents_endio(handle, inode,
+ ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
path);
if (ret >= 0) {
ext4_update_inode_fsync_trans(handle, inode, 1);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 6b0a57e..e77c4fe 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4984,8 +4984,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
end = start + (range->len >> sb->s_blocksize_bits) - 1;
minlen = range->minlen >> sb->s_blocksize_bits;
- if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)) ||
- unlikely(start >= max_blks))
+ if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
+ start >= max_blks ||
+ range->len < sb->s_blocksize)
return -EINVAL;
if (end >= max_blks)
end = max_blks - 1;
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
index d269ada..982d267 100644
--- a/fs/lockd/clntxdr.c
+++ b/fs/lockd/clntxdr.c
@@ -223,7 +223,7 @@ static void encode_nlm_stat(struct xdr_stream *xdr,
{
__be32 *p;
- BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
+ WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
p = xdr_reserve_space(xdr, 4);
*p = stat;
}
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index d27aab1..d413af3 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -67,7 +67,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
/* Obtain file pointer. Not used by FREE_ALL call. */
if (filp != NULL) {
- if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
+ error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh));
+ if (error != 0)
goto no_locks;
*filp = file;
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 64c3b31..e296572 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -45,10 +45,13 @@ static cputime64_t get_iowait_time(int cpu)
static u64 get_idle_time(int cpu)
{
- u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
+ u64 idle, idle_time = -1ULL;
+
+ if (cpu_online(cpu))
+ idle_time = get_cpu_idle_time_us(cpu, NULL);
if (idle_time == -1ULL)
- /* !NO_HZ so we can rely on cpustat.idle */
+ /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
else
idle = usecs_to_cputime64(idle_time);
@@ -58,10 +61,13 @@ static u64 get_idle_time(int cpu)
static u64 get_iowait_time(int cpu)
{
- u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);
+ u64 iowait, iowait_time = -1ULL;
+
+ if (cpu_online(cpu))
+ iowait_time = get_cpu_iowait_time_us(cpu, NULL);
if (iowait_time == -1ULL)
- /* !NO_HZ so we can rely on cpustat.iowait */
+ /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
else
iowait = usecs_to_cputime64(iowait_time);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a810987..561e130 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -82,6 +82,8 @@ static inline int is_vlan_dev(struct net_device *dev)
}
#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
+#define vlan_tx_nonzero_tag_present(__skb) \
+ (vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK))
#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -91,7 +93,7 @@ extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
-extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
+extern bool vlan_do_receive(struct sk_buff **skb);
extern struct sk_buff *vlan_untag(struct sk_buff *skb);
extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
@@ -120,10 +122,8 @@ static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
return 0;
}
-static inline bool vlan_do_receive(struct sk_buff **skb, bool last_handler)
+static inline bool vlan_do_receive(struct sk_buff **skb)
{
- if (((*skb)->vlan_tci & VLAN_VID_MASK) && last_handler)
- (*skb)->pkt_type = PACKET_OTHERHOST;
return false;
}
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 1482340..2483513 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -215,9 +215,6 @@ typedef enum {
#define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \
&& (chip->page_shift > 9))
-/* Mask to zero out the chip options, which come from the id table */
-#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR)
-
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
#define NAND_SKIP_BBTSCAN 0x00010000
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index a2092f5..b23e9cd 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -153,6 +153,7 @@ struct nlattr {
#include <linux/capability.h>
#include <linux/skbuff.h>
+#include <linux/export.h>
struct net;
@@ -226,6 +227,8 @@ struct netlink_callback {
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
void *data;
+ /* the module that dump function belong to */
+ struct module *module;
u16 family;
u16 min_dump_alloc;
unsigned int prev_seq, seq;
@@ -251,14 +254,24 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
struct netlink_dump_control {
int (*dump)(struct sk_buff *skb, struct netlink_callback *);
- int (*done)(struct netlink_callback*);
+ int (*done)(struct netlink_callback *);
void *data;
+ struct module *module;
u16 min_dump_alloc;
};
-extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- struct netlink_dump_control *control);
+extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct netlink_dump_control *control);
+static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct netlink_dump_control *control)
+{
+ if (!control->module)
+ control->module = THIS_MODULE;
+
+ return __netlink_dump_start(ssk, skb, nlh, control);
+}
#define NL_NONROOT_RECV 0x1
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 77d278d..005b507 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -114,6 +114,7 @@ struct rpc_xprt_ops {
void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
void (*rpcbind)(struct rpc_task *task);
void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
void (*connect)(struct rpc_task *task);
@@ -279,6 +280,8 @@ void xprt_connect(struct rpc_task *task);
void xprt_reserve(struct rpc_task *task);
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
int xprt_prepare_transmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
void xprt_end_transmit(struct rpc_task *task);
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 3c5363a..bd3d8b2 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -39,6 +39,7 @@ struct rdma_cm_id_stats {
struct ibnl_client_cbs {
int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
+ struct module *module;
};
int ibnl_init(void);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ed64cca..762f7cc7 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1868,9 +1868,8 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
* trading it for newcg is protected by cgroup_mutex, we're safe to drop
* it here; it will be freed under RCU.
*/
- put_css_set(oldcg);
-
set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
+ put_css_set(oldcg);
}
/**
@@ -4463,31 +4462,20 @@ static const struct file_operations proc_cgroupstats_operations = {
*
* A pointer to the shared css_set was automatically copied in
* fork.c by dup_task_struct(). However, we ignore that copy, since
- * it was not made under the protection of RCU, cgroup_mutex or
- * threadgroup_change_begin(), so it might no longer be a valid
- * cgroup pointer. cgroup_attach_task() might have already changed
- * current->cgroups, allowing the previously referenced cgroup
- * group to be removed and freed.
- *
- * Outside the pointer validity we also need to process the css_set
- * inheritance between threadgoup_change_begin() and
- * threadgoup_change_end(), this way there is no leak in any process
- * wide migration performed by cgroup_attach_proc() that could otherwise
- * miss a thread because it is too early or too late in the fork stage.
+ * it was not made under the protection of RCU or cgroup_mutex, so
+ * might no longer be a valid cgroup pointer. cgroup_attach_task() might
+ * have already changed current->cgroups, allowing the previously
+ * referenced cgroup group to be removed and freed.
*
* At the point that cgroup_fork() is called, 'current' is the parent
* task, and the passed argument 'child' points to the child task.
*/
void cgroup_fork(struct task_struct *child)
{
- /*
- * We don't need to task_lock() current because current->cgroups
- * can't be changed concurrently here. The parent obviously hasn't
- * exited and called cgroup_exit(), and we are synchronized against
- * cgroup migration through threadgroup_change_begin().
- */
+ task_lock(current);
child->cgroups = current->cgroups;
get_css_set(child->cgroups);
+ task_unlock(current);
INIT_LIST_HEAD(&child->cg_list);
}
@@ -4540,19 +4528,10 @@ void cgroup_post_fork(struct task_struct *child)
*/
if (use_task_css_set_links) {
write_lock(&css_set_lock);
- if (list_empty(&child->cg_list)) {
- /*
- * It's safe to use child->cgroups without task_lock()
- * here because we are protected through
- * threadgroup_change_begin() against concurrent
- * css_set change in cgroup_task_migrate(). Also
- * the task can't exit at that point until
- * wake_up_new_task() is called, so we are protected
- * against cgroup_exit() setting child->cgroup to
- * init_css_set.
- */
+ task_lock(child);
+ if (list_empty(&child->cg_list))
list_add(&child->cg_list, &child->cgroups->tasks);
- }
+ task_unlock(child);
write_unlock(&css_set_lock);
}
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 898a84c..b0003db 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1180,15 +1180,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1198,8 +1199,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = clamp_t(size_t, len, 1, sizeof(buf));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
}
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f364630..912613c 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -5,7 +5,7 @@
#include <linux/export.h>
#include "vlan.h"
-bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
+bool vlan_do_receive(struct sk_buff **skbp)
{
struct sk_buff *skb = *skbp;
u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
@@ -13,14 +13,8 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
struct vlan_pcpu_stats *rx_stats;
vlan_dev = vlan_find_dev(skb->dev, vlan_id);
- if (!vlan_dev) {
- /* Only the last call to vlan_do_receive() should change
- * pkt_type to PACKET_OTHERHOST
- */
- if (vlan_id && last_handler)
- skb->pkt_type = PACKET_OTHERHOST;
+ if (!vlan_dev)
return false;
- }
skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
diff --git a/net/core/dev.c b/net/core/dev.c
index 086bc2e..82ca51b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3209,18 +3209,18 @@ another_round:
ncls:
#endif
- rx_handler = rcu_dereference(skb->dev->rx_handler);
if (vlan_tx_tag_present(skb)) {
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = NULL;
}
- if (vlan_do_receive(&skb, !rx_handler))
+ if (vlan_do_receive(&skb))
goto another_round;
else if (unlikely(!skb))
goto out;
}
+ rx_handler = rcu_dereference(skb->dev->rx_handler);
if (rx_handler) {
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3240,6 +3240,9 @@ ncls:
}
}
+ if (vlan_tx_nonzero_tag_present(skb))
+ skb->pkt_type = PACKET_OTHERHOST;
+
/* deliver only exact match when indicated */
null_or_dev = deliver_exact ? skb->dev : NULL;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 73b9035..ac88107 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1285,8 +1285,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
if (!dst)
goto discard;
- __skb_pull(skb, skb_network_offset(skb));
-
if (!neigh_event_send(neigh, skb)) {
int err;
struct net_device *dev = neigh->dev;
@@ -1296,6 +1294,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
neigh_hh_init(neigh, dst);
do {
+ __skb_pull(skb, skb_network_offset(skb));
seq = read_seqbegin(&neigh->ha_lock);
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
neigh->ha, NULL, skb->len);
@@ -1326,9 +1325,8 @@ int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
unsigned int seq;
int err;
- __skb_pull(skb, skb_network_offset(skb));
-
do {
+ __skb_pull(skb, skb_network_offset(skb));
seq = read_seqbegin(&neigh->ha_lock);
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
neigh->ha, NULL, skb->len);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0cb86ce..8f2458d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -678,10 +678,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
/* When socket is gone, all binding information is lost.
- * routing might fail in this case. using iif for oif to
- * make sure we can deliver it
+ * routing might fail in this case. No choice here, if we choose to force
+ * input interface, we will misroute in case of asymmetric route.
*/
- arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
+ if (sk)
+ arg.bound_dev_if = sk->sk_bound_dev_if;
net = dev_net(skb_dst(skb)->dev);
arg.tos = ip_hdr(skb)->tos;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5ec6069..468a5ce 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3095,14 +3095,15 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
struct hlist_node *n;
hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
addr_lst) {
+ if (!net_eq(dev_net(ifa->idev->dev), net))
+ continue;
/* sync with offset */
if (p < state->offset) {
p++;
continue;
}
state->offset++;
- if (net_eq(dev_net(ifa->idev->dev), net))
- return ifa;
+ return ifa;
}
/* prepare for next bucket */
@@ -3120,18 +3121,20 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
struct hlist_node *n = &ifa->addr_lst;
hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
+ if (!net_eq(dev_net(ifa->idev->dev), net))
+ continue;
state->offset++;
- if (net_eq(dev_net(ifa->idev->dev), net))
- return ifa;
+ return ifa;
}
while (++state->bucket < IN6_ADDR_HSIZE) {
state->offset = 0;
hlist_for_each_entry_rcu_bh(ifa, n,
&inet6_addr_lst[state->bucket], addr_lst) {
+ if (!net_eq(dev_net(ifa->idev->dev), net))
+ continue;
state->offset++;
- if (net_eq(dev_net(ifa->idev->dev), net))
- return ifa;
+ return ifa;
}
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 98256cf..8a8fa2d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -896,7 +896,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
fl6.flowi6_proto = IPPROTO_TCP;
- fl6.flowi6_oif = inet6_iif(skb);
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = inet6_iif(skb);
fl6.fl6_dport = t1->dest;
fl6.fl6_sport = t1->source;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index bba6ba1..edce424 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -156,6 +156,8 @@ static void netlink_sock_destruct(struct sock *sk)
if (nlk->cb) {
if (nlk->cb->done)
nlk->cb->done(nlk->cb);
+
+ module_put(nlk->cb->module);
netlink_destroy_callback(nlk->cb);
}
@@ -1728,6 +1730,7 @@ static int netlink_dump(struct sock *sk)
nlk->cb = NULL;
mutex_unlock(nlk->cb_mutex);
+ module_put(cb->module);
netlink_destroy_callback(cb);
return 0;
@@ -1737,9 +1740,9 @@ errout_skb:
return err;
}
-int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- struct netlink_dump_control *control)
+int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct netlink_dump_control *control)
{
struct netlink_callback *cb;
struct sock *sk;
@@ -1754,6 +1757,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb->done = control->done;
cb->nlh = nlh;
cb->data = control->data;
+ cb->module = control->module;
cb->min_dump_alloc = control->min_dump_alloc;
atomic_inc(&skb->users);
cb->skb = skb;
@@ -1764,19 +1768,28 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
return -ECONNREFUSED;
}
nlk = nlk_sk(sk);
- /* A dump is in progress... */
+
mutex_lock(nlk->cb_mutex);
+ /* A dump is in progress... */
if (nlk->cb) {
mutex_unlock(nlk->cb_mutex);
netlink_destroy_callback(cb);
- sock_put(sk);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
+ /* add reference of module which cb->dump belongs to */
+ if (!try_module_get(cb->module)) {
+ mutex_unlock(nlk->cb_mutex);
+ netlink_destroy_callback(cb);
+ ret = -EPROTONOSUPPORT;
+ goto out;
+ }
+
nlk->cb = cb;
mutex_unlock(nlk->cb_mutex);
ret = netlink_dump(sk);
-
+out:
sock_put(sk);
if (ret)
@@ -1787,7 +1800,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
*/
return -EINTR;
}
-EXPORT_SYMBOL(netlink_dump_start);
+EXPORT_SYMBOL(__netlink_dump_start);
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
diff --git a/net/rds/send.c b/net/rds/send.c
index 96531d4..88eace5 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1122,7 +1122,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
rds_stats_inc(s_send_pong);
if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
- rds_send_xmit(conn);
+ queue_delayed_work(rds_wq, &conn->c_send_w, 0);
rds_message_put(rm);
return 0;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index de0b0f3..76cb304 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1406,11 +1406,11 @@ static ssize_t read_flush(struct file *file, char __user *buf,
size_t count, loff_t *ppos,
struct cache_detail *cd)
{
- char tbuf[20];
+ char tbuf[22];
unsigned long p = *ppos;
size_t len;
- sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
+ snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
len = strlen(tbuf);
if (p >= len)
return 0;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index da72492..176a24f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
return false;
}
-static void xprt_alloc_slot(struct rpc_task *task)
+void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_rqst *req;
+ spin_lock(&xprt->reserve_lock);
if (!list_empty(&xprt->free)) {
req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
list_del(&req->rq_list);
@@ -994,12 +994,29 @@ static void xprt_alloc_slot(struct rpc_task *task)
default:
task->tk_status = -EAGAIN;
}
+ spin_unlock(&xprt->reserve_lock);
return;
out_init_req:
task->tk_status = 0;
task->tk_rqstp = req;
xprt_request_init(task, xprt);
+ spin_unlock(&xprt->reserve_lock);
+}
+EXPORT_SYMBOL_GPL(xprt_alloc_slot);
+
+void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+ /* Note: grabbing the xprt_lock_write() ensures that we throttle
+ * new slot allocation if the transport is congested (i.e. when
+ * reconnecting a stream transport or when out of socket write
+ * buffer space).
+ */
+ if (xprt_lock_write(xprt, task)) {
+ xprt_alloc_slot(xprt, task);
+ xprt_release_write(xprt, task);
+ }
}
+EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
@@ -1083,20 +1100,9 @@ void xprt_reserve(struct rpc_task *task)
if (task->tk_rqstp != NULL)
return;
- /* Note: grabbing the xprt_lock_write() here is not strictly needed,
- * but ensures that we throttle new slot allocation if the transport
- * is congested (e.g. if reconnecting or if we're out of socket
- * write buffer space).
- */
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
- if (!xprt_lock_write(xprt, task))
- return;
-
- spin_lock(&xprt->reserve_lock);
- xprt_alloc_slot(task);
- spin_unlock(&xprt->reserve_lock);
- xprt_release_write(xprt, task);
+ xprt->ops->alloc_slot(xprt, task);
}
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 06cdbff..5d9202d 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
static struct rpc_xprt_ops xprt_rdma_procs = {
.reserve_xprt = xprt_rdma_reserve_xprt,
.release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */
+ .alloc_slot = xprt_alloc_slot,
.release_request = xprt_release_rqst_cong, /* ditto */
.set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */
.rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 00ff343..a4a6586 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2444,6 +2444,7 @@ static void bc_destroy(struct rpc_xprt *xprt)
static struct rpc_xprt_ops xs_local_ops = {
.reserve_xprt = xprt_reserve_xprt,
.release_xprt = xs_tcp_release_xprt,
+ .alloc_slot = xprt_alloc_slot,
.rpcbind = xs_local_rpcbind,
.set_port = xs_local_set_port,
.connect = xs_connect,
@@ -2460,6 +2461,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
.set_buffer_size = xs_udp_set_buffer_size,
.reserve_xprt = xprt_reserve_xprt_cong,
.release_xprt = xprt_release_xprt_cong,
+ .alloc_slot = xprt_alloc_slot,
.rpcbind = rpcb_getport_async,
.set_port = xs_set_port,
.connect = xs_connect,
@@ -2477,6 +2479,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
static struct rpc_xprt_ops xs_tcp_ops = {
.reserve_xprt = xprt_reserve_xprt,
.release_xprt = xs_tcp_release_xprt,
+ .alloc_slot = xprt_lock_and_alloc_slot,
.rpcbind = rpcb_getport_async,
.set_port = xs_set_port,
.connect = xs_connect,
@@ -2496,6 +2499,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
static struct rpc_xprt_ops bc_tcp_ops = {
.reserve_xprt = xprt_reserve_xprt,
.release_xprt = xprt_release_xprt,
+ .alloc_slot = xprt_alloc_slot,
.rpcbind = xs_local_rpcbind,
.buf_alloc = bc_malloc,
.buf_free = bc_free,
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/