Message-ID: <20160624173957.GB10480@kroah.com>
Date: Fri, 24 Jun 2016 10:39:57 -0700
From: Greg KH <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
torvalds@...ux-foundation.org, stable@...r.kernel.org
Cc: lwn@....net, Jiri Slaby <jslaby@...e.cz>
Subject: Re: Linux 3.14.73
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 88152f214f48..302b5ed616a6 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs:
- nr_open
- overflowuid
- overflowgid
+- pipe-user-pages-hard
+- pipe-user-pages-soft
- protected_hardlinks
- protected_symlinks
- suid_dumpable
@@ -159,6 +161,27 @@ The default is 65534.
==============================================================
+pipe-user-pages-hard:
+
+Maximum total number of pages a non-privileged user may allocate for pipes.
+Once this limit is reached, no new pipes may be allocated until usage goes
+below the limit again. When set to 0, no limit is applied, which is the default
+setting.
+
+==============================================================
+
+pipe-user-pages-soft:
+
+Maximum total number of pages a non-privileged user may allocate for pipes
+before the pipe size gets limited to a single page. Once this limit is reached,
+new pipes will be limited to a single page in size for this user in order to
+limit total memory usage, and trying to increase them using fcntl() will be
+denied until usage goes below the limit again. The default value allows
+allocating up to 1024 pipes at their default size. When set to 0, no limit
+is applied.
+
+==============================================================
+
protected_hardlinks:
A long-standing class of security issues is the hardlink-based
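
As an illustration of the two knobs documented above (an editorial sketch, not part of the diff), a small user-space program could read them back from procfs; the paths simply follow the sysctl names added in this hunk:

/* Illustrative only: read the new per-user pipe limits from procfs. */
#include <stdio.h>

static long read_limit(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* 0 means "no limit" for the hard knob. */
	printf("pipe-user-pages-soft: %ld\n",
	       read_limit("/proc/sys/fs/pipe-user-pages-soft"));
	printf("pipe-user-pages-hard: %ld\n",
	       read_limit("/proc/sys/fs/pipe-user-pages-hard"));
	return 0;
}
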
diff --git a/Makefile b/Makefile
index 6155aaf6342a..939dfae7bb5f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 14
-SUBLEVEL = 72
+SUBLEVEL = 73
EXTRAVERSION =
NAME = Remembering Coco
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 0dd3b79b15c3..ec33df500f86 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
if (ret)
return ret;
- vfp_flush_hwstate(thread);
thread->vfpstate.hard = new_vfp;
+ vfp_flush_hwstate(thread);
return 0;
}
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 3605b844ad87..efe9964ea9b4 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -51,7 +51,7 @@ extern unsigned int vced_count, vcei_count;
* User space process size: 2GB. This is hardcoded into a few places,
* so don't change it unless you know what you are doing.
*/
-#define TASK_SIZE 0x7fff8000UL
+#define TASK_SIZE 0x80000000UL
#endif
#ifdef __KERNEL__
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index d7c0acb35ec2..8d49614d600d 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs)
break;
}
- if (modify && R1(regs->iir))
+ if (ret == 0 && modify && R1(regs->iir))
regs->gr[R1(regs->iir)] = newbase;
@@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs)
if (ret)
{
+ /*
+ * The unaligned handler failed.
+ * If we were called by __get_user() or __put_user(), jump
+ * to its exception fixup handler instead of crashing.
+ */
+ if (!user_mode(regs) && fixup_exception(regs))
+ return;
+
printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
die_if_kernel("Unaligned data reference", regs, 28);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 92c538d5ed19..940b15703885 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -680,7 +680,7 @@
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
-#define SPRN_MMCR2 769
+#define SPRN_MMCR2 785
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -715,13 +715,13 @@
#define SPRN_PMC6 792
#define SPRN_PMC7 793
#define SPRN_PMC8 794
-#define SPRN_SIAR 780
-#define SPRN_SDAR 781
#define SPRN_SIER 784
#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
+#define SPRN_SIAR 796
+#define SPRN_SDAR 797
#define SPRN_TACR 888
#define SPRN_TCSCR 889
#define SPRN_CSIGR 890
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 83da53fde6b5..6700b83b7259 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -615,29 +615,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
int config_addr;
int ret;
+ /* Waiting 0.2s maximum before skipping configuration */
+ int max_wait = 200;
/* Figure out the PE address */
config_addr = pe->config_addr;
if (pe->addr)
config_addr = pe->addr;
- /* Use new configure-pe function, if supported */
- if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
- ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
- config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid));
- } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
- ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
- config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid));
- } else {
- return -EFAULT;
- }
+ while (max_wait > 0) {
+ /* Use new configure-pe function, if supported */
+ if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+ ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
+ config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid));
+ } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
+ ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
+ config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid));
+ } else {
+ return -EFAULT;
+ }
- if (ret)
- pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
- __func__, pe->phb->global_number, pe->addr, ret);
+ if (!ret)
+ return ret;
+
+ /*
+ * If RTAS returns a delay value that's above 100ms, cut it
+ * down to 100ms in case firmware made a mistake. For more
+ * on how these delay values work see rtas_busy_delay_time
+ */
+ if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
+ ret <= RTAS_EXTENDED_DELAY_MAX)
+ ret = RTAS_EXTENDED_DELAY_MIN+2;
+
+ max_wait -= rtas_busy_delay_time(ret);
+
+ if (max_wait < 0)
+ break;
+
+ rtas_busy_delay(ret);
+ }
+ pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+ __func__, pe->phb->global_number, pe->addr, ret);
return ret;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8db66424be97..6cb4ce77df19 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3029,6 +3029,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
if (dbgregs->flags)
return -EINVAL;
+ if (dbgregs->dr6 & ~0xffffffffull)
+ return -EINVAL;
+ if (dbgregs->dr7 & ~0xffffffffull)
+ return -EINVAL;
+
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = dbgregs->dr6;
kvm_update_dr6(vcpu);
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 0237ab58f242..a39ee43d1a79 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -123,6 +123,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
unsigned int unit;
+ u32 unit_size;
int ret;
if (!ctx->u.aes.key_len)
@@ -134,11 +135,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!req->info)
return -EINVAL;
- for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
- if (!(req->nbytes & (unit_size_map[unit].size - 1)))
- break;
+ unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+ if (req->nbytes <= unit_size_map[0].size) {
+ for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+ if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+ unit_size = unit_size_map[unit].value;
+ break;
+ }
+ }
+ }
- if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+ if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
(ctx->u.aes.key_len != AES_KEYSIZE_128)) {
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
@@ -159,7 +166,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
: CCP_AES_ACTION_DECRYPT;
- rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+ rctx->cmd.u.xts.unit_size = unit_size;
rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
rctx->cmd.u.xts.iv = &rctx->iv_sg;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 7645a3ce3854..b2af06444c2d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -451,6 +451,17 @@ fail:
return rc;
}
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ /* All our existing PIO buffers went away */
+ efx_for_each_channel(channel, efx)
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->piobuf = NULL;
+}
+
#else /* !EFX_USE_PIO */
static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
@@ -467,6 +478,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+}
+
#endif /* EFX_USE_PIO */
static void efx_ef10_remove(struct efx_nic *efx)
@@ -698,6 +713,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
+ efx_ef10_forget_old_piobufs(efx);
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
diff --git a/fs/dcache.c b/fs/dcache.c
index 9b235362efcd..47c06888dc05 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1500,7 +1500,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
struct dentry *dentry = __d_alloc(parent->d_sb, name);
if (!dentry)
return NULL;
-
+ dentry->d_flags |= DCACHE_RCUACCESS;
spin_lock(&parent->d_lock);
/*
* don't need child lock because it is not subject
@@ -2352,7 +2352,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
BUG_ON(!d_unhashed(entry));
hlist_bl_lock(b);
- entry->d_flags |= DCACHE_RCUACCESS;
hlist_bl_add_head_rcu(&entry->d_hash, b);
hlist_bl_unlock(b);
}
@@ -2536,6 +2535,7 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
/* ... and switch the parents */
if (IS_ROOT(dentry)) {
+ dentry->d_flags |= DCACHE_RCUACCESS;
dentry->d_parent = target->d_parent;
target->d_parent = target;
INIT_LIST_HEAD(&target->d_child);
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index f1ea610362c6..9b661a4ccee7 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mount.h>
+#include <linux/file.h>
#include "ecryptfs_kernel.h"
struct ecryptfs_open_req {
@@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
flags |= IS_RDONLY(lower_dentry->d_inode) ? O_RDONLY : O_RDWR;
(*lower_file) = dentry_open(&req.path, flags, cred);
if (!IS_ERR(*lower_file))
- goto out;
+ goto have_file;
if ((flags & O_ACCMODE) == O_RDONLY) {
rc = PTR_ERR((*lower_file));
goto out;
@@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
mutex_unlock(&ecryptfs_kthread_ctl.mux);
wake_up(&ecryptfs_kthread_ctl.wait);
wait_for_completion(&req.done);
- if (IS_ERR(*lower_file))
+ if (IS_ERR(*lower_file)) {
rc = PTR_ERR(*lower_file);
+ goto out;
+ }
+have_file:
+ if ((*lower_file)->f_op->mmap == NULL) {
+ fput(*lower_file);
+ *lower_file = NULL;
+ rc = -EMEDIUMTYPE;
+ }
out:
return rc;
}
diff --git a/fs/pipe.c b/fs/pipe.c
index a03801186366..fff8057a0809 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -39,6 +39,12 @@ unsigned int pipe_max_size = 1048576;
*/
unsigned int pipe_min_size = PAGE_SIZE;
+/* Maximum allocatable pages per user. Hard limit is unset by default, soft
+ * matches default values.
+ */
+unsigned long pipe_user_pages_hard;
+unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
+
/*
* We use a start+len construction, which provides full use of the
* allocated memory.
@@ -795,20 +801,49 @@ pipe_fasync(int fd, struct file *filp, int on)
return retval;
}
+static void account_pipe_buffers(struct pipe_inode_info *pipe,
+ unsigned long old, unsigned long new)
+{
+ atomic_long_add(new - old, &pipe->user->pipe_bufs);
+}
+
+static bool too_many_pipe_buffers_soft(struct user_struct *user)
+{
+ return pipe_user_pages_soft &&
+ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
+}
+
+static bool too_many_pipe_buffers_hard(struct user_struct *user)
+{
+ return pipe_user_pages_hard &&
+ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
+}
+
struct pipe_inode_info *alloc_pipe_info(void)
{
struct pipe_inode_info *pipe;
pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
if (pipe) {
- pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
+ unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
+ struct user_struct *user = get_current_user();
+
+ if (!too_many_pipe_buffers_hard(user)) {
+ if (too_many_pipe_buffers_soft(user))
+ pipe_bufs = 1;
+ pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
+ }
+
if (pipe->bufs) {
init_waitqueue_head(&pipe->wait);
pipe->r_counter = pipe->w_counter = 1;
- pipe->buffers = PIPE_DEF_BUFFERS;
+ pipe->buffers = pipe_bufs;
+ pipe->user = user;
+ account_pipe_buffers(pipe, 0, pipe_bufs);
mutex_init(&pipe->mutex);
return pipe;
}
+ free_uid(user);
kfree(pipe);
}
@@ -819,6 +854,8 @@ void free_pipe_info(struct pipe_inode_info *pipe)
{
int i;
+ account_pipe_buffers(pipe, pipe->buffers, 0);
+ free_uid(pipe->user);
for (i = 0; i < pipe->buffers; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
@@ -1209,6 +1246,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
}
+ account_pipe_buffers(pipe, pipe->buffers, nr_pages);
pipe->curbuf = 0;
kfree(pipe->bufs);
pipe->bufs = bufs;
@@ -1280,6 +1318,11 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
ret = -EPERM;
goto out;
+ } else if ((too_many_pipe_buffers_hard(pipe->user) ||
+ too_many_pipe_buffers_soft(pipe->user)) &&
+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto out;
}
ret = pipe_set_size(pipe, nr_pages);
break;
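
From user space (again an editorial sketch, not part of the diff), the effect of the pipe_fcntl() change above is that an unprivileged user who is over the soft or hard threshold sees F_SETPIPE_SZ fail with EPERM instead of getting a larger pipe; a minimal probe might look like this:

/* Illustrative only: probe F_SETPIPE_SZ behaviour under the new limits. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	int ret;

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}

	/* Ask for a 1 MiB pipe; over-limit users get EPERM here. */
	ret = fcntl(fds[0], F_SETPIPE_SZ, 1 << 20);
	if (ret < 0)
		printf("F_SETPIPE_SZ refused: %s\n", strerror(errno));
	else
		printf("pipe resized to %d bytes\n", ret);

	close(fds[0]);
	close(fds[1]);
	return 0;
}
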
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index fb8579d35cd4..47533013a772 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3098,7 +3098,7 @@ xfs_iflush(
*/
error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
0);
- if (error == -EAGAIN) {
+ if (error == EAGAIN) {
xfs_ifunlock(ip);
return error;
}
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index a3e215bb0241..7741efa43b35 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -239,11 +239,18 @@ void xt_unregister_match(struct xt_match *target);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);
+int xt_check_entry_offsets(const void *base, const char *elems,
+ unsigned int target_offset,
+ unsigned int next_offset);
+
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
+void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+ struct xt_counters_info *info, bool compat);
+
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *table,
struct xt_table_info *bootstrap,
@@ -421,7 +428,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
int xt_compat_match_offset(const struct xt_match *match);
-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
void __user **dstptr, unsigned int *size);
@@ -431,6 +438,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
void __user **dstptr, unsigned int *size);
+int xt_compat_check_entry_offsets(const void *base, const char *elems,
+ unsigned int target_offset,
+ unsigned int next_offset);
#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index ab5752692113..b3374f63bc36 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -42,6 +42,7 @@ struct pipe_buffer {
* @fasync_readers: reader side fasync
* @fasync_writers: writer side fasync
* @bufs: the circular array of pipe buffers
+ * @user: the user who created this pipe
**/
struct pipe_inode_info {
struct mutex mutex;
@@ -57,6 +58,7 @@ struct pipe_inode_info {
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
struct pipe_buffer *bufs;
+ struct user_struct *user;
};
/*
@@ -140,6 +142,8 @@ void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
extern unsigned int pipe_max_size, pipe_min_size;
+extern unsigned long pipe_user_pages_hard;
+extern unsigned long pipe_user_pages_soft;
int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7d6152a6700c..77ac8b6a5b68 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -756,6 +756,7 @@ struct user_struct {
#endif
unsigned long locked_shm; /* How many pages of mlocked shm ? */
unsigned long unix_inflight; /* How many files in flight in unix sockets */
+ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
#ifdef CONFIG_KEYS
struct key *uid_keyring; /* UID specific keyring */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c1b26e176aa6..df9eab6928d9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1668,6 +1668,20 @@ static struct ctl_table fs_table[] = {
.proc_handler = &pipe_proc_fn,
.extra1 = &pipe_min_size,
},
+ {
+ .procname = "pipe-user-pages-hard",
+ .data = &pipe_user_pages_hard,
+ .maxlen = sizeof(pipe_user_pages_hard),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
+ .procname = "pipe-user-pages-soft",
+ .data = &pipe_user_pages_soft,
+ .maxlen = sizeof(pipe_user_pages_soft),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
{ }
};
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f95b6f93814b..42155b1af373 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -355,11 +355,25 @@ unsigned int arpt_do_table(struct sk_buff *skb,
}
/* All zeroes == unconditional rule. */
-static inline bool unconditional(const struct arpt_arp *arp)
+static inline bool unconditional(const struct arpt_entry *e)
{
static const struct arpt_arp uncond;
- return memcmp(arp, &uncond, sizeof(uncond)) == 0;
+ return e->target_offset == sizeof(struct arpt_entry) &&
+ memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
+}
+
+static bool find_jump_target(const struct xt_table_info *t,
+ const void *entry0,
+ const struct arpt_entry *target)
+{
+ struct arpt_entry *iter;
+
+ xt_entry_foreach(iter, entry0, t->size) {
+ if (iter == target)
+ return true;
+ }
+ return false;
}
/* Figures out from what hook each rule can be called: returns 0 if
@@ -398,11 +412,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
/* Unconditional return/END. */
- if ((e->target_offset == sizeof(struct arpt_entry) &&
+ if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
- t->verdict < 0 && unconditional(&e->arp)) ||
- visited) {
+ t->verdict < 0) || visited) {
unsigned int oldpos, size;
if ((strcmp(t->target.u.user.name,
@@ -435,6 +448,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
size = e->next_offset;
e = (struct arpt_entry *)
(entry0 + pos + size);
+ if (pos + size >= newinfo->size)
+ return 0;
e->counters.pcnt = pos;
pos += size;
} else {
@@ -454,9 +469,15 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
/* This a jump; chase it. */
duprintf("Jump rule %u -> %u\n",
pos, newpos);
+ e = (struct arpt_entry *)
+ (entry0 + newpos);
+ if (!find_jump_target(newinfo, entry0, e))
+ return 0;
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
+ if (newpos >= newinfo->size)
+ return 0;
}
e = (struct arpt_entry *)
(entry0 + newpos);
@@ -470,25 +491,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
return 1;
}
-static inline int check_entry(const struct arpt_entry *e, const char *name)
-{
- const struct xt_entry_target *t;
-
- if (!arp_checkentry(&e->arp)) {
- duprintf("arp_tables: arp check failed %p %s.\n", e, name);
- return -EINVAL;
- }
-
- if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
- return -EINVAL;
-
- t = arpt_get_target_c(e);
- if (e->target_offset + t->u.target_size > e->next_offset)
- return -EINVAL;
-
- return 0;
-}
-
static inline int check_target(struct arpt_entry *e, const char *name)
{
struct xt_entry_target *t = arpt_get_target(e);
@@ -518,10 +520,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
struct xt_target *target;
int ret;
- ret = check_entry(e, name);
- if (ret)
- return ret;
-
t = arpt_get_target(e);
target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
t->u.user.revision);
@@ -547,7 +545,7 @@ static bool check_underflow(const struct arpt_entry *e)
const struct xt_entry_target *t;
unsigned int verdict;
- if (!unconditional(&e->arp))
+ if (!unconditional(e))
return false;
t = arpt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -566,9 +564,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
+ int err;
if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
@@ -580,6 +580,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
return -EINVAL;
}
+ if (!arp_checkentry(&e->arp))
+ return -EINVAL;
+
+ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+ e->next_offset);
+ if (err)
+ return err;
+
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@@ -588,9 +596,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
@@ -680,10 +688,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
}
}
- if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
- duprintf("Looping hook\n");
+ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
return -ELOOP;
- }
/* Finally, each sanity check must pass */
i = 0;
@@ -1076,6 +1082,9 @@ static int do_replace(struct net *net, const void __user *user,
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
@@ -1116,56 +1125,18 @@ static int do_add_counters(struct net *net, const void __user *user,
unsigned int i, curcpu;
struct xt_counters_info tmp;
struct xt_counters *paddc;
- unsigned int num_counters;
- const char *name;
- int size;
- void *ptmp;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
struct arpt_entry *iter;
unsigned int addend;
-#ifdef CONFIG_COMPAT
- struct compat_xt_counters_info compat_tmp;
- if (compat) {
- ptmp = &compat_tmp;
- size = sizeof(struct compat_xt_counters_info);
- } else
-#endif
- {
- ptmp = &tmp;
- size = sizeof(struct xt_counters_info);
- }
+ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+ if (IS_ERR(paddc))
+ return PTR_ERR(paddc);
- if (copy_from_user(ptmp, user, size) != 0)
- return -EFAULT;
-
-#ifdef CONFIG_COMPAT
- if (compat) {
- num_counters = compat_tmp.num_counters;
- name = compat_tmp.name;
- } else
-#endif
- {
- num_counters = tmp.num_counters;
- name = tmp.name;
- }
-
- if (len != size + num_counters * sizeof(struct xt_counters))
- return -EINVAL;
-
- paddc = vmalloc(len - size);
- if (!paddc)
- return -ENOMEM;
-
- if (copy_from_user(paddc, user + size, len - size) != 0) {
- ret = -EFAULT;
- goto free;
- }
-
- t = xt_find_table_lock(net, NFPROTO_ARP, name);
+ t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
if (IS_ERR_OR_NULL(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
@@ -1173,7 +1144,7 @@ static int do_add_counters(struct net *net, const void __user *user,
local_bh_disable();
private = t->private;
- if (private->number != num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
@@ -1199,6 +1170,18 @@ static int do_add_counters(struct net *net, const void __user *user,
}
#ifdef CONFIG_COMPAT
+struct compat_arpt_replace {
+ char name[XT_TABLE_MAXNAMELEN];
+ u32 valid_hooks;
+ u32 num_entries;
+ u32 size;
+ u32 hook_entry[NF_ARP_NUMHOOKS];
+ u32 underflow[NF_ARP_NUMHOOKS];
+ u32 num_counters;
+ compat_uptr_t counters;
+ struct compat_arpt_entry entries[0];
+};
+
static inline void compat_release_entry(struct compat_arpt_entry *e)
{
struct xt_entry_target *t;
@@ -1207,24 +1190,22 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
module_put(t->u.kernel.target->me);
}
-static inline int
+static int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
- const unsigned char *limit,
- const unsigned int *hook_entries,
- const unsigned int *underflows,
- const char *name)
+ const unsigned char *limit)
{
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
- int ret, off, h;
+ int ret, off;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
@@ -1236,8 +1217,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
return -EINVAL;
}
- /* For purposes of check_entry casting the compat entry is fine */
- ret = check_entry((struct arpt_entry *)e, name);
+ if (!arp_checkentry(&e->arp))
+ return -EINVAL;
+
+ ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
+ e->next_offset);
if (ret)
return ret;
@@ -1261,17 +1245,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
if (ret)
goto release_target;
- /* Check hooks & underflows */
- for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
- if ((unsigned char *)e - base == hook_entries[h])
- newinfo->hook_entry[h] = hook_entries[h];
- if ((unsigned char *)e - base == underflows[h])
- newinfo->underflow[h] = underflows[h];
- }
-
- /* Clear counters and comefrom */
- memset(&e->counters, 0, sizeof(e->counters));
- e->comefrom = 0;
return 0;
release_target:
@@ -1280,18 +1253,17 @@ out:
return ret;
}
-static int
+static void
compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
- unsigned int *size, const char *name,
+ unsigned int *size,
struct xt_table_info *newinfo, unsigned char *base)
{
struct xt_entry_target *t;
struct xt_target *target;
struct arpt_entry *de;
unsigned int origsize;
- int ret, h;
+ int h;
- ret = 0;
origsize = *size;
de = (struct arpt_entry *)*dstptr;
memcpy(de, e, sizeof(struct arpt_entry));
@@ -1312,144 +1284,81 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
}
- return ret;
}
-static int translate_compat_table(const char *name,
- unsigned int valid_hooks,
- struct xt_table_info **pinfo,
+static int translate_compat_table(struct xt_table_info **pinfo,
void **pentry0,
- unsigned int total_size,
- unsigned int number,
- unsigned int *hook_entries,
- unsigned int *underflows)
+ const struct compat_arpt_replace *compatr)
{
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
struct compat_arpt_entry *iter0;
- struct arpt_entry *iter1;
+ struct arpt_replace repl;
unsigned int size;
int ret = 0;
info = *pinfo;
entry0 = *pentry0;
- size = total_size;
- info->number = number;
-
- /* Init all hooks to impossible value. */
- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
- info->hook_entry[i] = 0xFFFFFFFF;
- info->underflow[i] = 0xFFFFFFFF;
- }
+ size = compatr->size;
+ info->number = compatr->num_entries;
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(NFPROTO_ARP);
- xt_compat_init_offsets(NFPROTO_ARP, number);
+ xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
/* Walk through entries, checking offsets. */
- xt_entry_foreach(iter0, entry0, total_size) {
+ xt_entry_foreach(iter0, entry0, compatr->size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
entry0,
- entry0 + total_size,
- hook_entries,
- underflows,
- name);
+ entry0 + compatr->size);
if (ret != 0)
goto out_unlock;
++j;
}
ret = -EINVAL;
- if (j != number) {
+ if (j != compatr->num_entries) {
duprintf("translate_compat_table: %u not %u entries\n",
- j, number);
+ j, compatr->num_entries);
goto out_unlock;
}
- /* Check hooks all assigned */
- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
- /* Only hooks which are valid */
- if (!(valid_hooks & (1 << i)))
- continue;
- if (info->hook_entry[i] == 0xFFFFFFFF) {
- duprintf("Invalid hook entry %u %u\n",
- i, hook_entries[i]);
- goto out_unlock;
- }
- if (info->underflow[i] == 0xFFFFFFFF) {
- duprintf("Invalid underflow %u %u\n",
- i, underflows[i]);
- goto out_unlock;
- }
- }
-
ret = -ENOMEM;
newinfo = xt_alloc_table_info(size);
if (!newinfo)
goto out_unlock;
- newinfo->number = number;
+ newinfo->number = compatr->num_entries;
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
newinfo->hook_entry[i] = info->hook_entry[i];
newinfo->underflow[i] = info->underflow[i];
}
entry1 = newinfo->entries[raw_smp_processor_id()];
pos = entry1;
- size = total_size;
- xt_entry_foreach(iter0, entry0, total_size) {
- ret = compat_copy_entry_from_user(iter0, &pos, &size,
- name, newinfo, entry1);
- if (ret != 0)
- break;
- }
+ size = compatr->size;
+ xt_entry_foreach(iter0, entry0, compatr->size)
+ compat_copy_entry_from_user(iter0, &pos, &size,
+ newinfo, entry1);
+
+ /* all module references in entry0 are now gone */
+
xt_compat_flush_offsets(NFPROTO_ARP);
xt_compat_unlock(NFPROTO_ARP);
- if (ret)
- goto free_newinfo;
- ret = -ELOOP;
- if (!mark_source_chains(newinfo, valid_hooks, entry1))
- goto free_newinfo;
+ memcpy(&repl, compatr, sizeof(*compatr));
- i = 0;
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- ret = check_target(iter1, name);
- if (ret != 0)
- break;
- ++i;
- if (strcmp(arpt_get_target(iter1)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
- }
- if (ret) {
- /*
- * The first i matches need cleanup_entry (calls ->destroy)
- * because they had called ->check already. The other j-i
- * entries need only release.
- */
- int skip = i;
- j -= i;
- xt_entry_foreach(iter0, entry0, newinfo->size) {
- if (skip-- > 0)
- continue;
- if (j-- == 0)
- break;
- compat_release_entry(iter0);
- }
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- if (i-- == 0)
- break;
- cleanup_entry(iter1);
- }
- xt_free_table_info(newinfo);
- return ret;
+ for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+ repl.hook_entry[i] = newinfo->hook_entry[i];
+ repl.underflow[i] = newinfo->underflow[i];
}
- /* And one copy for every other CPU */
- for_each_possible_cpu(i)
- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
- memcpy(newinfo->entries[i], entry1, newinfo->size);
+ repl.num_counters = 0;
+ repl.counters = NULL;
+ repl.size = newinfo->size;
+ ret = translate_table(newinfo, entry1, &repl);
+ if (ret)
+ goto free_newinfo;
*pinfo = newinfo;
*pentry0 = entry1;
@@ -1458,31 +1367,18 @@ static int translate_compat_table(const char *name,
free_newinfo:
xt_free_table_info(newinfo);
-out:
- xt_entry_foreach(iter0, entry0, total_size) {
+ return ret;
+out_unlock:
+ xt_compat_flush_offsets(NFPROTO_ARP);
+ xt_compat_unlock(NFPROTO_ARP);
+ xt_entry_foreach(iter0, entry0, compatr->size) {
if (j-- == 0)
break;
compat_release_entry(iter0);
}
return ret;
-out_unlock:
- xt_compat_flush_offsets(NFPROTO_ARP);
- xt_compat_unlock(NFPROTO_ARP);
- goto out;
}
-struct compat_arpt_replace {
- char name[XT_TABLE_MAXNAMELEN];
- u32 valid_hooks;
- u32 num_entries;
- u32 size;
- u32 hook_entry[NF_ARP_NUMHOOKS];
- u32 underflow[NF_ARP_NUMHOOKS];
- u32 num_counters;
- compat_uptr_t counters;
- struct compat_arpt_entry entries[0];
-};
-
static int compat_do_replace(struct net *net, void __user *user,
unsigned int len)
{
@@ -1500,6 +1396,9 @@ static int compat_do_replace(struct net *net, void __user *user,
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
@@ -1513,10 +1412,7 @@ static int compat_do_replace(struct net *net, void __user *user,
goto free_newinfo;
}
- ret = translate_compat_table(tmp.name, tmp.valid_hooks,
- &newinfo, &loc_cpu_entry, tmp.size,
- tmp.num_entries, tmp.hook_entry,
- tmp.underflow);
+ ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 99e810f84671..82b6bc4f9167 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ipt_ip *ip)
+static inline bool unconditional(const struct ipt_entry *e)
{
static const struct ipt_ip uncond;
- return memcmp(ip, &uncond, sizeof(uncond)) == 0;
+ return e->target_offset == sizeof(struct ipt_entry) &&
+ memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}
@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
} else if (s == e) {
(*rulenum)++;
- if (s->target_offset == sizeof(struct ipt_entry) &&
+ if (unconditional(s) &&
strcmp(t->target.u.kernel.target->name,
XT_STANDARD_TARGET) == 0 &&
- t->verdict < 0 &&
- unconditional(&s->ip)) {
+ t->verdict < 0) {
/* Tail of chains: STANDARD target (return/policy) */
*comment = *chainname == hookname
? comments[NF_IP_TRACE_COMMENT_POLICY]
@@ -439,6 +439,19 @@ ipt_do_table(struct sk_buff *skb,
#endif
}
+static bool find_jump_target(const struct xt_table_info *t,
+ const void *entry0,
+ const struct ipt_entry *target)
+{
+ struct ipt_entry *iter;
+
+ xt_entry_foreach(iter, entry0, t->size) {
+ if (iter == target)
+ return true;
+ }
+ return false;
+}
+
/* Figures out from what hook each rule can be called: returns 0 if
there are loops. Puts hook bitmask in comefrom. */
static int
@@ -472,11 +485,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
/* Unconditional return/END. */
- if ((e->target_offset == sizeof(struct ipt_entry) &&
+ if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
- t->verdict < 0 && unconditional(&e->ip)) ||
- visited) {
+ t->verdict < 0) || visited) {
unsigned int oldpos, size;
if ((strcmp(t->target.u.user.name,
@@ -517,6 +529,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
size = e->next_offset;
e = (struct ipt_entry *)
(entry0 + pos + size);
+ if (pos + size >= newinfo->size)
+ return 0;
e->counters.pcnt = pos;
pos += size;
} else {
@@ -535,9 +549,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
/* This a jump; chase it. */
duprintf("Jump rule %u -> %u\n",
pos, newpos);
+ e = (struct ipt_entry *)
+ (entry0 + newpos);
+ if (!find_jump_target(newinfo, entry0, e))
+ return 0;
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
+ if (newpos >= newinfo->size)
+ return 0;
}
e = (struct ipt_entry *)
(entry0 + newpos);
@@ -565,27 +585,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
}
static int
-check_entry(const struct ipt_entry *e, const char *name)
-{
- const struct xt_entry_target *t;
-
- if (!ip_checkentry(&e->ip)) {
- duprintf("ip check failed %p %s.\n", e, name);
- return -EINVAL;
- }
-
- if (e->target_offset + sizeof(struct xt_entry_target) >
- e->next_offset)
- return -EINVAL;
-
- t = ipt_get_target_c(e);
- if (e->target_offset + t->u.target_size > e->next_offset)
- return -EINVAL;
-
- return 0;
-}
-
-static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
const struct ipt_ip *ip = par->entryinfo;
@@ -662,10 +661,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;
- ret = check_entry(e, name);
- if (ret)
- return ret;
-
j = 0;
mtpar.net = net;
mtpar.table = name;
@@ -709,7 +704,7 @@ static bool check_underflow(const struct ipt_entry *e)
const struct xt_entry_target *t;
unsigned int verdict;
- if (!unconditional(&e->ip))
+ if (!unconditional(e))
return false;
t = ipt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -729,9 +724,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
+ int err;
if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
@@ -743,6 +740,14 @@ check_entry_size_and_hooks(struct ipt_entry *e,
return -EINVAL;
}
+ if (!ip_checkentry(&e->ip))
+ return -EINVAL;
+
+ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+ e->next_offset);
+ if (err)
+ return err;
+
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@@ -751,9 +756,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
@@ -1263,6 +1268,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
@@ -1304,56 +1312,18 @@ do_add_counters(struct net *net, const void __user *user,
unsigned int i, curcpu;
struct xt_counters_info tmp;
struct xt_counters *paddc;
- unsigned int num_counters;
- const char *name;
- int size;
- void *ptmp;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
struct ipt_entry *iter;
unsigned int addend;
-#ifdef CONFIG_COMPAT
- struct compat_xt_counters_info compat_tmp;
-
- if (compat) {
- ptmp = &compat_tmp;
- size = sizeof(struct compat_xt_counters_info);
- } else
-#endif
- {
- ptmp = &tmp;
- size = sizeof(struct xt_counters_info);
- }
-
- if (copy_from_user(ptmp, user, size) != 0)
- return -EFAULT;
-#ifdef CONFIG_COMPAT
- if (compat) {
- num_counters = compat_tmp.num_counters;
- name = compat_tmp.name;
- } else
-#endif
- {
- num_counters = tmp.num_counters;
- name = tmp.name;
- }
+ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+ if (IS_ERR(paddc))
+ return PTR_ERR(paddc);
- if (len != size + num_counters * sizeof(struct xt_counters))
- return -EINVAL;
-
- paddc = vmalloc(len - size);
- if (!paddc)
- return -ENOMEM;
-
- if (copy_from_user(paddc, user + size, len - size) != 0) {
- ret = -EFAULT;
- goto free;
- }
-
- t = xt_find_table_lock(net, AF_INET, name);
+ t = xt_find_table_lock(net, AF_INET, tmp.name);
if (IS_ERR_OR_NULL(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
@@ -1361,7 +1331,7 @@ do_add_counters(struct net *net, const void __user *user,
local_bh_disable();
private = t->private;
- if (private->number != num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
@@ -1440,7 +1410,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
static int
compat_find_calc_match(struct xt_entry_match *m,
- const char *name,
const struct ipt_ip *ip,
unsigned int hookmask,
int *size)
@@ -1476,21 +1445,19 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
- const unsigned char *limit,
- const unsigned int *hook_entries,
- const unsigned int *underflows,
- const char *name)
+ const unsigned char *limit)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
- int ret, off, h;
+ int ret, off;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
- (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
@@ -1502,8 +1469,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
return -EINVAL;
}
- /* For purposes of check_entry casting the compat entry is fine */
- ret = check_entry((struct ipt_entry *)e, name);
+ if (!ip_checkentry(&e->ip))
+ return -EINVAL;
+
+ ret = xt_compat_check_entry_offsets(e, e->elems,
+ e->target_offset, e->next_offset);
if (ret)
return ret;
@@ -1511,8 +1481,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
- ret = compat_find_calc_match(ematch, name,
- &e->ip, e->comefrom, &off);
+ ret = compat_find_calc_match(ematch, &e->ip, e->comefrom,
+ &off);
if (ret != 0)
goto release_matches;
++j;
@@ -1535,17 +1505,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
if (ret)
goto out;
- /* Check hooks & underflows */
- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
- if ((unsigned char *)e - base == hook_entries[h])
- newinfo->hook_entry[h] = hook_entries[h];
- if ((unsigned char *)e - base == underflows[h])
- newinfo->underflow[h] = underflows[h];
- }
-
- /* Clear counters and comefrom */
- memset(&e->counters, 0, sizeof(e->counters));
- e->comefrom = 0;
return 0;
out:
@@ -1559,19 +1518,18 @@ release_matches:
return ret;
}
-static int
+static void
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
- unsigned int *size, const char *name,
+ unsigned int *size,
struct xt_table_info *newinfo, unsigned char *base)
{
struct xt_entry_target *t;
struct xt_target *target;
struct ipt_entry *de;
unsigned int origsize;
- int ret, h;
+ int h;
struct xt_entry_match *ematch;
- ret = 0;
origsize = *size;
de = (struct ipt_entry *)*dstptr;
memcpy(de, e, sizeof(struct ipt_entry));
@@ -1580,198 +1538,104 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
*dstptr += sizeof(struct ipt_entry);
*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
- xt_ematch_foreach(ematch, e) {
- ret = xt_compat_match_from_user(ematch, dstptr, size);
- if (ret != 0)
- return ret;
- }
+ xt_ematch_foreach(ematch, e)
+ xt_compat_match_from_user(ematch, dstptr, size);
+
de->target_offset = e->target_offset - (origsize - *size);
t = compat_ipt_get_target(e);
target = t->u.kernel.target;
xt_compat_target_from_user(t, dstptr, size);
de->next_offset = e->next_offset - (origsize - *size);
+
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)de - base < newinfo->hook_entry[h])
newinfo->hook_entry[h] -= origsize - *size;
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
}
- return ret;
-}
-
-static int
-compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
-{
- struct xt_entry_match *ematch;
- struct xt_mtchk_param mtpar;
- unsigned int j;
- int ret = 0;
-
- j = 0;
- mtpar.net = net;
- mtpar.table = name;
- mtpar.entryinfo = &e->ip;
- mtpar.hook_mask = e->comefrom;
- mtpar.family = NFPROTO_IPV4;
- xt_ematch_foreach(ematch, e) {
- ret = check_match(ematch, &mtpar);
- if (ret != 0)
- goto cleanup_matches;
- ++j;
- }
-
- ret = check_target(e, net, name);
- if (ret)
- goto cleanup_matches;
- return 0;
-
- cleanup_matches:
- xt_ematch_foreach(ematch, e) {
- if (j-- == 0)
- break;
- cleanup_match(ematch, net);
- }
- return ret;
}
static int
translate_compat_table(struct net *net,
- const char *name,
- unsigned int valid_hooks,
struct xt_table_info **pinfo,
void **pentry0,
- unsigned int total_size,
- unsigned int number,
- unsigned int *hook_entries,
- unsigned int *underflows)
+ const struct compat_ipt_replace *compatr)
{
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
struct compat_ipt_entry *iter0;
- struct ipt_entry *iter1;
+ struct ipt_replace repl;
unsigned int size;
int ret;
info = *pinfo;
entry0 = *pentry0;
- size = total_size;
- info->number = number;
-
- /* Init all hooks to impossible value. */
- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- info->hook_entry[i] = 0xFFFFFFFF;
- info->underflow[i] = 0xFFFFFFFF;
- }
+ size = compatr->size;
+ info->number = compatr->num_entries;
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(AF_INET);
- xt_compat_init_offsets(AF_INET, number);
+ xt_compat_init_offsets(AF_INET, compatr->num_entries);
/* Walk through entries, checking offsets. */
- xt_entry_foreach(iter0, entry0, total_size) {
+ xt_entry_foreach(iter0, entry0, compatr->size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
entry0,
- entry0 + total_size,
- hook_entries,
- underflows,
- name);
+ entry0 + compatr->size);
if (ret != 0)
goto out_unlock;
++j;
}
ret = -EINVAL;
- if (j != number) {
+ if (j != compatr->num_entries) {
duprintf("translate_compat_table: %u not %u entries\n",
- j, number);
+ j, compatr->num_entries);
goto out_unlock;
}
- /* Check hooks all assigned */
- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- /* Only hooks which are valid */
- if (!(valid_hooks & (1 << i)))
- continue;
- if (info->hook_entry[i] == 0xFFFFFFFF) {
- duprintf("Invalid hook entry %u %u\n",
- i, hook_entries[i]);
- goto out_unlock;
- }
- if (info->underflow[i] == 0xFFFFFFFF) {
- duprintf("Invalid underflow %u %u\n",
- i, underflows[i]);
- goto out_unlock;
- }
- }
-
ret = -ENOMEM;
newinfo = xt_alloc_table_info(size);
if (!newinfo)
goto out_unlock;
- newinfo->number = number;
+ newinfo->number = compatr->num_entries;
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- newinfo->hook_entry[i] = info->hook_entry[i];
- newinfo->underflow[i] = info->underflow[i];
+ newinfo->hook_entry[i] = compatr->hook_entry[i];
+ newinfo->underflow[i] = compatr->underflow[i];
}
entry1 = newinfo->entries[raw_smp_processor_id()];
pos = entry1;
- size = total_size;
- xt_entry_foreach(iter0, entry0, total_size) {
- ret = compat_copy_entry_from_user(iter0, &pos, &size,
- name, newinfo, entry1);
- if (ret != 0)
- break;
- }
+ size = compatr->size;
+ xt_entry_foreach(iter0, entry0, compatr->size)
+ compat_copy_entry_from_user(iter0, &pos, &size,
+ newinfo, entry1);
+
+ /* all module references in entry0 are now gone.
+ * entry1/newinfo contains a 64bit ruleset that looks exactly as
+ * generated by 64bit userspace.
+ *
+ * Call standard translate_table() to validate all hook_entrys,
+ * underflows, check for loops, etc.
+ */
xt_compat_flush_offsets(AF_INET);
xt_compat_unlock(AF_INET);
- if (ret)
- goto free_newinfo;
- ret = -ELOOP;
- if (!mark_source_chains(newinfo, valid_hooks, entry1))
- goto free_newinfo;
+ memcpy(&repl, compatr, sizeof(*compatr));
- i = 0;
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- ret = compat_check_entry(iter1, net, name);
- if (ret != 0)
- break;
- ++i;
- if (strcmp(ipt_get_target(iter1)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
- }
- if (ret) {
- /*
- * The first i matches need cleanup_entry (calls ->destroy)
- * because they had called ->check already. The other j-i
- * entries need only release.
- */
- int skip = i;
- j -= i;
- xt_entry_foreach(iter0, entry0, newinfo->size) {
- if (skip-- > 0)
- continue;
- if (j-- == 0)
- break;
- compat_release_entry(iter0);
- }
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- if (i-- == 0)
- break;
- cleanup_entry(iter1, net);
- }
- xt_free_table_info(newinfo);
- return ret;
+ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+ repl.hook_entry[i] = newinfo->hook_entry[i];
+ repl.underflow[i] = newinfo->underflow[i];
}
- /* And one copy for every other CPU */
- for_each_possible_cpu(i)
- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
- memcpy(newinfo->entries[i], entry1, newinfo->size);
+ repl.num_counters = 0;
+ repl.counters = NULL;
+ repl.size = newinfo->size;
+ ret = translate_table(net, newinfo, entry1, &repl);
+ if (ret)
+ goto free_newinfo;
*pinfo = newinfo;
*pentry0 = entry1;
@@ -1780,17 +1644,16 @@ translate_compat_table(struct net *net,
free_newinfo:
xt_free_table_info(newinfo);
-out:
- xt_entry_foreach(iter0, entry0, total_size) {
+ return ret;
+out_unlock:
+ xt_compat_flush_offsets(AF_INET);
+ xt_compat_unlock(AF_INET);
+ xt_entry_foreach(iter0, entry0, compatr->size) {
if (j-- == 0)
break;
compat_release_entry(iter0);
}
return ret;
-out_unlock:
- xt_compat_flush_offsets(AF_INET);
- xt_compat_unlock(AF_INET);
- goto out;
}
static int
@@ -1810,6 +1673,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
@@ -1824,10 +1690,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
goto free_newinfo;
}
- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
- &newinfo, &loc_cpu_entry, tmp.size,
- tmp.num_entries, tmp.hook_entry,
- tmp.underflow);
+ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index e080fbbbc0e5..67d5e86dd84c 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -195,11 +195,12 @@ get_entry(const void *base, unsigned int offset)
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
+static inline bool unconditional(const struct ip6t_entry *e)
{
static const struct ip6t_ip6 uncond;
- return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
+ return e->target_offset == sizeof(struct ip6t_entry) &&
+ memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
}
static inline const struct xt_entry_target *
@@ -255,11 +256,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
} else if (s == e) {
(*rulenum)++;
- if (s->target_offset == sizeof(struct ip6t_entry) &&
+ if (unconditional(s) &&
strcmp(t->target.u.kernel.target->name,
XT_STANDARD_TARGET) == 0 &&
- t->verdict < 0 &&
- unconditional(&s->ipv6)) {
+ t->verdict < 0) {
/* Tail of chains: STANDARD target (return/policy) */
*comment = *chainname == hookname
? comments[NF_IP6_TRACE_COMMENT_POLICY]
@@ -449,6 +449,19 @@ ip6t_do_table(struct sk_buff *skb,
#endif
}
+static bool find_jump_target(const struct xt_table_info *t,
+ const void *entry0,
+ const struct ip6t_entry *target)
+{
+ struct ip6t_entry *iter;
+
+ xt_entry_foreach(iter, entry0, t->size) {
+ if (iter == target)
+ return true;
+ }
+ return false;
+}
+
/* Figures out from what hook each rule can be called: returns 0 if
there are loops. Puts hook bitmask in comefrom. */
static int
@@ -482,11 +495,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
/* Unconditional return/END. */
- if ((e->target_offset == sizeof(struct ip6t_entry) &&
+ if ((unconditional(e) &&
(strcmp(t->target.u.user.name,
XT_STANDARD_TARGET) == 0) &&
- t->verdict < 0 &&
- unconditional(&e->ipv6)) || visited) {
+ t->verdict < 0) || visited) {
unsigned int oldpos, size;
if ((strcmp(t->target.u.user.name,
@@ -527,6 +539,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
size = e->next_offset;
e = (struct ip6t_entry *)
(entry0 + pos + size);
+ if (pos + size >= newinfo->size)
+ return 0;
e->counters.pcnt = pos;
pos += size;
} else {
@@ -545,9 +559,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
/* This a jump; chase it. */
duprintf("Jump rule %u -> %u\n",
pos, newpos);
+ e = (struct ip6t_entry *)
+ (entry0 + newpos);
+ if (!find_jump_target(newinfo, entry0, e))
+ return 0;
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
+ if (newpos >= newinfo->size)
+ return 0;
}
e = (struct ip6t_entry *)
(entry0 + newpos);
@@ -574,27 +594,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
module_put(par.match->me);
}
-static int
-check_entry(const struct ip6t_entry *e, const char *name)
-{
- const struct xt_entry_target *t;
-
- if (!ip6_checkentry(&e->ipv6)) {
- duprintf("ip_tables: ip check failed %p %s.\n", e, name);
- return -EINVAL;
- }
-
- if (e->target_offset + sizeof(struct xt_entry_target) >
- e->next_offset)
- return -EINVAL;
-
- t = ip6t_get_target_c(e);
- if (e->target_offset + t->u.target_size > e->next_offset)
- return -EINVAL;
-
- return 0;
-}
-
static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
const struct ip6t_ip6 *ipv6 = par->entryinfo;
@@ -673,10 +672,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;
- ret = check_entry(e, name);
- if (ret)
- return ret;
-
j = 0;
mtpar.net = net;
mtpar.table = name;
@@ -720,7 +715,7 @@ static bool check_underflow(const struct ip6t_entry *e)
const struct xt_entry_target *t;
unsigned int verdict;
- if (!unconditional(&e->ipv6))
+ if (!unconditional(e))
return false;
t = ip6t_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -740,9 +735,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
+ int err;
if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
- (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
@@ -754,6 +751,14 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
return -EINVAL;
}
+ if (!ip6_checkentry(&e->ipv6))
+ return -EINVAL;
+
+ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+ e->next_offset);
+ if (err)
+ return err;
+
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@@ -762,9 +767,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
@@ -1273,6 +1278,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
@@ -1314,56 +1322,17 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
unsigned int i, curcpu;
struct xt_counters_info tmp;
struct xt_counters *paddc;
- unsigned int num_counters;
- char *name;
- int size;
- void *ptmp;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
const void *loc_cpu_entry;
struct ip6t_entry *iter;
unsigned int addend;
-#ifdef CONFIG_COMPAT
- struct compat_xt_counters_info compat_tmp;
-
- if (compat) {
- ptmp = &compat_tmp;
- size = sizeof(struct compat_xt_counters_info);
- } else
-#endif
- {
- ptmp = &tmp;
- size = sizeof(struct xt_counters_info);
- }
-
- if (copy_from_user(ptmp, user, size) != 0)
- return -EFAULT;
-
-#ifdef CONFIG_COMPAT
- if (compat) {
- num_counters = compat_tmp.num_counters;
- name = compat_tmp.name;
- } else
-#endif
- {
- num_counters = tmp.num_counters;
- name = tmp.name;
- }
-
- if (len != size + num_counters * sizeof(struct xt_counters))
- return -EINVAL;
-
- paddc = vmalloc(len - size);
- if (!paddc)
- return -ENOMEM;
- if (copy_from_user(paddc, user + size, len - size) != 0) {
- ret = -EFAULT;
- goto free;
- }
-
- t = xt_find_table_lock(net, AF_INET6, name);
+ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+ if (IS_ERR(paddc))
+ return PTR_ERR(paddc);
+ t = xt_find_table_lock(net, AF_INET6, tmp.name);
if (IS_ERR_OR_NULL(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
@@ -1372,7 +1341,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
local_bh_disable();
private = t->private;
- if (private->number != num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
@@ -1452,7 +1421,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
static int
compat_find_calc_match(struct xt_entry_match *m,
- const char *name,
const struct ip6t_ip6 *ipv6,
unsigned int hookmask,
int *size)
@@ -1488,21 +1456,19 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
- const unsigned char *limit,
- const unsigned int *hook_entries,
- const unsigned int *underflows,
- const char *name)
+ const unsigned char *limit)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
- int ret, off, h;
+ int ret, off;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
- (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
+ (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
+ (unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
@@ -1514,8 +1480,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
return -EINVAL;
}
- /* For purposes of check_entry casting the compat entry is fine */
- ret = check_entry((struct ip6t_entry *)e, name);
+ if (!ip6_checkentry(&e->ipv6))
+ return -EINVAL;
+
+ ret = xt_compat_check_entry_offsets(e, e->elems,
+ e->target_offset, e->next_offset);
if (ret)
return ret;
@@ -1523,8 +1492,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
- ret = compat_find_calc_match(ematch, name,
- &e->ipv6, e->comefrom, &off);
+ ret = compat_find_calc_match(ematch, &e->ipv6, e->comefrom,
+ &off);
if (ret != 0)
goto release_matches;
++j;
@@ -1547,17 +1516,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
if (ret)
goto out;
- /* Check hooks & underflows */
- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
- if ((unsigned char *)e - base == hook_entries[h])
- newinfo->hook_entry[h] = hook_entries[h];
- if ((unsigned char *)e - base == underflows[h])
- newinfo->underflow[h] = underflows[h];
- }
-
- /* Clear counters and comefrom */
- memset(&e->counters, 0, sizeof(e->counters));
- e->comefrom = 0;
return 0;
out:
@@ -1571,18 +1529,17 @@ release_matches:
return ret;
}
-static int
+static void
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
- unsigned int *size, const char *name,
+ unsigned int *size,
struct xt_table_info *newinfo, unsigned char *base)
{
struct xt_entry_target *t;
struct ip6t_entry *de;
unsigned int origsize;
- int ret, h;
+ int h;
struct xt_entry_match *ematch;
- ret = 0;
origsize = *size;
de = (struct ip6t_entry *)*dstptr;
memcpy(de, e, sizeof(struct ip6t_entry));
@@ -1591,11 +1548,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
*dstptr += sizeof(struct ip6t_entry);
*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
- xt_ematch_foreach(ematch, e) {
- ret = xt_compat_match_from_user(ematch, dstptr, size);
- if (ret != 0)
- return ret;
- }
+ xt_ematch_foreach(ematch, e)
+ xt_compat_match_from_user(ematch, dstptr, size);
+
de->target_offset = e->target_offset - (origsize - *size);
t = compat_ip6t_get_target(e);
xt_compat_target_from_user(t, dstptr, size);
@@ -1607,181 +1562,82 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
}
- return ret;
-}
-
-static int compat_check_entry(struct ip6t_entry *e, struct net *net,
- const char *name)
-{
- unsigned int j;
- int ret = 0;
- struct xt_mtchk_param mtpar;
- struct xt_entry_match *ematch;
-
- j = 0;
- mtpar.net = net;
- mtpar.table = name;
- mtpar.entryinfo = &e->ipv6;
- mtpar.hook_mask = e->comefrom;
- mtpar.family = NFPROTO_IPV6;
- xt_ematch_foreach(ematch, e) {
- ret = check_match(ematch, &mtpar);
- if (ret != 0)
- goto cleanup_matches;
- ++j;
- }
-
- ret = check_target(e, net, name);
- if (ret)
- goto cleanup_matches;
- return 0;
-
- cleanup_matches:
- xt_ematch_foreach(ematch, e) {
- if (j-- == 0)
- break;
- cleanup_match(ematch, net);
- }
- return ret;
}
static int
translate_compat_table(struct net *net,
- const char *name,
- unsigned int valid_hooks,
struct xt_table_info **pinfo,
void **pentry0,
- unsigned int total_size,
- unsigned int number,
- unsigned int *hook_entries,
- unsigned int *underflows)
+ const struct compat_ip6t_replace *compatr)
{
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
struct compat_ip6t_entry *iter0;
- struct ip6t_entry *iter1;
+ struct ip6t_replace repl;
unsigned int size;
int ret = 0;
info = *pinfo;
entry0 = *pentry0;
- size = total_size;
- info->number = number;
-
- /* Init all hooks to impossible value. */
- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- info->hook_entry[i] = 0xFFFFFFFF;
- info->underflow[i] = 0xFFFFFFFF;
- }
+ size = compatr->size;
+ info->number = compatr->num_entries;
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(AF_INET6);
- xt_compat_init_offsets(AF_INET6, number);
+ xt_compat_init_offsets(AF_INET6, compatr->num_entries);
/* Walk through entries, checking offsets. */
- xt_entry_foreach(iter0, entry0, total_size) {
+ xt_entry_foreach(iter0, entry0, compatr->size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
entry0,
- entry0 + total_size,
- hook_entries,
- underflows,
- name);
+ entry0 + compatr->size);
if (ret != 0)
goto out_unlock;
++j;
}
ret = -EINVAL;
- if (j != number) {
+ if (j != compatr->num_entries) {
duprintf("translate_compat_table: %u not %u entries\n",
- j, number);
+ j, compatr->num_entries);
goto out_unlock;
}
- /* Check hooks all assigned */
- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- /* Only hooks which are valid */
- if (!(valid_hooks & (1 << i)))
- continue;
- if (info->hook_entry[i] == 0xFFFFFFFF) {
- duprintf("Invalid hook entry %u %u\n",
- i, hook_entries[i]);
- goto out_unlock;
- }
- if (info->underflow[i] == 0xFFFFFFFF) {
- duprintf("Invalid underflow %u %u\n",
- i, underflows[i]);
- goto out_unlock;
- }
- }
-
ret = -ENOMEM;
newinfo = xt_alloc_table_info(size);
if (!newinfo)
goto out_unlock;
- newinfo->number = number;
+ newinfo->number = compatr->num_entries;
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- newinfo->hook_entry[i] = info->hook_entry[i];
- newinfo->underflow[i] = info->underflow[i];
+ newinfo->hook_entry[i] = compatr->hook_entry[i];
+ newinfo->underflow[i] = compatr->underflow[i];
}
entry1 = newinfo->entries[raw_smp_processor_id()];
pos = entry1;
- size = total_size;
- xt_entry_foreach(iter0, entry0, total_size) {
- ret = compat_copy_entry_from_user(iter0, &pos, &size,
- name, newinfo, entry1);
- if (ret != 0)
- break;
- }
+ size = compatr->size;
+ xt_entry_foreach(iter0, entry0, compatr->size)
+ compat_copy_entry_from_user(iter0, &pos, &size,
+ newinfo, entry1);
+
+ /* all module references in entry0 are now gone. */
xt_compat_flush_offsets(AF_INET6);
xt_compat_unlock(AF_INET6);
- if (ret)
- goto free_newinfo;
- ret = -ELOOP;
- if (!mark_source_chains(newinfo, valid_hooks, entry1))
- goto free_newinfo;
+ memcpy(&repl, compatr, sizeof(*compatr));
- i = 0;
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- ret = compat_check_entry(iter1, net, name);
- if (ret != 0)
- break;
- ++i;
- if (strcmp(ip6t_get_target(iter1)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
- }
- if (ret) {
- /*
- * The first i matches need cleanup_entry (calls ->destroy)
- * because they had called ->check already. The other j-i
- * entries need only release.
- */
- int skip = i;
- j -= i;
- xt_entry_foreach(iter0, entry0, newinfo->size) {
- if (skip-- > 0)
- continue;
- if (j-- == 0)
- break;
- compat_release_entry(iter0);
- }
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- if (i-- == 0)
- break;
- cleanup_entry(iter1, net);
- }
- xt_free_table_info(newinfo);
- return ret;
+ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+ repl.hook_entry[i] = newinfo->hook_entry[i];
+ repl.underflow[i] = newinfo->underflow[i];
}
- /* And one copy for every other CPU */
- for_each_possible_cpu(i)
- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
- memcpy(newinfo->entries[i], entry1, newinfo->size);
+ repl.num_counters = 0;
+ repl.counters = NULL;
+ repl.size = newinfo->size;
+ ret = translate_table(net, newinfo, entry1, &repl);
+ if (ret)
+ goto free_newinfo;
*pinfo = newinfo;
*pentry0 = entry1;
@@ -1790,17 +1646,16 @@ translate_compat_table(struct net *net,
free_newinfo:
xt_free_table_info(newinfo);
-out:
- xt_entry_foreach(iter0, entry0, total_size) {
+ return ret;
+out_unlock:
+ xt_compat_flush_offsets(AF_INET6);
+ xt_compat_unlock(AF_INET6);
+ xt_entry_foreach(iter0, entry0, compatr->size) {
if (j-- == 0)
break;
compat_release_entry(iter0);
}
return ret;
-out_unlock:
- xt_compat_flush_offsets(AF_INET6);
- xt_compat_unlock(AF_INET6);
- goto out;
}
static int
@@ -1820,6 +1675,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
@@ -1834,10 +1692,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
goto free_newinfo;
}
- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
- &newinfo, &loc_cpu_entry, tmp.size,
- tmp.num_entries, tmp.hook_entry,
- tmp.underflow);
+ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b50ae296537d..038304f796d8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1783,7 +1783,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
destp = ntohs(inet->inet_dport);
srcp = ntohs(inet->inet_sport);
- if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+ if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 227aa11e8409..1393af786e15 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -435,6 +435,47 @@ int xt_check_match(struct xt_mtchk_param *par,
}
EXPORT_SYMBOL_GPL(xt_check_match);
+/** xt_check_entry_match - check that matches end before start of target
+ *
+ * @match: beginning of xt_entry_match
+ * @target: beginning of this rule's target (alleged end of matches)
+ * @alignment: alignment requirement of match structures
+ *
+ * Validates that all matches add up to the beginning of the target,
+ * and that each match covers at least the base structure size.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int xt_check_entry_match(const char *match, const char *target,
+ const size_t alignment)
+{
+ const struct xt_entry_match *pos;
+ int length = target - match;
+
+ if (length == 0) /* no matches */
+ return 0;
+
+ pos = (struct xt_entry_match *)match;
+ do {
+ if ((unsigned long)pos % alignment)
+ return -EINVAL;
+
+ if (length < (int)sizeof(struct xt_entry_match))
+ return -EINVAL;
+
+ if (pos->u.match_size < sizeof(struct xt_entry_match))
+ return -EINVAL;
+
+ if (pos->u.match_size > length)
+ return -EINVAL;
+
+ length -= pos->u.match_size;
+ pos = ((void *)((char *)(pos) + (pos)->u.match_size));
+ } while (length > 0);
+
+ return 0;
+}
+
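As a rough sketch of the invariant xt_check_entry_match() enforces (stand-in type and function names chosen here for illustration, not taken from this patch), the walk amounts to requiring that the match sizes tile the region between elems and the target exactly:

	/* Stand-in for xt_entry_match's size field; hypothetical, for illustration. */
	struct sketch_match_hdr {
		unsigned short match_size;
	};

	/* Returns 1 if the matches in [elems, elems + target_offset) tile it exactly. */
	static int sketch_matches_tile(const char *elems, unsigned int target_offset)
	{
		unsigned int off = 0;

		while (off < target_offset) {
			const struct sketch_match_hdr *m =
				(const void *)(elems + off);

			if (target_offset - off < sizeof(*m))
				return 0;	/* truncated match header */
			if (m->match_size < sizeof(*m) ||
			    m->match_size > target_offset - off)
				return 0;	/* bogus match_size */
			off += m->match_size;
		}
		return 1;	/* landed exactly where the target begins */
	}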
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
@@ -504,13 +545,14 @@ int xt_compat_match_offset(const struct xt_match *match)
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);
-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
- unsigned int *size)
+void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+ unsigned int *size)
{
const struct xt_match *match = m->u.kernel.match;
struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
int pad, off = xt_compat_match_offset(match);
u_int16_t msize = cm->u.user.match_size;
+ char name[sizeof(m->u.user.name)];
m = *dstptr;
memcpy(m, cm, sizeof(*cm));
@@ -524,10 +566,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
msize += off;
m->u.user.match_size = msize;
+ strlcpy(name, match->name, sizeof(name));
+ module_put(match->me);
+ strncpy(m->u.user.name, name, sizeof(m->u.user.name));
*size += off;
*dstptr += msize;
- return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
@@ -558,8 +602,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
+
+/* non-compat version may have padding after verdict */
+struct compat_xt_standard_target {
+ struct compat_xt_entry_target t;
+ compat_uint_t verdict;
+};
+
+int xt_compat_check_entry_offsets(const void *base, const char *elems,
+ unsigned int target_offset,
+ unsigned int next_offset)
+{
+ long size_of_base_struct = elems - (const char *)base;
+ const struct compat_xt_entry_target *t;
+ const char *e = base;
+
+ if (target_offset < size_of_base_struct)
+ return -EINVAL;
+
+ if (target_offset + sizeof(*t) > next_offset)
+ return -EINVAL;
+
+ t = (void *)(e + target_offset);
+ if (t->u.target_size < sizeof(*t))
+ return -EINVAL;
+
+ if (target_offset + t->u.target_size > next_offset)
+ return -EINVAL;
+
+ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
+ COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
+ return -EINVAL;
+
+ /* compat_xt_entry_match has less strict alignment requirements,
+ * otherwise they are identical. In case of padding differences
+ * we need to add a compat version of xt_check_entry_match.
+ */
+ BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
+
+ return xt_check_entry_match(elems, base + target_offset,
+ __alignof__(struct compat_xt_entry_match));
+}
+EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_COMPAT */
+/**
+ * xt_check_entry_offsets - validate arp/ip/ip6t_entry
+ *
+ * @base: pointer to arp/ip/ip6t_entry
+ * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
+ * @target_offset: the arp/ip/ip6t_entry->target_offset
+ * @next_offset: the arp/ip/ip6t_entry->next_offset
+ *
+ * validates that target_offset and next_offset are sane and that all
+ * match sizes (if any) align with the target offset.
+ *
+ * This function does not validate the targets or matches themselves; it
+ * only tests that all the offsets and sizes are correct, that all
+ * match structures are aligned, and that the last structure ends where
+ * the target structure begins.
+ *
+ * Also see xt_compat_check_entry_offsets for the CONFIG_COMPAT version.
+ *
+ * The arp/ip/ip6t_entry structure @base must have passed the following tests:
+ * - it must point to a valid memory location
+ * - base to base + next_offset must be accessible, i.e. not exceed allocated
+ * length.
+ *
+ * A well-formed entry looks like this:
+ *
+ * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry
+ * e->elems[]-----' | |
+ * matchsize | |
+ * matchsize | |
+ * | |
+ * target_offset---------------------------------' |
+ * next_offset---------------------------------------------------'
+ *
+ * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
+ * This is where matches (if any) and the target reside.
+ * target_offset: beginning of target.
+ * next_offset: start of the next rule; also: size of this rule.
+ * Since targets have a minimum size, target_offset + minlen <= next_offset.
+ *
+ * Every match stores its size; the sum of sizes must not exceed target_offset.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int xt_check_entry_offsets(const void *base,
+ const char *elems,
+ unsigned int target_offset,
+ unsigned int next_offset)
+{
+ long size_of_base_struct = elems - (const char *)base;
+ const struct xt_entry_target *t;
+ const char *e = base;
+
+ /* target start is within the ip/ip6/arpt_entry struct */
+ if (target_offset < size_of_base_struct)
+ return -EINVAL;
+
+ if (target_offset + sizeof(*t) > next_offset)
+ return -EINVAL;
+
+ t = (void *)(e + target_offset);
+ if (t->u.target_size < sizeof(*t))
+ return -EINVAL;
+
+ if (target_offset + t->u.target_size > next_offset)
+ return -EINVAL;
+
+ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
+ XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
+ return -EINVAL;
+
+ return xt_check_entry_match(elems, base + target_offset,
+ __alignof__(struct xt_entry_match));
+}
+EXPORT_SYMBOL(xt_check_entry_offsets);
+
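To make the layout diagram above concrete, consider the standard-target case checked at the end of xt_check_entry_offsets(): for a rule with no matches, the target starts right after the entry header and the rule ends right after the aligned standard target. A hypothetical sketch of that arithmetic (SKETCH_ALIGN stands in for XT_ALIGN(); names and the 8-byte alignment are illustrative only):

	#define SKETCH_ALIGN(x)	(((x) + 7u) & ~7u)	/* stand-in for XT_ALIGN() */

	/* Hypothetical check mirroring the standard-target case above. */
	int sketch_minimal_rule_ok(unsigned int entry_hdr_size,
				   unsigned int std_target_size,
				   unsigned int target_offset,
				   unsigned int next_offset)
	{
		/* No matches: the target starts right after the entry header ... */
		if (target_offset != entry_hdr_size)
			return 0;
		/* ... and the rule ends right after the aligned standard target. */
		return SKETCH_ALIGN(target_offset + std_target_size) == next_offset;
	}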
int xt_check_target(struct xt_tgchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
@@ -610,6 +771,80 @@ int xt_check_target(struct xt_tgchk_param *par,
}
EXPORT_SYMBOL_GPL(xt_check_target);
+/**
+ * xt_copy_counters_from_user - copy counters and metadata from userspace
+ *
+ * @user: src pointer to userspace memory
+ * @len: alleged size of userspace memory
+ * @info: where to store the xt_counters_info metadata
+ * @compat: true if the setsockopt call is done by a 32bit task on a 64bit kernel
+ *
+ * Copies the counter metadata from @user and stores it in @info.
+ *
+ * vmallocs memory to hold the counters, then copies the counter data
+ * from @user to the new memory and returns a pointer to it.
+ *
+ * If @compat is true, @info gets converted automatically to the 64bit
+ * representation.
+ *
+ * The metadata associated with the counters is stored in @info.
+ *
+ * Return: pointer that the caller has to test via IS_ERR().
+ * If IS_ERR() is false, the caller has to vfree() the pointer.
+ */
+void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+ struct xt_counters_info *info, bool compat)
+{
+ void *mem;
+ u64 size;
+
+#ifdef CONFIG_COMPAT
+ if (compat) {
+ /* structures only differ in size due to alignment */
+ struct compat_xt_counters_info compat_tmp;
+
+ if (len <= sizeof(compat_tmp))
+ return ERR_PTR(-EINVAL);
+
+ len -= sizeof(compat_tmp);
+ if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
+ return ERR_PTR(-EFAULT);
+
+ strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+ info->num_counters = compat_tmp.num_counters;
+ user += sizeof(compat_tmp);
+ } else
+#endif
+ {
+ if (len <= sizeof(*info))
+ return ERR_PTR(-EINVAL);
+
+ len -= sizeof(*info);
+ if (copy_from_user(info, user, sizeof(*info)) != 0)
+ return ERR_PTR(-EFAULT);
+
+ info->name[sizeof(info->name) - 1] = '\0';
+ user += sizeof(*info);
+ }
+
+ size = sizeof(struct xt_counters);
+ size *= info->num_counters;
+
+ if (size != (u64)len)
+ return ERR_PTR(-EINVAL);
+
+ mem = vmalloc(len);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(mem, user, len) == 0)
+ return mem;
+
+ vfree(mem);
+ return ERR_PTR(-EFAULT);
+}
+EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
+
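A caller-side sketch of the ownership rules documented above, following the pattern this patch itself uses in do_add_counters() (the function name below is hypothetical):

	/* Hypothetical caller; mirrors the IS_ERR()/vfree() contract above. */
	static int sketch_add_counters(const void __user *user, unsigned int len,
				       bool compat)
	{
		struct xt_counters_info tmp;
		struct xt_counters *paddc;

		paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
		if (IS_ERR(paddc))
			return PTR_ERR(paddc);

		/* ... look up the table named tmp.name and add the
		 * tmp.num_counters entries held in paddc ... */

		vfree(paddc);	/* on success the caller owns the vmalloc'ed buffer */
		return 0;
	}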
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
@@ -625,6 +860,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
int pad, off = xt_compat_target_offset(target);
u_int16_t tsize = ct->u.user.target_size;
+ char name[sizeof(t->u.user.name)];
t = *dstptr;
memcpy(t, ct, sizeof(*ct));
@@ -638,6 +874,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
tsize += off;
t->u.user.target_size = tsize;
+ strlcpy(name, target->name, sizeof(name));
+ module_put(target->me);
+ strncpy(t->u.user.name, name, sizeof(t->u.user.name));
*size += off;
*dstptr += tsize;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index fd9373c9f057..d6231f219edc 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2651,6 +2651,7 @@ static int netlink_dump(struct sock *sk)
struct netlink_callback *cb;
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
+ struct module *module;
int len, err = -ENOBUFS;
int alloc_size;
@@ -2700,9 +2701,11 @@ static int netlink_dump(struct sock *sk)
cb->done(cb);
nlk->cb_running = false;
+ module = cb->module;
+ skb = cb->skb;
mutex_unlock(nlk->cb_mutex);
- module_put(cb->module);
- consume_skb(cb->skb);
+ module_put(module);
+ consume_skb(skb);
return 0;
errout_skb:
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 87dd619fb2e9..1c9a505b7019 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -954,8 +954,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
return private(dev, iwr, cmd, info, handler);
}
/* Old driver API : call driver ioctl handler */
- if (dev->netdev_ops->ndo_do_ioctl)
- return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+ if (dev->netdev_ops->ndo_do_ioctl) {
+#ifdef CONFIG_COMPAT
+ if (info->flags & IW_REQUEST_FLAG_COMPAT) {
+ int ret = 0;
+ struct iwreq iwr_lcl;
+ struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
+
+ memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
+ iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
+ iwr_lcl.u.data.length = iwp_compat->length;
+ iwr_lcl.u.data.flags = iwp_compat->flags;
+
+ ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
+
+ iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
+ iwp_compat->length = iwr_lcl.u.data.length;
+ iwp_compat->flags = iwr_lcl.u.data.flags;
+
+ return ret;
+ } else
+#endif
+ return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+ }
return -EOPNOTSUPP;
}