Message-ID: <20161126161210.GB6839@kroah.com>
Date: Sat, 26 Nov 2016 17:12:10 +0100
From: Greg KH <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
torvalds@...ux-foundation.org, stable@...r.kernel.org
Cc: lwn@....net, Jiri Slaby <jslaby@...e.cz>
Subject: Re: Linux 4.4.35
diff --git a/Makefile b/Makefile
index 30924aabf1b4..f88830af1533 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 34
+SUBLEVEL = 35
EXTRAVERSION =
NAME = Blurry Fish Butt
@@ -395,11 +395,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common \
-Werror-implicit-function-declaration \
-Wno-format-security \
- -std=gnu89
+ -std=gnu89 $(call cc-option,-fno-PIE)
+
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
-KBUILD_AFLAGS := -D__ASSEMBLY__
+KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 6cb5834062a3..e2defc7593a4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -352,7 +352,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP
unsigned bits;
int cpu = smp_processor_id();
- unsigned int socket_id, core_complex_id;
bits = c->x86_coreid_bits;
/* Low order bits define the core id (index of core in socket) */
@@ -370,10 +369,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
return;
- socket_id = (c->apicid >> bits) - 1;
- core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
-
- per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
+ per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
#endif
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 685ef431a41d..7429d481a311 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -199,7 +199,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
struct kvm_shared_msr_values *values;
+ unsigned long flags;
+ /*
+ * Disabling irqs at this point since the following code could be
+ * interrupted and executed through kvm_arch_hardware_disable()
+ */
+ local_irq_save(flags);
+ if (locals->registered) {
+ locals->registered = false;
+ user_return_notifier_unregister(urn);
+ }
+ local_irq_restore(flags);
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
values = &locals->values[slot];
if (values->host != values->curr) {
@@ -207,8 +218,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
values->curr = values->host;
}
}
- locals->registered = false;
- user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
@@ -3317,6 +3326,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
};
case KVM_SET_VAPIC_ADDR: {
struct kvm_vapic_addr va;
+ int idx;
r = -EINVAL;
if (!lapic_in_kernel(vcpu))
@@ -3324,7 +3334,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
case KVM_X86_SETUP_MCE: {
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 2c835e356349..d445c5f1aeb1 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -12,6 +12,7 @@ targets += purgatory.ro
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
KBUILD_CFLAGS += -m$(BITS)
+KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ae60e6357d7b..e9b713675c7c 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1022,6 +1022,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
+ dpm_wait_for_children(dev, async);
+
if (async_error)
goto Complete;
@@ -1033,8 +1035,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- dpm_wait_for_children(dev, async);
-
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -1169,6 +1169,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
__pm_runtime_disable(dev, false);
+ dpm_wait_for_children(dev, async);
+
if (async_error)
goto Complete;
@@ -1180,8 +1182,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- dpm_wait_for_children(dev, async);
-
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 251533d87c65..f261b1d292c7 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index 64eaf4141c69..427f4bb08665 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
index 13d6173326a4..cdf5ba566d3b 100644
--- a/drivers/clk/mmp/clk-of-pxa910.c
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
@@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
}
pxa_unit->apbcp_base = of_iomap(np, 3);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apbcp_base) {
pr_err("failed to map apbcp registers\n");
return;
}
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b3044219772c..2cde3796cb82 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -4542,6 +4542,15 @@ static int __init caam_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
+ /*
+ * Check support for AES modes not available
+ * on LP devices.
+ */
+ if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+ if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_XTS)
+ continue;
+
t_alg = caam_alg_alloc(alg);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ff5566c69f7d..e8e962f7b5cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -532,6 +532,7 @@ struct amdgpu_bo {
u64 metadata_flags;
void *metadata;
u32 metadata_size;
+ unsigned prime_shared_count;
/* list of all virtual address to which this bo
* is associated to
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index f82a2dd83874..3c7a7235988d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -117,7 +117,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
}
entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = true;
+ entry->tv.shared = !entry->robj->prime_shared_count;
if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
gds_obj = entry->robj;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 59f735a933a9..e6a7d30c3747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -77,20 +77,36 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
list_add_tail(&bo->list, &adev->gem.objects);
mutex_unlock(&adev->gem.mutex);
+ bo->prime_shared_count = 1;
return &bo->gem_base;
}
int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int ret = 0;
+ long ret = 0;
ret = amdgpu_bo_reserve(bo, false);
if (unlikely(ret != 0))
return ret;
+ /*
+ * Wait for all shared fences to complete before we switch to future
+ * use of exclusive fence on this prime shared bo.
+ */
+ ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (unlikely(ret < 0)) {
+ DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
+ amdgpu_bo_unreserve(bo);
+ return ret;
+ }
+
/* pin buffer into GTT */
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ if (likely(ret == 0))
+ bo->prime_shared_count++;
+
amdgpu_bo_unreserve(bo);
return ret;
}
@@ -105,6 +121,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
return;
amdgpu_bo_unpin(bo);
+ if (bo->prime_shared_count)
+ bo->prime_shared_count--;
amdgpu_bo_unreserve(bo);
}
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 78fbee463628..65dbde778181 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -59,7 +59,6 @@ config I2C_CHARDEV
config I2C_MUX
tristate "I2C bus multiplexing support"
- depends on HAS_IOMEM
help
Say Y here if you want the I2C core to support the ability to
handle multiplexed I2C bus topologies, by presenting each
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index f06b0e24673b..af2a63cb4056 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL
config I2C_MUX_REG
tristate "Register-based I2C multiplexer"
+ depends on HAS_IOMEM
help
If you say yes to this option, support will be included for a
register based I2C multiplexer. This driver provides access to
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 4d8e7f18a9af..941cd9b83941 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -80,6 +80,8 @@ static struct ib_cm {
__be32 random_id_operand;
struct list_head timewait_list;
struct workqueue_struct *wq;
+ /* Sync on cm change port state */
+ spinlock_t state_lock;
} cm;
/* Counter indexes ordered by attribute ID */
@@ -161,6 +163,8 @@ struct cm_port {
struct ib_mad_agent *mad_agent;
struct kobject port_obj;
u8 port_num;
+ struct list_head cm_priv_prim_list;
+ struct list_head cm_priv_altr_list;
struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};
@@ -241,6 +245,12 @@ struct cm_id_private {
u8 service_timeout;
u8 target_ack_delay;
+ struct list_head prim_list;
+ struct list_head altr_list;
+ /* Indicates that the send port mad is registered and av is set */
+ int prim_send_port_not_ready;
+ int altr_send_port_not_ready;
+
struct list_head work_list;
atomic_t work_count;
};
@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
struct ib_mad_agent *mad_agent;
struct ib_mad_send_buf *m;
struct ib_ah *ah;
+ struct cm_av *av;
+ unsigned long flags, flags2;
+ int ret = 0;
+ /* don't let the port to be released till the agent is down */
+ spin_lock_irqsave(&cm.state_lock, flags2);
+ spin_lock_irqsave(&cm.lock, flags);
+ if (!cm_id_priv->prim_send_port_not_ready)
+ av = &cm_id_priv->av;
+ else if (!cm_id_priv->altr_send_port_not_ready &&
+ (cm_id_priv->alt_av.port))
+ av = &cm_id_priv->alt_av;
+ else {
+ pr_info("%s: not valid CM id\n", __func__);
+ ret = -ENODEV;
+ spin_unlock_irqrestore(&cm.lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&cm.lock, flags);
+ /* Make sure the port haven't released the mad yet */
mad_agent = cm_id_priv->av.port->mad_agent;
- ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
- if (IS_ERR(ah))
- return PTR_ERR(ah);
+ if (!mad_agent) {
+ pr_info("%s: not a valid MAD agent\n", __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+ ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
+ if (IS_ERR(ah)) {
+ ret = PTR_ERR(ah);
+ goto out;
+ }
m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
- cm_id_priv->av.pkey_index,
+ av->pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
- return PTR_ERR(m);
+ ret = PTR_ERR(m);
+ goto out;
}
/* Timeout set by caller if response is expected. */
@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
atomic_inc(&cm_id_priv->refcount);
m->context[0] = cm_id_priv;
*msg = m;
- return 0;
+
+out:
+ spin_unlock_irqrestore(&cm.state_lock, flags2);
+ return ret;
}
static int cm_alloc_response_msg(struct cm_port *port,
@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
grh, &av->ah_attr);
}
-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
+ struct cm_id_private *cm_id_priv)
{
struct cm_device *cm_dev;
struct cm_port *port = NULL;
@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
&av->ah_attr);
av->timeout = path->packet_life_time + 1;
- return 0;
+ spin_lock_irqsave(&cm.lock, flags);
+ if (&cm_id_priv->av == av)
+ list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
+ else if (&cm_id_priv->alt_av == av)
+ list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
+ else
+ ret = -EINVAL;
+
+ spin_unlock_irqrestore(&cm.lock, flags);
+
+ return ret;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
spin_lock_init(&cm_id_priv->lock);
init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
+ INIT_LIST_HEAD(&cm_id_priv->prim_list);
+ INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
atomic_set(&cm_id_priv->refcount, 1);
return &cm_id_priv->id;
@@ -892,6 +945,15 @@ retest:
break;
}
+ spin_lock_irq(&cm.lock);
+ if (!list_empty(&cm_id_priv->altr_list) &&
+ (!cm_id_priv->altr_send_port_not_ready))
+ list_del(&cm_id_priv->altr_list);
+ if (!list_empty(&cm_id_priv->prim_list) &&
+ (!cm_id_priv->prim_send_port_not_ready))
+ list_del(&cm_id_priv->prim_list);
+ spin_unlock_irq(&cm.lock);
+
cm_free_id(cm_id->local_id);
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
+ ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
+ cm_id_priv);
if (ret)
goto error1;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path,
- &cm_id_priv->alt_av);
+ &cm_id_priv->alt_av, cm_id_priv);
if (ret)
goto error1;
}
@@ -1639,7 +1702,8 @@ static int cm_req_handler(struct cm_work *work)
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
+ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+ cm_id_priv);
if (ret) {
ib_get_cached_gid(work->port->cm_dev->ib_device,
work->port->port_num, 0, &work->path[0].sgid,
@@ -1650,7 +1714,8 @@ static int cm_req_handler(struct cm_work *work)
goto rejected;
}
if (req_msg->alt_local_lid) {
- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
+ ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
+ cm_id_priv);
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
@@ -2705,7 +2770,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
+ cm_id_priv);
if (ret)
goto out;
cm_id_priv->alt_av.timeout =
@@ -2817,7 +2883,8 @@ static int cm_lap_handler(struct cm_work *work)
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
+ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
+ cm_id_priv);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3009,7 +3076,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
+ ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
if (ret)
goto out;
@@ -3446,7 +3513,9 @@ out:
static int cm_migrate(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
+ struct cm_av tmp_av;
unsigned long flags;
+ int tmp_send_port_not_ready;
int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3455,7 +3524,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
(cm_id->lap_state == IB_CM_LAP_UNINIT ||
cm_id->lap_state == IB_CM_LAP_IDLE)) {
cm_id->lap_state = IB_CM_LAP_IDLE;
+ /* Swap address vector */
+ tmp_av = cm_id_priv->av;
cm_id_priv->av = cm_id_priv->alt_av;
+ cm_id_priv->alt_av = tmp_av;
+ /* Swap port send ready state */
+ tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
+ cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
+ cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
} else
ret = -EINVAL;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -3875,6 +3951,9 @@ static void cm_add_one(struct ib_device *ib_device)
port->cm_dev = cm_dev;
port->port_num = i;
+ INIT_LIST_HEAD(&port->cm_priv_prim_list);
+ INIT_LIST_HEAD(&port->cm_priv_altr_list);
+
ret = cm_create_port_fs(port);
if (ret)
goto error1;
@@ -3932,6 +4011,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
struct cm_device *cm_dev = client_data;
struct cm_port *port;
+ struct cm_id_private *cm_id_priv;
+ struct ib_mad_agent *cur_mad_agent;
struct ib_port_modify port_modify = {
.clr_port_cap_mask = IB_PORT_CM_SUP
};
@@ -3955,15 +4036,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
+ /* Mark all the cm_id's as not valid */
+ spin_lock_irq(&cm.lock);
+ list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
+ cm_id_priv->altr_send_port_not_ready = 1;
+ list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
+ cm_id_priv->prim_send_port_not_ready = 1;
+ spin_unlock_irq(&cm.lock);
/*
* We flush the queue here after the going_down set, this
* verify that no new works will be queued in the recv handler,
* after that we can call the unregister_mad_agent
*/
flush_workqueue(cm.wq);
- ib_unregister_mad_agent(port->mad_agent);
+ spin_lock_irq(&cm.state_lock);
+ cur_mad_agent = port->mad_agent;
+ port->mad_agent = NULL;
+ spin_unlock_irq(&cm.state_lock);
+ ib_unregister_mad_agent(cur_mad_agent);
cm_remove_port_fs(port);
}
+
device_unregister(cm_dev->device);
kfree(cm_dev);
}
@@ -3976,6 +4069,7 @@ static int __init ib_cm_init(void)
INIT_LIST_HEAD(&cm.device_list);
rwlock_init(&cm.device_lock);
spin_lock_init(&cm.lock);
+ spin_lock_init(&cm.state_lock);
cm.listen_service_table = RB_ROOT;
cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
cm.remote_id_table = RB_ROOT;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 38acb3cfc545..04f3c0db9126 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
cur_base = addr & PAGE_MASK;
- if (npages == 0) {
+ if (npages == 0 || npages > UINT_MAX) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index d625c82d6c82..7becef27cbbe 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -244,12 +244,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
container_of(uobj, struct ib_uqp_object, uevent.uobject);
idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
- if (qp != qp->real_qp) {
- ib_close_qp(qp);
- } else {
+ if (qp == qp->real_qp)
ib_uverbs_detach_umcast(qp, uqp);
- ib_destroy_qp(qp);
- }
+ ib_destroy_qp(qp);
ib_uverbs_release_uevent(file, &uqp->uevent);
kfree(uqp);
}
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 06da56bda201..c007c766c61e 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
if (vlan_tag < 0x1000)
vlan_tag |= (ah_attr->sl & 7) << 13;
ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
- ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+ ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ ah->av.eth.gid_index = ret;
ah->av.eth.vlan = cpu_to_be16(vlan_tag);
if (ah_attr->static_rate) {
ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index b88fc8f5ab18..57e1a08925d9 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
if (context)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
err = -EFAULT;
- goto err_dbmap;
+ goto err_cq_free;
}
return &cq->ibcq;
+err_cq_free:
+ mlx4_cq_free(dev->dev, &cq->mcq);
+
err_dbmap:
if (context)
mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 8184267c7901..02c8deab1fff 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -787,8 +787,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
if (err)
goto err_create;
} else {
- /* for now choose 64 bytes till we have a proper interface */
- cqe_size = 64;
+ cqe_size = cache_line_size() == 128 ? 128 : 64;
err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
&index, &inlen);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index bfc940ff9c8a..2a1fdcaa3044 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -947,13 +947,13 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
{
struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
struct ib_event ibev;
-
+ bool fatal = false;
u8 port = 0;
switch (event) {
case MLX5_DEV_EVENT_SYS_ERROR:
- ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL;
+ fatal = true;
break;
case MLX5_DEV_EVENT_PORT_UP:
@@ -998,6 +998,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (ibdev->ib_active)
ib_dispatch_event(&ibev);
+
+ if (fatal)
+ ibdev->ib_active = false;
}
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 88e80ec772f6..fe89e5e337d5 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -494,9 +494,6 @@ int intel_lpss_suspend(struct device *dev)
for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
lpss->priv_ctx[i] = readl(lpss->priv + i * 4);
- /* Put the device into reset state */
- writel(0, lpss->priv + LPSS_PRIV_RESETS);
-
return 0;
}
EXPORT_SYMBOL_GPL(intel_lpss_suspend);
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 60b60dc63ddd..022c9374ce8b 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -354,6 +354,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
clones[i]);
}
+ put_device(dev);
+
return 0;
}
EXPORT_SYMBOL(mfd_clone_cell);
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index d58c094f2f04..f7e6a09926dd 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -475,48 +475,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
#ifdef CONFIG_ACPI
-#define SPL_METHOD "SPLC"
-#define SPL_DOMAINTYPE_MODULE BIT(0)
-#define SPL_DOMAINTYPE_WIFI BIT(1)
-#define SPL_DOMAINTYPE_WIGIG BIT(2)
-#define SPL_DOMAINTYPE_RFEM BIT(3)
+#define ACPI_SPLC_METHOD "SPLC"
+#define ACPI_SPLC_DOMAIN_WIFI (0x07)
-static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
{
- union acpi_object *limits, *domain_type, *power_limit;
-
- if (splx->type != ACPI_TYPE_PACKAGE ||
- splx->package.count != 2 ||
- splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
- splx->package.elements[0].integer.value != 0) {
- IWL_ERR(trans, "Unsupported splx structure\n");
+ union acpi_object *data_pkg, *dflt_pwr_limit;
+ int i;
+
+ /* We need at least two elements, one for the revision and one
+ * for the data itself. Also check that the revision is
+ * supported (currently only revision 0).
+ */
+ if (splc->type != ACPI_TYPE_PACKAGE ||
+ splc->package.count < 2 ||
+ splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ splc->package.elements[0].integer.value != 0) {
+ IWL_DEBUG_INFO(trans,
+ "Unsupported structure returned by the SPLC method. Ignoring.\n");
return 0;
}
- limits = &splx->package.elements[1];
- if (limits->type != ACPI_TYPE_PACKAGE ||
- limits->package.count < 2 ||
- limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
- limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
- IWL_ERR(trans, "Invalid limits element\n");
- return 0;
+ /* loop through all the packages to find the one for WiFi */
+ for (i = 1; i < splc->package.count; i++) {
+ union acpi_object *domain;
+
+ data_pkg = &splc->package.elements[i];
+
+ /* Skip anything that is not a package with the right
+ * amount of elements (i.e. at least 2 integers).
+ */
+ if (data_pkg->type != ACPI_TYPE_PACKAGE ||
+ data_pkg->package.count < 2 ||
+ data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ continue;
+
+ domain = &data_pkg->package.elements[0];
+ if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
+ break;
+
+ data_pkg = NULL;
}
- domain_type = &limits->package.elements[0];
- power_limit = &limits->package.elements[1];
- if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
- IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
+ if (!data_pkg) {
+ IWL_DEBUG_INFO(trans,
+ "No element for the WiFi domain returned by the SPLC method.\n");
return 0;
}
- return power_limit->integer.value;
+ dflt_pwr_limit = &data_pkg->package.elements[1];
+ return dflt_pwr_limit->integer.value;
}
static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
{
acpi_handle pxsx_handle;
acpi_handle handle;
- struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
pxsx_handle = ACPI_HANDLE(&pdev->dev);
@@ -527,23 +543,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
}
/* Get the method's handle */
- status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+ status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
+ &handle);
if (ACPI_FAILURE(status)) {
- IWL_DEBUG_INFO(trans, "SPL method not found\n");
+ IWL_DEBUG_INFO(trans, "SPLC method not found\n");
return;
}
/* Call SPLC with no arguments */
- status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+ status = acpi_evaluate_object(handle, NULL, NULL, &splc);
if (ACPI_FAILURE(status)) {
IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
return;
}
- trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+ trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
trans->dflt_pwr_limit);
- kfree(splx.pointer);
+ kfree(splc.pointer);
}
#else /* CONFIG_ACPI */
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index ec2e9c5fb993..22394fe30579 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -109,6 +109,7 @@
/* OMAP_RTC_OSC_REG bit fields: */
#define OMAP_RTC_OSC_32KCLK_EN BIT(6)
#define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3)
+#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4)
/* OMAP_RTC_IRQWAKEEN bit fields: */
#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
@@ -646,8 +647,9 @@ static int omap_rtc_probe(struct platform_device *pdev)
*/
if (rtc->has_ext_clk) {
reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
- rtc_write(rtc, OMAP_RTC_OSC_REG,
- reg | OMAP_RTC_OSC_SEL_32KCLK_SRC);
+ reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
+ reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
+ rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
}
rtc->type->lock(rtc);
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
index d059ad4d0dbd..97ee1b46db69 100644
--- a/drivers/uwb/lc-rc.c
+++ b/drivers/uwb/lc-rc.c
@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}
@@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
if (dev) {
rc = dev_get_drvdata(dev);
__uwb_rc_get(rc);
+ put_device(dev);
}
+
return rc;
}
EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
@@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
find_rc_grandpa);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
@@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
return rc;
}
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
index c1304b8d4985..678e93741ae1 100644
--- a/drivers/uwb/pal.c
+++ b/drivers/uwb/pal.c
@@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
+ put_device(dev);
+
return (dev != NULL);
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b7e921d207fb..cd5914495ad7 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -221,6 +221,7 @@ struct ext4_io_submit {
#define EXT4_MAX_BLOCK_SIZE 65536
#define EXT4_MIN_BLOCK_LOG_SIZE 10
#define EXT4_MAX_BLOCK_LOG_SIZE 16
+#define EXT4_MAX_CLUSTER_LOG_SIZE 30
#ifdef __KERNEL__
# define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
#else
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 5bab28caa9d4..127155b82e6e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3394,7 +3394,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
ext4_msg(sb, KERN_ERR,
- "Unsupported filesystem blocksize %d", blocksize);
+ "Unsupported filesystem blocksize %d (%d log_block_size)",
+ blocksize, le32_to_cpu(es->s_log_block_size));
+ goto failed_mount;
+ }
+ if (le32_to_cpu(es->s_log_block_size) >
+ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log block size: %u",
+ le32_to_cpu(es->s_log_block_size));
goto failed_mount;
}
@@ -3533,6 +3541,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"block size (%d)", clustersize, blocksize);
goto failed_mount;
}
+ if (le32_to_cpu(es->s_log_cluster_size) >
+ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log cluster size: %u",
+ le32_to_cpu(es->s_log_cluster_size));
+ goto failed_mount;
+ }
sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
le32_to_cpu(es->s_log_block_size);
sbi->s_clusters_per_group =
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 682e79965c16..8821c380a71a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1997,6 +1997,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
{
struct inode *inode = page->mapping->host;
+ /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
+ if (!copied)
+ goto unlock;
+
if (!PageUptodate(page)) {
/* Zero any unwritten bytes at the end of the page */
size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK;
@@ -2007,6 +2011,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
fuse_write_update_size(inode, pos + copied);
set_page_dirty(page);
+
+unlock:
unlock_page(page);
page_cache_release(page);
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 084452e34a12..bdff5ed57f10 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -203,8 +203,10 @@ static int __init test_suspend(void)
/* RTCs have initialized by now too ... can we use one? */
dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
- if (dev)
+ if (dev) {
rtc = rtc_class_open(dev_name(dev));
+ put_device(dev);
+ }
if (!rtc) {
printk(warn_no_rtc);
return 0;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6863310d6973..8ef1afacad82 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1500,24 +1500,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct bcm_sock *bo = bcm_sk(sk);
+ int ret = 0;
if (len < sizeof(*addr))
return -EINVAL;
- if (bo->bound)
- return -EISCONN;
+ lock_sock(sk);
+
+ if (bo->bound) {
+ ret = -EISCONN;
+ goto fail;
+ }
/* bind a device to this socket */
if (addr->can_ifindex) {
struct net_device *dev;
dev = dev_get_by_index(&init_net, addr->can_ifindex);
- if (!dev)
- return -ENODEV;
-
+ if (!dev) {
+ ret = -ENODEV;
+ goto fail;
+ }
if (dev->type != ARPHRD_CAN) {
dev_put(dev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
bo->ifindex = dev->ifindex;
@@ -1528,17 +1535,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
bo->ifindex = 0;
}
- bo->bound = 1;
-
if (proc_dir) {
/* unique socket address as filename */
sprintf(bo->procname, "%lu", sock_i_ino(sk));
bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
proc_dir,
&bcm_proc_fops, sk);
+ if (!bo->bcm_proc_read) {
+ ret = -ENOMEM;
+ goto fail;
+ }
}
- return 0;
+ bo->bound = 1;
+
+fail:
+ release_sock(sk);
+
+ return ret;
}
static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 9dec3bd1b63c..0a5df0cbaa28 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -140,7 +140,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
- timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
+ timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+ tb[NFTA_DYNSET_TIMEOUT])));
}
priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
@@ -227,7 +228,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
goto nla_put_failure;
- if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout)))
+ if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
+ cpu_to_be64(jiffies_to_msecs(priv->timeout))))
goto nla_put_failure;
if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
goto nla_put_failure;
diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
index 973e8c141567..17867e723a51 100755
--- a/scripts/gcc-x86_64-has-stack-protector.sh
+++ b/scripts/gcc-x86_64-has-stack-protector.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
if [ "$?" -eq "0" ] ; then
echo y
else