Message-ID: <2025092557-willfully-winner-164b@gregkh>
Date: Thu, 25 Sep 2025 11:42:57 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org,
	akpm@...ux-foundation.org,
	torvalds@...ux-foundation.org,
	stable@...r.kernel.org
Cc: lwn@....net,
	jslaby@...e.cz,
	Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Subject: Re: Linux 6.6.108
diff --git a/Makefile b/Makefile
index 9c9e272f48b8..1e3fb36bb71d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
-SUBLEVEL = 107
+SUBLEVEL = 108
EXTRAVERSION =
NAME = Pinguïn Aangedreven
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 25aa993abebc..9f56c15ed22d 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -503,10 +503,14 @@ config ARCH_STRICT_ALIGN
-mstrict-align build parameter to prevent unaligned accesses.
CPUs with h/w unaligned access support:
- Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
+ Loongson-2K2000/2K3000 and all of Loongson-3 series processors
+ based on LoongArch.
CPUs without h/w unaligned access support:
- Loongson-2K500/2K1000.
+ Loongson-2K0300/2K0500/2K1000.
+
+ If you want to make sure whether to support unaligned memory access
+ on your hardware, please read the bit 20 (UAL) of CPUCFG1 register.
This option is enabled by default to make the kernel be able to run
on all LoongArch systems. But you can disable it manually if you want
diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h
index 52f298f7293b..483c955f2ae5 100644
--- a/arch/loongarch/include/asm/acenv.h
+++ b/arch/loongarch/include/asm/acenv.h
@@ -10,9 +10,8 @@
#ifndef _ASM_LOONGARCH_ACENV_H
#define _ASM_LOONGARCH_ACENV_H
-/*
- * This header is required by ACPI core, but we have nothing to fill in
- * right now. Will be updated later when needed.
- */
+#ifdef CONFIG_ARCH_STRICT_ALIGN
+#define ACPI_MISALIGNMENT_NOT_SUPPORTED
+#endif /* CONFIG_ARCH_STRICT_ALIGN */
#endif /* _ASM_LOONGARCH_ACENV_H */
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
index 6b3bfb0092e6..2178b35896af 100644
--- a/arch/loongarch/kernel/env.c
+++ b/arch/loongarch/kernel/env.c
@@ -72,6 +72,8 @@ static int __init boardinfo_init(void)
struct kobject *loongson_kobj;
loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
+ if (!loongson_kobj)
+ return -ENOMEM;
return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
}
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index 8adca2000e51..d790acfc2c67 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -1229,10 +1229,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
device_set_wakeup_capable(&vu_dev->vdev.dev, true);
rc = register_virtio_device(&vu_dev->vdev);
- if (rc)
+ if (rc) {
put_device(&vu_dev->vdev.dev);
+ return rc;
+ }
vu_dev->registered = 1;
- return rc;
+ return 0;
error_init:
os_close_file(vu_dev->sock);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index abff6d45ae33..29566e457ec4 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4029,8 +4029,7 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
u64 cr8;
- if (nested_svm_virtualize_tpr(vcpu) ||
- kvm_vcpu_apicv_active(vcpu))
+ if (nested_svm_virtualize_tpr(vcpu))
return;
cr8 = kvm_get_cr8(vcpu);
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index b18f5a71e679..98aa303ad054 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -107,7 +107,7 @@ static inline void pgd_list_del(pgd_t *pgd)
#define UNSHARED_PTRS_PER_PGD \
(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
#define MAX_UNSHARED_PTRS_PER_PGD \
- max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
+ MAX_T(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 68cc9290cabe..886eccb97b04 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -969,6 +969,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
}
lock_sock(sk);
+ if (ctx->write) {
+ release_sock(sk);
+ return -EBUSY;
+ }
+ ctx->write = true;
+
if (ctx->init && !ctx->more) {
if (ctx->used) {
err = -EINVAL;
@@ -1018,6 +1024,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
continue;
}
+ ctx->merge = 0;
+
if (!af_alg_writable(sk)) {
err = af_alg_wait_for_wmem(sk, msg->msg_flags);
if (err)
@@ -1057,7 +1065,6 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
ctx->used += plen;
copied += plen;
size -= plen;
- ctx->merge = 0;
} else {
do {
struct page *pg;
@@ -1103,6 +1110,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unlock:
af_alg_data_wakeup(sk);
+ ctx->write = false;
release_sock(sk);
return copied ?: err;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ed004e1610dd..455e2a2b149f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1472,36 +1472,19 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
return error;
}
-static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode,
- struct block_device *bdev, unsigned long arg)
+static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
int err = 0;
- /*
- * If we don't hold exclusive handle for the device, upgrade to it
- * here to avoid changing device under exclusive owner.
- */
- if (!(mode & BLK_OPEN_EXCL)) {
- err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL);
- if (err)
- return err;
- }
-
- err = mutex_lock_killable(&lo->lo_mutex);
- if (err)
- goto abort_claim;
-
- if (lo->lo_state != Lo_bound) {
- err = -ENXIO;
- goto unlock;
- }
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
err = blk_validate_block_size(arg);
if (err)
return err;
if (lo->lo_queue->limits.logical_block_size == arg)
- goto unlock;
+ return 0;
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
@@ -1513,11 +1496,6 @@ static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode,
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
-unlock:
- mutex_unlock(&lo->lo_mutex);
-abort_claim:
- if (!(mode & BLK_OPEN_EXCL))
- bd_abort_claiming(bdev, loop_set_block_size);
return err;
}
@@ -1536,6 +1514,9 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
case LOOP_SET_DIRECT_IO:
err = loop_set_dio(lo, arg);
break;
+ case LOOP_SET_BLOCK_SIZE:
+ err = loop_set_block_size(lo, arg);
+ break;
default:
err = -EINVAL;
}
@@ -1590,12 +1571,9 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
break;
case LOOP_GET_STATUS64:
return loop_get_status64(lo, argp);
- case LOOP_SET_BLOCK_SIZE:
- if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
- return -EPERM;
- return loop_set_block_size(lo, mode, bdev, arg);
case LOOP_SET_CAPACITY:
case LOOP_SET_DIRECT_IO:
+ case LOOP_SET_BLOCK_SIZE:
if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
return -EPERM;
fallthrough;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 0c779a0326b6..6c3a40e6f4c6 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -109,8 +109,8 @@ static const u32 knl_interleave_list[] = {
0x104, 0x10c, 0x114, 0x11c, /* 20-23 */
};
#define MAX_INTERLEAVE \
- (max_t(unsigned int, ARRAY_SIZE(sbridge_interleave_list), \
- max_t(unsigned int, ARRAY_SIZE(ibridge_interleave_list), \
+ (MAX_T(unsigned int, ARRAY_SIZE(sbridge_interleave_list), \
+ MAX_T(unsigned int, ARRAY_SIZE(ibridge_interleave_list), \
ARRAY_SIZE(knl_interleave_list))))
struct interleave_pkg {
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index ddf944651c55..08885a5ba826 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2705,7 +2705,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
NULL, anx7625_intr_hpd_isr,
IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"anx7625-intp", platform);
if (ret) {
DRM_DEV_ERROR(dev, "fail to request irq\n");
@@ -2775,8 +2775,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
}
/* Add work function */
- if (platform->pdata.intp_irq)
+ if (platform->pdata.intp_irq) {
+ enable_irq(platform->pdata.intp_irq);
queue_work(platform->workqueue, &platform->work);
+ }
if (platform->pdata.audio_en)
anx7625_register_audio(dev, platform);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 858f5b650849..bdb9fc00c776 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2057,8 +2057,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
- if (!mhdp_state->current_mode)
- return;
+ if (!mhdp_state->current_mode) {
+ ret = -EINVAL;
+ goto out;
+ }
drm_mode_set_name(mhdp_state->current_mode);
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index d021497841b8..3969dc548cff 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -532,7 +532,7 @@ int drm_plane_create_color_properties(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct drm_property *prop;
- struct drm_prop_enum_list enum_list[max_t(int, DRM_COLOR_ENCODING_MAX,
+ struct drm_prop_enum_list enum_list[MAX_T(int, DRM_COLOR_ENCODING_MAX,
DRM_COLOR_RANGE_MAX)];
int i, len;
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 7dc30c2b56b3..d872054b874f 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -540,6 +540,7 @@ struct amd_irte_ops;
container_of((x), struct amd_io_pgtable, pgtbl_cfg)
struct amd_io_pgtable {
+ seqcount_t seqcount; /* Protects root/mode update */
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable iop;
int mode;
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 2892aa1b4dc1..b785d8239998 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/seqlock.h>
#include <asm/barrier.h>
@@ -171,8 +172,11 @@ static bool increase_address_space(struct protection_domain *domain,
*pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));
+ write_seqcount_begin(&domain->iop.seqcount);
domain->iop.root = pte;
domain->iop.mode += 1;
+ write_seqcount_end(&domain->iop.seqcount);
+
amd_iommu_update_and_flush_device_table(domain);
amd_iommu_domain_flush_complete(domain);
@@ -199,6 +203,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
gfp_t gfp,
bool *updated)
{
+ unsigned int seqcount;
int level, end_lvl;
u64 *pte, *page;
@@ -214,8 +219,14 @@ static u64 *alloc_pte(struct protection_domain *domain,
}
- level = domain->iop.mode - 1;
- pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
+ do {
+ seqcount = read_seqcount_begin(&domain->iop.seqcount);
+
+ level = domain->iop.mode - 1;
+ pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
+ } while (read_seqcount_retry(&domain->iop.seqcount, seqcount));
+
+
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
@@ -292,6 +303,7 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
unsigned long *page_size)
{
int level;
+ unsigned int seqcount;
u64 *pte;
*page_size = 0;
@@ -299,8 +311,12 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
if (address > PM_LEVEL_SIZE(pgtable->mode))
return NULL;
- level = pgtable->mode - 1;
- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ do {
+ seqcount = read_seqcount_begin(&pgtable->seqcount);
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ } while (read_seqcount_retry(&pgtable->seqcount, seqcount));
+
*page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@@ -524,6 +540,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
cfg->tlb = &v1_flush_ops;
+ seqcount_init(&pgtable->seqcount);
+
pgtable->iop.ops.map_pages = iommu_v1_map_pages;
pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 6a745616d85a..88bccdbb0bed 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2168,6 +2168,10 @@ static void switch_to_super_page(struct dmar_domain *domain,
struct dma_pte *pte = NULL;
unsigned long i;
+ if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
+ !IS_ALIGNED(end_pfn + 1, lvl_pages)))
+ return;
+
while (start_pfn <= end_pfn) {
if (!pte)
pte = pfn_to_dma_pte(domain, start_pfn, &level,
@@ -2241,7 +2245,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long pages_to_remove;
pteval |= DMA_PTE_LARGE_PAGE;
- pages_to_remove = min_t(unsigned long, nr_pages,
+ pages_to_remove = min_t(unsigned long,
+ round_down(nr_pages, lvl_pages),
nr_pte_to_next_page(pte) * lvl_pages);
end_pfn = iov_pfn + pages_to_remove - 1;
switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 1e27a5bce2d9..0bd76f8d4dc6 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1794,7 +1794,7 @@ static void integrity_metadata(struct work_struct *w)
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
sector_t sector;
unsigned int sectors_to_process;
@@ -2073,7 +2073,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
if (ic->internal_hash) {
- char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
@@ -2638,7 +2638,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start
unlikely(from_replay) &&
#endif
ic->internal_hash) {
- char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ char test_tag[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
(char *)access_journal_data(ic, i, l), test_tag);
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index ca01b7d204ba..955df7f370a7 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
host->pio_ptr = NULL;
host->pio_size = 0;
} else {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index cd5691ed9f17..f7ed129fc811 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2042,6 +2042,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
} else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ bond_has_slaves(bond) &&
memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
/* Set slave to random address to avoid duplicate mac
* address in later fail over.
@@ -3260,7 +3261,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
/* Find out through which dev should the packet go */
memset(&fl6, 0, sizeof(struct flowi6));
fl6.daddr = targets[i];
- fl6.flowi6_oif = bond->dev->ifindex;
dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
if (dst->error) {
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 7926aaef8f0c..ad2745c07c1a 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4220,8 +4220,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
cnic_bnx2x_delete_wait(dev, 0);
- cancel_delayed_work(&cp->delete_task);
- flush_workqueue(cnic_wq);
+ cancel_delayed_work_sync(&cp->delete_task);
if (atomic_read(&cp->iscsi_conn) != 0)
netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index de8a6ce86ad7..12105ffb5dac 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -126,7 +126,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->io_qmask.iq |= BIT_ULL(iq_no);
/* Set the 32B/64B mode for each input queue */
- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
iq->iqcmd_64B = (conf->instr_type == 64);
oct->fn_list.setup_iq_regs(oct, iq_no);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 76795bb0b564..cdab37e9634d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -2700,7 +2700,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
goto err_get_attr;
}
- ethsw->bpid = dpbp_attrs.id;
+ ethsw->bpid = dpbp_attrs.bpid;
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index c962987d8b51..6a9b47b005d2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -950,9 +950,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (!eop_desc)
break;
- /* prevent any other reads prior to eop_desc */
- smp_rmb();
-
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
if (tx_head == tx_desc)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 3a72b0793d4a..82725923555c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -476,7 +476,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
- cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+ cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d378aa55f22f..09ba60b2e744 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -109,8 +109,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
if (up) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
- NULL, NULL, NULL);
} else {
netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 851c499faa79..656a7b65f4c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1448,12 +1448,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int err;
+
+ netdev = mlx5_uplink_netdev_get(dev);
+ if (!netdev)
+ return 0;
+ priv = netdev_priv(netdev);
rpriv->netdev = priv->netdev;
- return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
- rpriv);
+ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+ rpriv);
+ mlx5_uplink_netdev_put(dev, netdev);
+ return err;
}
static void
@@ -1565,8 +1574,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
- void *ppriv = priv->ppriv;
+ struct mlx5e_priv *priv;
+ void *ppriv;
+
+ if (!netdev) {
+ ppriv = rpriv;
+ goto free_ppriv;
+ }
+
+ priv = netdev_priv(netdev);
+ ppriv = priv->ppriv;
if (rep->vport == MLX5_VPORT_UPLINK) {
mlx5e_vport_uplink_rep_unload(rpriv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index cc0f2be21a26..05fbd2098b26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "eswitch.h"
+#include "lib/mlx5.h"
#include "esw/qos.h"
#include "en/port.h"
#define CREATE_TRACE_POINTS
@@ -712,6 +713,71 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
return err;
}
+static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
+{
+ struct ethtool_link_ksettings lksettings;
+ struct net_device *slave, *master;
+ u32 speed = SPEED_UNKNOWN;
+
+ /* Lock ensures a stable reference to master and slave netdevice
+ * while port speed of master is queried.
+ */
+ ASSERT_RTNL();
+
+ slave = mlx5_uplink_netdev_get(mdev);
+ if (!slave)
+ goto out;
+
+ master = netdev_master_upper_dev_get(slave);
+ if (master && !__ethtool_get_link_ksettings(master, &lksettings))
+ speed = lksettings.base.speed;
+
+out:
+ mlx5_uplink_netdev_put(mdev, slave);
+ return speed;
+}
+
+static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max,
+ bool hold_rtnl_lock, struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!mlx5_lag_is_active(mdev))
+ goto skip_lag;
+
+ if (hold_rtnl_lock)
+ rtnl_lock();
+
+ *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev);
+
+ if (hold_rtnl_lock)
+ rtnl_unlock();
+
+ if (*link_speed_max != (u32)SPEED_UNKNOWN)
+ return 0;
+
+skip_lag:
+ err = mlx5_port_max_linkspeed(mdev, link_speed_max);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
+
+ return err;
+}
+
+static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev,
+ const char *name, u32 link_speed_max,
+ u64 value, struct netlink_ext_ack *extack)
+{
+ if (value > link_speed_max) {
+ pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
+ name, value, link_speed_max);
+ NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
{
u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
@@ -755,12 +821,6 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
u64 value;
int err;
- err = mlx5_port_max_linkspeed(mdev, &link_speed_max);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
- return err;
- }
-
value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
if (remainder) {
pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
@@ -769,12 +829,13 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return -EINVAL;
}
- if (value > link_speed_max) {
- pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
- name, value, link_speed_max);
- NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
- return -EINVAL;
- }
+ err = mlx5_esw_qos_max_link_speed_get(mdev, &link_speed_max, true, extack);
+ if (err)
+ return err;
+
+ err = mlx5_esw_qos_link_speed_verify(mdev, name, link_speed_max, value, extack);
+ if (err)
+ return err;
*rate = value;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 2b5826a785c4..adcc2bc9c8c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -52,6 +52,19 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
{
- return mdev->mlx5e_res.uplink_netdev;
+ struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res;
+ struct net_device *netdev;
+
+ mutex_lock(&mlx5e_res->uplink_netdev_lock);
+ netdev = mlx5e_res->uplink_netdev;
+ netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL);
+ mutex_unlock(&mlx5e_res->uplink_netdev_lock);
+ return netdev;
+}
+
+static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
+{
+ netdev_put(netdev, &mdev->mlx5e_res.tracker);
}
#endif
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 998586872599..c692d2e878b2 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
struct rx_info *info = &dev->rx_info;
unsigned next_rx;
- int rx_rc, len;
+ int len;
u32 cmdsts;
__le32 *desc;
unsigned long flags;
@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
if (likely(CMDSTS_OK & cmdsts)) {
#endif
skb_put(skb, len);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
goto netdev_mangle_me_harder_failed;
+ }
if (cmdsts & CMDSTS_DEST_MULTI)
ndev->stats.multicast++;
ndev->stats.rx_packets++;
@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
}
#endif
- rx_rc = netif_rx(skb);
- if (NET_RX_DROP == rx_rc) {
-netdev_mangle_me_harder_failed:
- ndev->stats.rx_dropped++;
- }
+ netif_rx(skb);
} else {
dev_kfree_skb_irq(skb);
}
+netdev_mangle_me_harder_failed:
nr++;
next_rx = info->next_rx;
desc = info->descs + (DESC_SIZE * next_rx);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index cdcead614e9f..ae421c270778 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -4461,10 +4461,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Add override window info to buffer */
+ /* Add override window info to buffer, preventing buffer overflow */
override_window_dwords =
- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
+ PROTECTION_OVERRIDE_DEPTH_DWORDS);
if (override_window_dwords) {
addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
offset += qed_grc_dump_addr_range(p_hwfn,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ff5389a8efc3..f3155d69a013 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2841,7 +2841,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
u32 channels_to_check = tx_channel_count > rx_channel_count ?
tx_channel_count : rx_channel_count;
u32 chan;
- int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
+ int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
/* Make sure we never check beyond our status buffer. */
if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 6e4023791b47..68b8e458a88f 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1981,6 +1981,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
rq->comp_ring.gen = VMXNET3_INIT_GEN;
rq->comp_ring.next2proc = 0;
+
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ page_pool_destroy(rq->page_pool);
+ rq->page_pool = NULL;
}
@@ -2021,11 +2026,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
}
}
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- page_pool_destroy(rq->page_pool);
- rq->page_pool = NULL;
-
if (rq->data_ring.base) {
dma_free_coherent(&adapter->pdev->dev,
rq->rx_ring[0].size * rq->data_ring.desc_size,
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
index 131388886acb..cfabd5aebb54 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
@@ -41,10 +41,10 @@ static const struct wilc_cfg_word g_cfg_word[] = {
};
static const struct wilc_cfg_str g_cfg_str[] = {
- {WID_FIRMWARE_VERSION, NULL},
- {WID_MAC_ADDR, NULL},
- {WID_ASSOC_RES_INFO, NULL},
- {WID_NIL, NULL}
+ {WID_FIRMWARE_VERSION, 0, NULL},
+ {WID_MAC_ADDR, 0, NULL},
+ {WID_ASSOC_RES_INFO, 0, NULL},
+ {WID_NIL, 0, NULL}
};
#define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R'
@@ -147,44 +147,58 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
switch (FIELD_GET(WILC_WID_TYPE, wid)) {
case WID_CHAR:
+ len = 3;
+ if (len + 2 > size)
+ return;
+
while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
i++;
if (cfg->b[i].id == wid)
cfg->b[i].val = info[4];
- len = 3;
break;
case WID_SHORT:
+ len = 4;
+ if (len + 2 > size)
+ return;
+
while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
i++;
if (cfg->hw[i].id == wid)
cfg->hw[i].val = get_unaligned_le16(&info[4]);
- len = 4;
break;
case WID_INT:
+ len = 6;
+ if (len + 2 > size)
+ return;
+
while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
i++;
if (cfg->w[i].id == wid)
cfg->w[i].val = get_unaligned_le32(&info[4]);
- len = 6;
break;
case WID_STR:
+ len = 2 + get_unaligned_le16(&info[2]);
+
while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
i++;
- if (cfg->s[i].id == wid)
+ if (cfg->s[i].id == wid) {
+ if (len > cfg->s[i].len || (len + 2 > size))
+ return;
+
memcpy(cfg->s[i].str, &info[2],
- get_unaligned_le16(&info[2]) + 2);
+ len);
+ }
- len = 2 + get_unaligned_le16(&info[2]);
break;
default:
@@ -384,12 +398,15 @@ int wilc_wlan_cfg_init(struct wilc *wl)
/* store the string cfg parameters */
wl->cfg.s[i].id = WID_FIRMWARE_VERSION;
wl->cfg.s[i].str = str_vals->firmware_version;
+ wl->cfg.s[i].len = sizeof(str_vals->firmware_version);
i++;
wl->cfg.s[i].id = WID_MAC_ADDR;
wl->cfg.s[i].str = str_vals->mac_address;
+ wl->cfg.s[i].len = sizeof(str_vals->mac_address);
i++;
wl->cfg.s[i].id = WID_ASSOC_RES_INFO;
wl->cfg.s[i].str = str_vals->assoc_rsp;
+ wl->cfg.s[i].len = sizeof(str_vals->assoc_rsp);
i++;
wl->cfg.s[i].id = WID_NIL;
wl->cfg.s[i].str = NULL;
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
index 7038b74f8e8f..5ae74bced7d7 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
@@ -24,12 +24,13 @@ struct wilc_cfg_word {
struct wilc_cfg_str {
u16 id;
+ u16 len;
u8 *str;
};
struct wilc_cfg_str_vals {
- u8 mac_address[7];
- u8 firmware_version[129];
+ u8 mac_address[8];
+ u8 firmware_version[130];
u8 assoc_rsp[WILC_MAX_ASSOC_RESP_FRAME_SIZE];
};
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 25382612e48a..a8e0dd5d30c4 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -305,7 +305,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver omap_cf_driver = {
+/*
+ * omap_cf_remove() lives in .exit.text. For drivers registered via
+ * platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver omap_cf_driver __refdata = {
.driver = {
.name = driver_name,
},
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
index 69584b685edb..2c8b1b7dda5b 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
@@ -16,10 +16,11 @@
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/property.h>
#include <linux/slab.h>
#define BCM_NS_USB3_PHY_BASE_ADDR_REG 0x1f
@@ -189,7 +190,6 @@ static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
{
struct device *dev = &mdiodev->dev;
- const struct of_device_id *of_id;
struct phy_provider *phy_provider;
struct device_node *syscon_np;
struct bcm_ns_usb3 *usb3;
@@ -203,10 +203,7 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
usb3->dev = dev;
usb3->mdiodev = mdiodev;
- of_id = of_match_device(bcm_ns_usb3_id_table, dev);
- if (!of_id)
- return -EINVAL;
- usb3->family = (uintptr_t)of_id->data;
+ usb3->family = (enum bcm_ns_family)device_get_match_data(dev);
syscon_np = of_parse_phandle(dev->of_node, "usb3-dmp-syscon", 0);
err = of_address_to_resource(syscon_np, 0, &res);
diff --git a/drivers/phy/marvell/phy-berlin-usb.c b/drivers/phy/marvell/phy-berlin-usb.c
index 78ef6ae72a9a..f26bf630da2c 100644
--- a/drivers/phy/marvell/phy-berlin-usb.c
+++ b/drivers/phy/marvell/phy-berlin-usb.c
@@ -8,9 +8,10 @@
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/reset.h>
#define USB_PHY_PLL 0x04
@@ -162,8 +163,6 @@ MODULE_DEVICE_TABLE(of, phy_berlin_usb_of_match);
static int phy_berlin_usb_probe(struct platform_device *pdev)
{
- const struct of_device_id *match =
- of_match_device(phy_berlin_usb_of_match, &pdev->dev);
struct phy_berlin_usb_priv *priv;
struct phy *phy;
struct phy_provider *phy_provider;
@@ -180,7 +179,7 @@ static int phy_berlin_usb_probe(struct platform_device *pdev)
if (IS_ERR(priv->rst_ctrl))
return PTR_ERR(priv->rst_ctrl);
- priv->pll_divider = *((u32 *)match->data);
+ priv->pll_divider = *((u32 *)device_get_match_data(&pdev->dev));
phy = devm_phy_create(&pdev->dev, NULL, &phy_berlin_usb_ops);
if (IS_ERR(phy)) {
diff --git a/drivers/phy/ralink/phy-ralink-usb.c b/drivers/phy/ralink/phy-ralink-usb.c
index 2bd8ad2e76ed..41bce5290e92 100644
--- a/drivers/phy/ralink/phy-ralink-usb.c
+++ b/drivers/phy/ralink/phy-ralink-usb.c
@@ -13,9 +13,10 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -171,18 +172,13 @@ static int ralink_usb_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
- const struct of_device_id *match;
struct ralink_usb_phy *phy;
- match = of_match_device(ralink_usb_phy_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- phy->clk = (uintptr_t)match->data;
+ phy->clk = (uintptr_t)device_get_match_data(&pdev->dev);
phy->base = NULL;
phy->sysctl = syscon_regmap_lookup_by_phandle(dev->of_node, "ralink,sysctl");
diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
index cbf3c140a138..4669e87b3243 100644
--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
+++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
@@ -12,10 +12,9 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -62,7 +61,7 @@ struct rockchip_pcie_data {
};
struct rockchip_pcie_phy {
- struct rockchip_pcie_data *phy_data;
+ const struct rockchip_pcie_data *phy_data;
struct regmap *reg_base;
struct phy_pcie_instance {
struct phy *phy;
@@ -349,7 +348,6 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
struct rockchip_pcie_phy *rk_phy;
struct phy_provider *phy_provider;
struct regmap *grf;
- const struct of_device_id *of_id;
int i;
u32 phy_num;
@@ -363,11 +361,10 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
if (!rk_phy)
return -ENOMEM;
- of_id = of_match_device(rockchip_pcie_phy_dt_ids, &pdev->dev);
- if (!of_id)
+ rk_phy->phy_data = device_get_match_data(&pdev->dev);
+ if (!rk_phy->phy_data)
return -EINVAL;
- rk_phy->phy_data = (struct rockchip_pcie_data *)of_id->data;
rk_phy->reg_base = grf;
mutex_init(&rk_phy->pcie_mutex);
diff --git a/drivers/phy/rockchip/phy-rockchip-usb.c b/drivers/phy/rockchip/phy-rockchip-usb.c
index 8454285977eb..666a896c8f0a 100644
--- a/drivers/phy/rockchip/phy-rockchip-usb.c
+++ b/drivers/phy/rockchip/phy-rockchip-usb.c
@@ -13,10 +13,9 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/regmap.h>
@@ -458,7 +457,6 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rockchip_usb_phy_base *phy_base;
struct phy_provider *phy_provider;
- const struct of_device_id *match;
struct device_node *child;
int err;
@@ -466,14 +464,12 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
if (!phy_base)
return -ENOMEM;
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match || !match->data) {
+ phy_base->pdata = device_get_match_data(dev);
+ if (!phy_base->pdata) {
dev_err(dev, "missing phy data\n");
return -EINVAL;
}
- phy_base->pdata = match->data;
-
phy_base->dev = dev;
phy_base->reg_base = ERR_PTR(-ENODEV);
if (dev->parent && dev->parent->of_node)
diff --git a/drivers/phy/ti/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c
index 76c5595f0859..2fdb8f4241c7 100644
--- a/drivers/phy/ti/phy-omap-control.c
+++ b/drivers/phy/ti/phy-omap-control.c
@@ -8,9 +8,9 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
@@ -268,20 +268,15 @@ MODULE_DEVICE_TABLE(of, omap_control_phy_id_table);
static int omap_control_phy_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
struct omap_control_phy *control_phy;
- of_id = of_match_device(omap_control_phy_id_table, &pdev->dev);
- if (!of_id)
- return -EINVAL;
-
control_phy = devm_kzalloc(&pdev->dev, sizeof(*control_phy),
GFP_KERNEL);
if (!control_phy)
return -ENOMEM;
control_phy->dev = &pdev->dev;
- control_phy->type = *(enum omap_control_phy_type *)of_id->data;
+ control_phy->type = *(enum omap_control_phy_type *)device_get_match_data(&pdev->dev);
if (control_phy->type == OMAP_CTRL_TYPE_OTGHS) {
control_phy->otghs_control =
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index 6bd3c7492330..0fea766a98d7 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -19,6 +19,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@@ -362,6 +363,13 @@ static void omap_usb2_init_errata(struct omap_usb *phy)
phy->flags |= OMAP_USB2_DISABLE_CHRG_DET;
}
+static void omap_usb2_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
static int omap_usb2_probe(struct platform_device *pdev)
{
struct omap_usb *phy;
@@ -371,16 +379,13 @@ static int omap_usb2_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device_node *control_node;
struct platform_device *control_pdev;
- const struct of_device_id *of_id;
- struct usb_phy_data *phy_data;
-
- of_id = of_match_device(omap_usb2_id_table, &pdev->dev);
+ const struct usb_phy_data *phy_data;
+ int ret;
- if (!of_id)
+ phy_data = device_get_match_data(&pdev->dev);
+ if (!phy_data)
return -EINVAL;
- phy_data = (struct usb_phy_data *)of_id->data;
-
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
@@ -426,6 +431,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
return -EINVAL;
}
phy->control_dev = &control_pdev->dev;
+
+ ret = devm_add_action_or_reset(&pdev->dev, omap_usb2_put_device,
+ phy->control_dev);
+ if (ret)
+ return ret;
} else {
if (of_property_read_u32_index(node,
"syscon-phy-power", 1,
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index 3127f3702c3a..8e94d2c6e266 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/phy/phy.h>
#include <linux/of.h>
@@ -791,23 +792,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
int ret;
- const struct of_device_id *match;
- struct pipe3_data *data;
+ const struct pipe3_data *data;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- match = of_match_device(ti_pipe3_id_table, dev);
- if (!match)
+ data = device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct pipe3_data *)match->data;
- if (!data) {
- dev_err(dev, "no driver data\n");
- return -EINVAL;
- }
-
phy->dev = dev;
phy->mode = data->mode;
phy->dpll_map = data->dpll_map;
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index e51fa2c694bc..1f06dee4b8b4 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1872,8 +1872,8 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
- if ((cache.flags & 0xff) == 0xff)
- cache.flags = -1; /* read error */
+ if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff)
+ cache.flags = -ENODEV; /* bq27000 hdq read error */
if (cache.flags >= 0) {
cache.temperature = bq27xxx_battery_read_temperature(di);
if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index fc079b9dcf71..502571f0c203 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -1383,11 +1383,6 @@ static int pcf2127_i2c_probe(struct i2c_client *client)
variant = &pcf21xx_cfg[type];
}
- if (variant->type == PCF2131) {
- config.read_flag_mask = 0x0;
- config.write_flag_mask = 0x0;
- }
-
config.max_register = variant->max_register,
regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap,
@@ -1461,6 +1456,11 @@ static int pcf2127_spi_probe(struct spi_device *spi)
variant = &pcf21xx_cfg[type];
}
+ if (variant->type == PCF2131) {
+ config.read_flag_mask = 0x0;
+ config.write_flag_mask = 0x0;
+ }
+
config.max_register = variant->max_register;
regmap = devm_regmap_init_spi(spi, &config);
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 2cd8c757c653..764657070883 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -86,13 +86,34 @@ static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
return string_length;
}
+static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
+{
+ struct xhci_ep_ctx *ep_ctx;
+ unsigned int max_burst;
+ dma_addr_t deq;
+
+ max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
+
+ /* Populate bulk out endpoint context: */
+ ep_ctx = dbc_bulkout_ctx(dbc);
+ deq = dbc_bulkout_enq(dbc);
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
+ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
+
+ /* Populate bulk in endpoint context: */
+ ep_ctx = dbc_bulkin_ctx(dbc);
+ deq = dbc_bulkin_enq(dbc);
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
+ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
+}
+
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
struct dbc_info_context *info;
- struct xhci_ep_ctx *ep_ctx;
u32 dev_info;
- dma_addr_t deq, dma;
- unsigned int max_burst;
+ dma_addr_t dma;
if (!dbc)
return;
@@ -106,20 +127,8 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
info->length = cpu_to_le32(string_length);
- /* Populate bulk out endpoint context: */
- ep_ctx = dbc_bulkout_ctx(dbc);
- max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
- deq = dbc_bulkout_enq(dbc);
- ep_ctx->ep_info = 0;
- ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
- ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
-
- /* Populate bulk in endpoint context: */
- ep_ctx = dbc_bulkin_ctx(dbc);
- deq = dbc_bulkin_enq(dbc);
- ep_ctx->ep_info = 0;
- ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
- ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
+ /* Populate bulk in and out endpoint contexts: */
+ xhci_dbc_init_ep_contexts(dbc);
/* Set DbC context and info registers: */
lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
@@ -421,6 +430,42 @@ dbc_alloc_ctx(struct device *dev, gfp_t flags)
return ctx;
}
+static void xhci_dbc_ring_init(struct xhci_ring *ring)
+{
+ struct xhci_segment *seg = ring->first_seg;
+
+ /* clear all trbs on ring in case of old ring */
+ memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
+
+ /* Only event ring does not use link TRB */
+ if (ring->type != TYPE_EVENT) {
+ union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+
+ trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
+ trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
+ }
+ xhci_initialize_ring_info(ring, 1);
+}
+
+static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc)
+{
+ struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring;
+ struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring;
+
+ if (!in_ring || !out_ring || !dbc->ctx) {
+ dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n");
+ return -ENODEV;
+ }
+
+ xhci_dbc_ring_init(in_ring);
+ xhci_dbc_ring_init(out_ring);
+
+ /* set ep context enqueue, dequeue, and cycle to initial values */
+ xhci_dbc_init_ep_contexts(dbc);
+
+ return 0;
+}
+
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
@@ -449,15 +494,10 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
seg->dma = dma;
- /* Only event ring does not use link TRB */
- if (type != TYPE_EVENT) {
- union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
-
- trb->link.segment_ptr = cpu_to_le64(dma);
- trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
- }
INIT_LIST_HEAD(&ring->td_list);
- xhci_initialize_ring_info(ring, 1);
+
+ xhci_dbc_ring_init(ring);
+
return ring;
dma_fail:
kfree(seg);
@@ -850,7 +890,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
dev_info(dbc->dev, "DbC cable unplugged\n");
dbc->state = DS_ENABLED;
xhci_dbc_flush_requests(dbc);
-
+ xhci_dbc_reinit_ep_rings(dbc);
return EVT_DISC;
}
@@ -860,7 +900,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
writel(portsc, &dbc->regs->portsc);
dbc->state = DS_ENABLED;
xhci_dbc_flush_requests(dbc);
-
+ xhci_dbc_reinit_ep_rings(dbc);
return EVT_DISC;
}
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 6d16506bbdc0..d49ce7768f7f 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1717,10 +1717,10 @@ static int check_inode_ref(struct extent_buffer *leaf,
while (ptr < end) {
u16 namelen;
- if (unlikely(ptr + sizeof(iref) > end)) {
+ if (unlikely(ptr + sizeof(*iref) > end)) {
inode_ref_err(leaf, slot,
"inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
- ptr, end, sizeof(iref));
+ ptr, end, sizeof(*iref));
return -EUCLEAN;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index e5d6bc1bb5e5..4b53e19f7520 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1998,7 +1998,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
search_key.objectid = log_key.objectid;
search_key.type = BTRFS_INODE_EXTREF_KEY;
- search_key.offset = key->objectid;
+ search_key.offset = btrfs_extref_hash(key->objectid, name.name, name.len);
ret = backref_in_log(root->log_root, &search_key, key->objectid, &name);
if (ret < 0) {
goto out;
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index 905c7eadf967..59fda8ce0790 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -1075,7 +1075,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
************************************************************************/
static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d.%d\n",
NILFS_CURRENT_REV, NILFS_MINOR_REV);
@@ -1087,7 +1087,7 @@ static const char features_readme_str[] =
"(1) revision\n\tshow current revision of NILFS file system driver.\n";
static ssize_t nilfs_feature_README_show(struct kobject *kobj,
- struct attribute *attr,
+ struct kobj_attribute *attr,
char *buf)
{
return sysfs_emit(buf, features_readme_str);
diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h
index 78a87a016928..d370cd5cce3f 100644
--- a/fs/nilfs2/sysfs.h
+++ b/fs/nilfs2/sysfs.h
@@ -50,16 +50,16 @@ struct nilfs_sysfs_dev_subgroups {
struct completion sg_segments_kobj_unregister;
};
-#define NILFS_COMMON_ATTR_STRUCT(name) \
+#define NILFS_KOBJ_ATTR_STRUCT(name) \
struct nilfs_##name##_attr { \
struct attribute attr; \
- ssize_t (*show)(struct kobject *, struct attribute *, \
+ ssize_t (*show)(struct kobject *, struct kobj_attribute *, \
char *); \
- ssize_t (*store)(struct kobject *, struct attribute *, \
+ ssize_t (*store)(struct kobject *, struct kobj_attribute *, \
const char *, size_t); \
}
-NILFS_COMMON_ATTR_STRUCT(feature);
+NILFS_KOBJ_ATTR_STRUCT(feature);
#define NILFS_DEV_ATTR_STRUCT(name) \
struct nilfs_##name##_attr { \
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index 713bd1dcd39c..be9be8f36331 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -1064,8 +1064,10 @@ static int smbd_negotiate(struct smbd_connection *info)
log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n",
rc, response->sge.addr,
response->sge.length, response->sge.lkey);
- if (rc)
+ if (rc) {
+ put_receive_buffer(info, response);
return rc;
+ }
init_completion(&info->negotiate_completion);
info->negotiate_done = false;
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index a4ff1167c9a1..3720304d6792 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -553,7 +553,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
case SMB_DIRECT_MSG_DATA_TRANSFER: {
struct smb_direct_data_transfer *data_transfer =
(struct smb_direct_data_transfer *)recvmsg->packet;
- unsigned int data_length;
+ u32 remaining_data_length, data_offset, data_length;
int avail_recvmsg_count, receive_credits;
if (wc->byte_len <
@@ -563,15 +563,25 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
+ remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length);
data_length = le32_to_cpu(data_transfer->data_length);
- if (data_length) {
- if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
- (u64)data_length) {
- put_recvmsg(t, recvmsg);
- smb_direct_disconnect_rdma_connection(t);
- return;
- }
+ data_offset = le32_to_cpu(data_transfer->data_offset);
+ if (wc->byte_len < data_offset ||
+ wc->byte_len < (u64)data_offset + data_length) {
+ put_recvmsg(t, recvmsg);
+ smb_direct_disconnect_rdma_connection(t);
+ return;
+ }
+ if (remaining_data_length > t->max_fragmented_recv_size ||
+ data_length > t->max_fragmented_recv_size ||
+ (u64)remaining_data_length + (u64)data_length >
+ (u64)t->max_fragmented_recv_size) {
+ put_recvmsg(t, recvmsg);
+ smb_direct_disconnect_rdma_connection(t);
+ return;
+ }
+ if (data_length) {
if (t->full_packet_received)
recvmsg->first_segment = true;
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 08b803a4fcde..7402bf3e037e 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -134,6 +134,7 @@ struct af_alg_async_req {
* SG?
* @enc: Cryptographic operation to be performed when
* recvmsg is invoked.
+ * @write: True if we are in the middle of a write.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
* @inflight: Non-zero when AIO requests are in flight.
@@ -149,10 +150,11 @@ struct af_alg_ctx {
size_t used;
atomic_t rcvused;
- bool more;
- bool merge;
- bool enc;
- bool init;
+ u32 more:1,
+ merge:1,
+ enc:1,
+ write:1,
+ init:1;
unsigned int len;
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index 2ec559284a9f..9c2848abc804 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -45,17 +45,20 @@
#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y))
-#define __cmp_once(op, x, y, unique_x, unique_y) ({ \
- typeof(x) unique_x = (x); \
- typeof(y) unique_y = (y); \
+#define __cmp_once_unique(op, type, x, y, ux, uy) \
+ ({ type ux = (x); type uy = (y); __cmp(op, ux, uy); })
+
+#define __cmp_once(op, type, x, y) \
+ __cmp_once_unique(op, type, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
+
+#define __careful_cmp_once(op, x, y) ({ \
static_assert(__types_ok(x, y), \
#op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \
- __cmp(op, unique_x, unique_y); })
+ __cmp_once(op, __auto_type, x, y); })
#define __careful_cmp(op, x, y) \
__builtin_choose_expr(__is_constexpr((x) - (y)), \
- __cmp(op, x, y), \
- __cmp_once(op, x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y)))
+ __cmp(op, x, y), __careful_cmp_once(op, x, y))
#define __clamp(val, lo, hi) \
((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
@@ -158,7 +161,7 @@
* @x: first value
* @y: second value
*/
-#define min_t(type, x, y) __careful_cmp(min, (type)(x), (type)(y))
+#define min_t(type, x, y) __cmp_once(min, type, x, y)
/**
* max_t - return maximum of two values, using the specified type
@@ -166,7 +169,7 @@
* @x: first value
* @y: second value
*/
-#define max_t(type, x, y) __careful_cmp(max, (type)(x), (type)(y))
+#define max_t(type, x, y) __cmp_once(max, type, x, y)
/*
* Do not check the array parameter using __must_be_array().
@@ -270,4 +273,11 @@ static inline bool in_range32(u32 val, u32 start, u32 len)
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+/*
+ * Use these carefully: no type checking, and uses the arguments
+ * multiple times. Use for obvious constants only.
+ */
+#define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b))
+#define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b))
+
#endif /* _LINUX_MINMAX_H */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 696a2227869f..c0e0468b25a1 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -677,6 +677,7 @@ struct mlx5e_resources {
struct mlx5_sq_bfreg bfreg;
} hw_objs;
struct net_device *uplink_netdev;
+ netdevice_tracker tracker;
struct mutex uplink_netdev_lock;
struct mlx5_crypto_dek_priv *dek_priv;
};
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index e83c4c095041..a4bf7f0989b2 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -41,7 +41,7 @@ extern unsigned int pageblock_order;
* Huge pages are a constant size, but don't exceed the maximum allocation
* granularity.
*/
-#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER)
+#define pageblock_order MIN_T(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index ee9c49f949a2..00d622121673 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -81,6 +81,8 @@ enum {
#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1)
+#define MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 _BITUL(0)
+
#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
@@ -132,13 +134,13 @@ struct mptcp_info {
/*
* MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport
+ * sport, dport, server-side, [flags]
* A new MPTCP connection has been created. It is the good time to allocate
* memory and send ADD_ADDR if needed. Depending on the traffic-patterns
* it can take a long time until the MPTCP_EVENT_ESTABLISHED is sent.
*
* MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport
+ * sport, dport, server-side, [flags]
* A MPTCP connection is established (can start new subflows).
*
* MPTCP_EVENT_CLOSED: token
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 897f07014c01..07a5824ad98d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1459,9 +1459,10 @@ static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
{
- io_tw_lock(req->ctx, ts);
- /* req->task == current here, checking PF_EXITING is safe */
- if (unlikely(req->task->flags & PF_EXITING))
+ struct io_ring_ctx *ctx = req->ctx;
+
+ io_tw_lock(ctx, ts);
+ if (unlikely(io_should_terminate_tw(ctx)))
io_req_defer_failed(req, -EFAULT);
else if (req->flags & REQ_F_FORCE_ASYNC)
io_queue_iowq(req);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 59f5f71037ff..0f6b47f55c24 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -394,6 +394,19 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
ctx->submitter_task == current);
}
+/*
+ * Terminate the request if either of these conditions are true:
+ *
+ * 1) It's being executed by the original task, but that task is marked
+ * with PF_EXITING as it's exiting.
+ * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
+ * our fallback task_work.
+ */
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
+{
+ return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
+}
+
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
io_req_set_res(req, res, 0);
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 65935ec8de89..b6c8acd8625e 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -258,8 +258,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
{
int v;
- /* req->task == current here, checking PF_EXITING is safe */
- if (unlikely(req->task->flags & PF_EXITING))
+ if (unlikely(io_should_terminate_tw(req->ctx)))
return -ECANCELED;
do {
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 277e22d55c61..be2a0f6c209b 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -307,7 +307,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *t
int ret = -ENOENT;
if (prev) {
- if (!(req->task->flags & PF_EXITING)) {
+ if (!io_should_terminate_tw(req->ctx)) {
struct io_cancel_data cd = {
.ctx = req->ctx,
.data = prev->cqe.user_data,
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index e8ef062f6ca0..5135838b5899 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -123,8 +123,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
* of concurrent destructions. Use a separate workqueue so that cgroup
* destruction work items don't end up filling up max_active of system_wq
* which may lead to deadlock.
+ *
+ * A cgroup destruction should enqueue work sequentially to:
+ * cgroup_offline_wq: use for css offline work
+ * cgroup_release_wq: use for css release work
+ * cgroup_free_wq: use for free work
+ *
+ * Rationale for using separate workqueues:
+ * The cgroup root free work may depend on completion of other css offline
+ * operations. If all tasks were enqueued to a single workqueue, this could
+ * create a deadlock scenario where:
+ * - Free work waits for other css offline work to complete.
+ * - But other css offline work is queued after free work in the same queue.
+ *
+ * Example deadlock scenario with single workqueue (cgroup_destroy_wq):
+ * 1. umount net_prio
+ * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
+ * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
+ * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
+ * 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
+ * which can never complete as it's behind in the same queue and
+ * workqueue's max_active is 1.
*/
-static struct workqueue_struct *cgroup_destroy_wq;
+static struct workqueue_struct *cgroup_offline_wq;
+static struct workqueue_struct *cgroup_release_wq;
+static struct workqueue_struct *cgroup_free_wq;
/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
@@ -5435,7 +5458,7 @@ static void css_release_work_fn(struct work_struct *work)
cgroup_unlock();
INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
+ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
}
static void css_release(struct percpu_ref *ref)
@@ -5444,7 +5467,7 @@ static void css_release(struct percpu_ref *ref)
container_of(ref, struct cgroup_subsys_state, refcnt);
INIT_WORK(&css->destroy_work, css_release_work_fn);
- queue_work(cgroup_destroy_wq, &css->destroy_work);
+ queue_work(cgroup_release_wq, &css->destroy_work);
}
static void init_and_link_css(struct cgroup_subsys_state *css,
@@ -5566,7 +5589,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
err_free_css:
list_del_rcu(&css->rstat_css_node);
INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
+ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
return ERR_PTR(err);
}
@@ -5801,7 +5824,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
if (atomic_dec_and_test(&css->online_cnt)) {
INIT_WORK(&css->destroy_work, css_killed_work_fn);
- queue_work(cgroup_destroy_wq, &css->destroy_work);
+ queue_work(cgroup_offline_wq, &css->destroy_work);
}
}
@@ -6173,8 +6196,14 @@ static int __init cgroup_wq_init(void)
* We would prefer to do this in cgroup_init() above, but that
* is called before init_workqueues(): so leave this until after.
*/
- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
- BUG_ON(!cgroup_destroy_wq);
+ cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
+ BUG_ON(!cgroup_offline_wq);
+
+ cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
+ BUG_ON(!cgroup_release_wq);
+
+ cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1);
+ BUG_ON(!cgroup_free_wq);
return 0;
}
core_initcall(cgroup_wq_init);
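
The comment block added above describes an ordering inversion that only exists because every destruction step shared one max_active=1 queue. A tiny userspace simulation of the idea, with invented names and no real workqueues; the stalled case is detected and reported instead of actually hanging:

#include <stdio.h>
#include <stdbool.h>

struct work {
	const char *name;
	bool is_offline;		/* completing it satisfies the dependency */
	bool needs_offline_done;	/* must wait for an offline item */
};

static bool offline_done;

/* One queue, max_active == 1: items run strictly in order.  An item whose
 * dependency sits behind it in the same queue can never make progress. */
static void run_queue(const char *qname, const struct work *q, int n)
{
	for (int i = 0; i < n; i++) {
		if (q[i].needs_offline_done && !offline_done) {
			printf("%s: \"%s\" waits for offline work queued behind it -> deadlock\n",
			       qname, q[i].name);
			return;
		}
		if (q[i].is_offline)
			offline_done = true;
		printf("%s: \"%s\" done\n", qname, q[i].name);
	}
}

int main(void)
{
	struct work root_free   = { "net_prio root free",     false, true  };
	struct work css_offline = { "perf_event css offline", true,  false };

	/* Old layout: both items share cgroup_destroy_wq, free work first. */
	struct work single_q[] = { root_free, css_offline };
	offline_done = false;
	run_queue("cgroup_destroy_wq", single_q, 2);

	/* New layout: offline and free work drain on separate queues. */
	struct work offline_q[] = { css_offline };
	struct work free_q[]    = { root_free };
	offline_done = false;
	run_queue("cgroup_offline_wq", offline_q, 1);
	run_queue("cgroup_free_wq", free_q, 1);
	return 0;
}
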
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index a85b0aba3646..b444767b15a5 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -43,7 +43,7 @@
#include <net/sock.h>
#include <net/raw.h>
-#define TCPUDP_MIB_MAX max_t(u32, UDP_MIB_MAX, TCP_MIB_MAX)
+#define TCPUDP_MIB_MAX MAX_T(u32, UDP_MIB_MAX, TCP_MIB_MAX)
/*
* Report socket allocation statistics [mea@....fi]
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index a4bbe959d1e2..40a2f172be2c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3011,6 +3011,7 @@ int tcp_disconnect(struct sock *sk, int flags)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int old_state = sk->sk_state;
+ struct request_sock *req;
u32 seq;
if (old_state != TCP_CLOSE)
@@ -3121,6 +3122,10 @@ int tcp_disconnect(struct sock *sk, int flags)
/* Clean up fastopen related fields */
+ req = rcu_dereference_protected(tp->fastopen_rsk,
+ lockdep_sock_is_held(sk));
+ if (req)
+ reqsk_fastopen_remove(sk, req, false);
tcp_free_fastopen_req(tp);
inet_clear_bit(DEFER_CONNECT, sk);
tp->fastopen_client_fail = 0;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 6d1d9221649d..752327b10dde 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -27,7 +27,7 @@
#include <net/ipv6.h>
#define MAX4(a, b, c, d) \
- max_t(u32, max_t(u32, a, b), max_t(u32, c, d))
+ MAX_T(u32, MAX_T(u32, a, b), MAX_T(u32, c, d))
#define SNMP_MIB_MAX MAX4(UDP_MIB_MAX, TCP_MIB_MAX, \
IPSTATS_MIB_MAX, ICMP_MIB_MAX)
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 78aa3bc51586..5f8f72ca2769 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1273,7 +1273,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct cfg80211_ftm_responder_stats *ftm_stats)
{
- u32 ret = -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
if (local->ops->get_ftm_responder_stats)
ret = local->ops->get_ftm_responder_stats(&local->hw,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 3a6fff98748b..80b143bde93d 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -965,7 +965,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
int result, i;
enum nl80211_band band;
int channels, max_bitrates;
- bool supp_ht, supp_vht, supp_he, supp_eht;
+ bool supp_ht, supp_vht, supp_he, supp_eht, supp_s1g;
struct cfg80211_chan_def dflt_chandef = {};
if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
@@ -1081,6 +1081,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_vht = false;
supp_he = false;
supp_eht = false;
+ supp_s1g = false;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband;
@@ -1127,6 +1128,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
max_bitrates = sband->n_bitrates;
supp_ht = supp_ht || sband->ht_cap.ht_supported;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
+ supp_s1g = supp_s1g || sband->s1g_cap.s1g;
for (i = 0; i < sband->n_iftype_data; i++) {
const struct ieee80211_sband_iftype_data *iftd;
@@ -1253,6 +1255,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->scan_ies_len +=
2 + sizeof(struct ieee80211_vht_cap);
+ if (supp_s1g)
+ local->scan_ies_len += 2 + sizeof(struct ieee80211_s1g_cap);
+
/*
* HE cap element is variable in size - set len to allow max size */
if (supp_he) {
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 9406d2d555e7..b245abd08c82 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -985,13 +985,13 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
return false;
}
- if (mp_opt->deny_join_id0)
- WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
-
if (unlikely(!READ_ONCE(msk->pm.server_side)))
pr_warn_once("bogus mpc option on established client sk");
set_fully_established:
+ if (mp_opt->deny_join_id0)
+ WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+
mptcp_data_lock((struct sock *)msk);
__mptcp_subflow_fully_established(msk, subflow, mp_opt);
mptcp_data_unlock((struct sock *)msk);
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index e8042014bd5f..de24989b05a6 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -2252,6 +2252,7 @@ static int mptcp_event_created(struct sk_buff *skb,
const struct sock *ssk)
{
int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token);
+ u16 flags = 0;
if (err)
return err;
@@ -2259,6 +2260,12 @@ static int mptcp_event_created(struct sk_buff *skb,
if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
return -EMSGSIZE;
+ if (READ_ONCE(msk->pm.remote_deny_join_id0))
+ flags |= MPTCP_PM_EV_FLAG_DENY_JOIN_ID0;
+
+ if (flags && nla_put_u16(skb, MPTCP_ATTR_FLAGS, flags))
+ return -EMSGSIZE;
+
return mptcp_event_add_subflow(skb, ssk);
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index e3f09467b36b..643d64bdef2e 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -415,6 +415,20 @@ static void mptcp_close_wake_up(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+static void mptcp_shutdown_subflows(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow;
+
+ slow = lock_sock_fast(ssk);
+ tcp_shutdown(ssk, SEND_SHUTDOWN);
+ unlock_sock_fast(ssk, slow);
+ }
+}
+
static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -438,6 +452,7 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
break;
case TCP_CLOSING:
case TCP_LAST_ACK:
+ mptcp_shutdown_subflows(msk);
mptcp_set_state(sk, TCP_CLOSE);
break;
}
@@ -605,6 +620,7 @@ static bool mptcp_check_data_fin(struct sock *sk)
mptcp_set_state(sk, TCP_CLOSING);
break;
case TCP_FIN_WAIT2:
+ mptcp_shutdown_subflows(msk);
mptcp_set_state(sk, TCP_CLOSE);
break;
default:
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 0c9b9c0c277c..dfee1890c841 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -863,6 +863,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
ctx->subflow_id = 1;
owner = mptcp_sk(ctx->conn);
+
+ if (mp_opt.deny_join_id0)
+ WRITE_ONCE(owner->pm.remote_deny_join_id0, true);
+
mptcp_pm_new_connection(owner, child, 1);
/* with OoO packets we can reach here without ingress
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 28c1b0022178..bd861191157b 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -133,12 +133,15 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
&off, PAGE_SIZE);
- if (unlikely(ret != ibmr->sg_dma_len))
- return ret < 0 ? ret : -EINVAL;
+ if (unlikely(ret != ibmr->sg_dma_len)) {
+ ret = ret < 0 ? ret : -EINVAL;
+ goto out_inc;
+ }
- if (cmpxchg(&frmr->fr_state,
- FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
- return -EBUSY;
+ if (cmpxchg(&frmr->fr_state, FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) {
+ ret = -EBUSY;
+ goto out_inc;
+ }
atomic_inc(&ibmr->ic->i_fastreg_inuse_count);
@@ -166,11 +169,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
/* Failure here can be because of -ENOMEM as well */
rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
- atomic_inc(&ibmr->ic->i_fastreg_wrs);
if (printk_ratelimit())
pr_warn("RDS/IB: %s returned error(%d)\n",
__func__, ret);
- goto out;
+ goto out_inc;
}
/* Wait for the registration to complete in order to prevent an invalid
@@ -179,8 +181,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
*/
wait_event(frmr->fr_reg_done, !frmr->fr_reg);
-out:
+ return ret;
+out_inc:
+ atomic_inc(&ibmr->ic->i_fastreg_wrs);
return ret;
}
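
The reshuffled error handling above funnels every failure path through a single out_inc label that hands the work-request credit back, instead of each path remembering (or forgetting) to do so. A generic sketch of the pattern, with made-up names and error values rather than the RDS code:

#include <stdio.h>

static int credits = 8;

/* The caller is assumed to have consumed one credit before calling; every
 * failure path must refund it, so they all exit through one label. */
static int do_registration(int map_ok, int state_ok)
{
	int ret;

	if (!map_ok) {
		ret = -22;		/* -EINVAL */
		goto out_inc;
	}
	if (!state_ok) {
		ret = -16;		/* -EBUSY */
		goto out_inc;
	}
	return 0;			/* success keeps the credit in use */

out_inc:
	credits++;			/* single place that refunds the credit */
	return ret;
}

int main(void)
{
	credits--;				/* consume a credit, as the caller would */
	int ret = do_registration(1, 0);	/* fails with -EBUSY */
	printf("ret=%d credits=%d\n", ret, credits);	/* credit refunded: 8 */
	return 0;
}
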
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index b12edbe0ef45..badc71acbbdd 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -79,10 +79,10 @@ static int rfkill_gpio_acpi_probe(struct device *dev,
static int rfkill_gpio_probe(struct platform_device *pdev)
{
struct rfkill_gpio_data *rfkill;
- struct gpio_desc *gpio;
+ const char *type_name = NULL;
const char *name_property;
const char *type_property;
- const char *type_name;
+ struct gpio_desc *gpio;
int ret;
rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL);
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 5dc61c85c076..a3c5c5a59fda 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -141,6 +141,7 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx);
int wait_on_pending_writer(struct sock *sk, long *timeo);
void tls_err_abort(struct sock *sk, int err);
+void tls_strp_abort_strp(struct tls_strparser *strp, int err);
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index 6ce64a6e4495..ae723cd6af39 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -12,7 +12,7 @@
static struct workqueue_struct *tls_strp_wq;
-static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
+void tls_strp_abort_strp(struct tls_strparser *strp, int err)
{
if (strp->stopped)
return;
@@ -210,11 +210,17 @@ static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
struct sk_buff *in_skb, unsigned int offset,
size_t in_len)
{
+ unsigned int nfrag = skb->len / PAGE_SIZE;
size_t len, chunk;
skb_frag_t *frag;
int sz;
- frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
+ if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ return -EMSGSIZE;
+ }
+
+ frag = &skb_shinfo(skb)->frags[nfrag];
len = in_len;
/* First make sure we got the header */
@@ -519,10 +525,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
tls_strp_load_anchor_with_queue(strp, inq);
if (!strp->stm.full_len) {
sz = tls_rx_msg_size(strp, strp->anchor);
- if (sz < 0) {
- tls_strp_abort_strp(strp, sz);
+ if (sz < 0)
return sz;
- }
strp->stm.full_len = sz;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 27ce1feb79e1..435235a351e2 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2441,8 +2441,7 @@ int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
return data_len + TLS_HEADER_SIZE;
read_failure:
- tls_err_abort(strp->sk, ret);
-
+ tls_strp_abort_strp(strp, ret);
return ret;
}
diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
index 88d1f4b56e4b..a220ac0c8eb8 100644
--- a/sound/firewire/motu/motu-hwdep.c
+++ b/sound/firewire/motu/motu-hwdep.c
@@ -111,7 +111,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
events = 0;
spin_unlock_irq(&motu->lock);
- return events | EPOLLOUT;
+ return events;
}
static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6aae06223f26..5fe6b71d90f4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -10161,6 +10161,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8992, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8995, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x89a0, "HP Laptop 15-dw4xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x89a4, "HP ProBook 440 G9", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89a6, "HP ProBook 450 G9", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED),
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index b9432f8b64e5..39d2c8e85d9d 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -220,7 +220,7 @@ static const struct snd_kcontrol_new wm8940_snd_controls[] = {
SOC_SINGLE_TLV("Digital Capture Volume", WM8940_ADCVOL,
0, 255, 0, wm8940_adc_tlv),
SOC_ENUM("Mic Bias Level", wm8940_mic_bias_level_enum),
- SOC_SINGLE_TLV("Capture Boost Volue", WM8940_ADCBOOST,
+ SOC_SINGLE_TLV("Capture Boost Volume", WM8940_ADCBOOST,
8, 1, 0, wm8940_capture_boost_vol_tlv),
SOC_SINGLE_TLV("Speaker Playback Volume", WM8940_SPKVOL,
0, 63, 0, wm8940_spk_vol_tlv),
@@ -693,7 +693,12 @@ static int wm8940_update_clocks(struct snd_soc_dai *dai)
f = wm8940_get_mclkdiv(priv->mclk, fs256, &mclkdiv);
if (f != priv->mclk) {
/* The PLL performs best around 90MHz */
- fpll = wm8940_get_mclkdiv(22500000, fs256, &mclkdiv);
+ if (fs256 % 8000)
+ f = 22579200;
+ else
+ f = 24576000;
+
+ fpll = wm8940_get_mclkdiv(f, fs256, &mclkdiv);
}
wm8940_set_dai_pll(dai, 0, 0, priv->mclk, fpll);
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 260bac695b20..2aaa1cbe68b7 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -419,10 +419,14 @@ static int wm8974_update_clocks(struct snd_soc_dai *dai)
fs256 = 256 * priv->fs;
f = wm8974_get_mclkdiv(priv->mclk, fs256, &mclkdiv);
-
if (f != priv->mclk) {
/* The PLL performs best around 90MHz */
- fpll = wm8974_get_mclkdiv(22500000, fs256, &mclkdiv);
+ if (fs256 % 8000)
+ f = 22579200;
+ else
+ f = 24576000;
+
+ fpll = wm8974_get_mclkdiv(f, fs256, &mclkdiv);
}
wm8974_set_dai_pll(dai, 0, 0, priv->mclk, fpll);
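
Both codec fixes derive the PLL target from the sample-rate family instead of the old fixed 22.5 MHz, which is not an integer multiple of 256*fs for either family. A quick standalone check of the arithmetic, assuming common audio rates:

#include <stdio.h>

int main(void)
{
	const unsigned int rates[] = { 8000, 16000, 32000, 44100, 48000, 96000 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		unsigned int fs256 = 256 * rates[i];
		/* 44.1 kHz family is not a multiple of 8 kHz; 48/8 kHz family is. */
		unsigned int f = (fs256 % 8000) ? 22579200u : 24576000u;

		printf("fs=%6u  fs256=%8u  pll=%u  pll%%fs256=%u\n",
		       rates[i], fs256, f, f % fs256);
	}
	return 0;	/* every remainder prints as 0 */
}
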
diff --git a/sound/soc/qcom/qdsp6/audioreach.c b/sound/soc/qcom/qdsp6/audioreach.c
index 5974c7929dd3..3eab43e9efb3 100644
--- a/sound/soc/qcom/qdsp6/audioreach.c
+++ b/sound/soc/qcom/qdsp6/audioreach.c
@@ -967,6 +967,7 @@ static int audioreach_i2s_set_media_format(struct q6apm_graph *graph,
param_data->param_id = PARAM_ID_I2S_INTF_CFG;
param_data->param_size = ic_sz - APM_MODULE_PARAM_DATA_SIZE;
+ intf_cfg->cfg.lpaif_type = module->hw_interface_type;
intf_cfg->cfg.intf_idx = module->hw_interface_idx;
intf_cfg->cfg.sd_line_idx = module->sd_line_idx;
diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
index 6511f0a08de1..3813fe5e0181 100644
--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
@@ -207,8 +207,10 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
return 0;
err:
- q6apm_graph_close(dai_data->graph[dai->id]);
- dai_data->graph[dai->id] = NULL;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ q6apm_graph_close(dai_data->graph[dai->id]);
+ dai_data->graph[dai->id] = NULL;
+ }
return rc;
}
@@ -254,6 +256,7 @@ static const struct snd_soc_dai_ops q6i2s_ops = {
.shutdown = q6apm_lpass_dai_shutdown,
.set_channel_map = q6dma_set_channel_map,
.hw_params = q6dma_hw_params,
+ .set_fmt = q6i2s_set_fmt,
};
static const struct snd_soc_dai_ops q6hdmi_ops = {
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
index 0b0087abcc50..3bb743cb167a 100644
--- a/sound/soc/sof/intel/hda-stream.c
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -842,7 +842,7 @@ int hda_dsp_stream_init(struct snd_sof_dev *sdev)
if (num_capture >= SOF_HDA_CAPTURE_STREAMS) {
dev_err(sdev->dev, "error: too many capture streams %d\n",
- num_playback);
+ num_capture);
return -EINVAL;
}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index c83a8b47bbdf..fc9eff0e89e2 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -1079,6 +1079,7 @@ int main_loop_s(int listensock)
struct pollfd polls;
socklen_t salen;
int remotesock;
+ int err = 0;
int fd = 0;
again:
@@ -1111,7 +1112,7 @@ int main_loop_s(int listensock)
SOCK_TEST_TCPULP(remotesock, 0);
memset(&winfo, 0, sizeof(winfo));
- copyfd_io(fd, remotesock, 1, true, &winfo);
+ err = copyfd_io(fd, remotesock, 1, true, &winfo);
} else {
perror("accept");
return 1;
@@ -1120,10 +1121,10 @@ int main_loop_s(int listensock)
if (cfg_input)
close(fd);
- if (--cfg_repeat > 0)
+ if (!err && --cfg_repeat > 0)
goto again;
- return 0;
+ return err;
}
static void init_rng(void)
@@ -1233,7 +1234,7 @@ void xdisconnect(int fd)
else
xerror("bad family");
- strcpy(cmd, "ss -M | grep -q ");
+ strcpy(cmd, "ss -Mnt | grep -q ");
cmdlen = strlen(cmd);
if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen],
sizeof(cmd) - cmdlen))
@@ -1243,7 +1244,7 @@ void xdisconnect(int fd)
/*
* wait until the pending data is completely flushed and all
- * the MPTCP sockets reached the closed status.
+ * the sockets reached the closed status.
* disconnect will bypass/ignore/drop any pending data.
*/
for (i = 0; ; i += msec_sleep) {
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
index 926b0be87c99..1dc2bd6ee4a5 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
@@ -658,22 +658,26 @@ static void process_one_client(int fd, int pipefd)
do_getsockopts(&s, fd, ret, ret2);
if (s.mptcpi_rcv_delta != (uint64_t)ret + 1)
- xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret);
+ xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64 ", diff %" PRId64,
+ s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - (ret + 1));
/* be nice when running on top of older kernel */
if (s.pkt_stats_avail) {
if (s.last_sample.mptcpi_bytes_sent != ret2)
- xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64,
+ xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64
+ ", diff %" PRId64,
s.last_sample.mptcpi_bytes_sent, ret2,
s.last_sample.mptcpi_bytes_sent - ret2);
if (s.last_sample.mptcpi_bytes_received != ret)
- xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64,
+ xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64
+ ", diff %" PRId64,
s.last_sample.mptcpi_bytes_received, ret,
s.last_sample.mptcpi_bytes_received - ret);
if (s.last_sample.mptcpi_bytes_acked != ret)
- xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64,
- s.last_sample.mptcpi_bytes_acked, ret2,
- s.last_sample.mptcpi_bytes_acked - ret2);
+ xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64
+ ", diff %" PRId64,
+ s.last_sample.mptcpi_bytes_acked, ret,
+ s.last_sample.mptcpi_bytes_acked - ret);
}
close(fd);
diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
index 763402dd1774..234c267dd2aa 100644
--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -194,6 +194,13 @@ static int capture_events(int fd, int event_group)
fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs));
else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE)
fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_FLAGS) {
+ __u16 flags = *(__u16 *)RTA_DATA(attrs);
+
+ /* only print when present, easier */
+ if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0)
+ fprintf(stderr, ",deny_join_id0:1");
+ }
attrs = RTA_NEXT(attrs, msg_len);
}
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index c5d7af8e8efd..4e966a9e3738 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -196,6 +196,9 @@ make_connection()
is_v6="v4"
fi
+ # set this on the client side only: will not affect the rest
+ ip netns exec "$ns2" sysctl -q net.mptcp.allow_join_initial_addr_port=0
+
# Capture netlink events over the two network namespaces running
# the MPTCP client and server
if [ -z "$client_evts" ]; then
@@ -227,23 +230,28 @@ make_connection()
local client_token
local client_port
local client_serverside
+ local client_nojoin
local server_token
local server_serverside
+ local server_nojoin
client_token=$(mptcp_lib_evts_get_info token "$client_evts")
client_port=$(mptcp_lib_evts_get_info sport "$client_evts")
client_serverside=$(mptcp_lib_evts_get_info server_side "$client_evts")
+ client_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$client_evts")
server_token=$(mptcp_lib_evts_get_info token "$server_evts")
server_serverside=$(mptcp_lib_evts_get_info server_side "$server_evts")
+ server_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$server_evts")
print_test "Established IP${is_v6} MPTCP Connection ns2 => ns1"
- if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
- [ "$server_serverside" = 1 ]
+ if [ "${client_token}" != "" ] && [ "${server_token}" != "" ] &&
+ [ "${client_serverside}" = 0 ] && [ "${server_serverside}" = 1 ] &&
+ [ "${client_nojoin:-0}" = 0 ] && [ "${server_nojoin:-0}" = 1 ]
then
test_pass
print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}"
else
- test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
+ test_fail "Expected tokens (c:${client_token} - s:${server_token}), server (c:${client_serverside} - s:${server_serverside}), nojoin (c:${client_nojoin} - s:${server_nojoin})"
mptcp_lib_result_print_all_tap
exit 1
fi