Message-Id: <1617800556221136@kroah.com>
Date: Wed, 7 Apr 2021 15:02:37 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org, akpm@...ux-foundation.org,
torvalds@...ux-foundation.org, stable@...r.kernel.org
Cc: lwn@....net, jslaby@...e.cz,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Subject: Re: Linux 5.4.110
diff --git a/Makefile b/Makefile
index e037662c369b..b028b5ead5f7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 109
+SUBLEVEL = 110
EXTRAVERSION =
NAME = Kleptomaniac Octopus
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 7897d16e0990..727d4b321937 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -7,7 +7,7 @@
#include <linux/bug.h>
#include <asm/cputable.h>
-static inline bool early_cpu_has_feature(unsigned long feature)
+static __always_inline bool early_cpu_has_feature(unsigned long feature)
{
return !!((CPU_FTRS_ALWAYS & feature) ||
(CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
@@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
return static_branch_likely(&cpu_feature_keys[i]);
}
#else
-static inline bool cpu_has_feature(unsigned long feature)
+static __always_inline bool cpu_has_feature(unsigned long feature)
{
return early_cpu_has_feature(feature);
}
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 80828b95a51f..d956f87fcb09 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -108,37 +108,6 @@
.previous
-/*
- * coprocessor_flush(struct thread_info*, index)
- * a2 a3
- *
- * Save coprocessor registers for coprocessor 'index'.
- * The register values are saved to or loaded from the coprocessor area
- * inside the task_info structure.
- *
- * Note that this function doesn't update the coprocessor_owner information!
- *
- */
-
-ENTRY(coprocessor_flush)
-
- /* reserve 4 bytes on stack to save a0 */
- abi_entry(4)
-
- s32i a0, a1, 0
- movi a0, .Lsave_cp_regs_jump_table
- addx8 a3, a3, a0
- l32i a4, a3, 4
- l32i a3, a3, 0
- add a2, a2, a4
- beqz a3, 1f
- callx0 a3
-1: l32i a0, a1, 0
-
- abi_ret(4)
-
-ENDPROC(coprocessor_flush)
-
/*
* Entry condition:
*
@@ -261,6 +230,39 @@ ENTRY(fast_coprocessor)
ENDPROC(fast_coprocessor)
+ .text
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ * a2 a3
+ *
+ * Save coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that this function doesn't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_flush)
+
+ /* reserve 4 bytes on stack to save a0 */
+ abi_entry(4)
+
+ s32i a0, a1, 0
+ movi a0, .Lsave_cp_regs_jump_table
+ addx8 a3, a3, a0
+ l32i a4, a3, 4
+ l32i a3, a3, 0
+ add a2, a2, a4
+ beqz a3, 1f
+ callx0 a3
+1: l32i a0, a1, 0
+
+ abi_ret(4)
+
+ENDPROC(coprocessor_flush)
+
.data
ENTRY(coprocessor_owner)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index e0c4ef06ca91..94785083c018 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1663,8 +1663,8 @@ void pm_runtime_get_suppliers(struct device *dev)
device_links_read_lock_held())
if (link->flags & DL_FLAG_PM_RUNTIME) {
link->supplier_preactivated = true;
- refcount_inc(&link->rpm_active);
pm_runtime_get_sync(link->supplier);
+ refcount_inc(&link->rpm_active);
}
device_links_read_unlock(idx);
@@ -1677,6 +1677,8 @@ void pm_runtime_get_suppliers(struct device *dev)
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
+ unsigned long flags;
+ bool put;
int idx;
idx = device_links_read_lock();
@@ -1685,7 +1687,11 @@ void pm_runtime_put_suppliers(struct device *dev)
device_links_read_lock_held())
if (link->supplier_preactivated) {
link->supplier_preactivated = false;
- if (refcount_dec_not_one(&link->rpm_active))
+ spin_lock_irqsave(&dev->power.lock, flags);
+ put = pm_runtime_status_suspended(dev) &&
+ refcount_dec_not_one(&link->rpm_active);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ if (put)
pm_runtime_put(link->supplier);
}
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index e055893fd5c3..5c9e156cd086 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev)
sizeof(*edev->nh), GFP_KERNEL);
if (!edev->nh) {
ret = -ENOMEM;
+ device_unregister(&edev->dev);
goto err_dev;
}
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 0cc746673677..9ee747a85ee4 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -346,6 +346,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct client *client = file->private_data;
spinlock_t *client_list_lock = &client->lynx->client_list_lock;
struct nosy_stats stats;
+ int ret;
switch (cmd) {
case NOSY_IOC_GET_STATS:
@@ -360,11 +361,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
case NOSY_IOC_START:
+ ret = -EBUSY;
spin_lock_irq(client_list_lock);
- list_add_tail(&client->link, &client->lynx->client_list);
+ if (list_empty(&client->link)) {
+ list_add_tail(&client->link, &client->lynx->client_list);
+ ret = 0;
+ }
spin_unlock_irq(client_list_lock);
- return 0;
+ return ret;
case NOSY_IOC_STOP:
spin_lock_irq(client_list_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6335bd4ae374..fb47ddc6f7f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2123,8 +2123,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t eaddr;
/* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+ size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
@@ -2188,8 +2188,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
int r;
/* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+ size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
@@ -2333,7 +2333,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
after->start = eaddr + 1;
after->last = tmp->last;
after->offset = tmp->offset;
- after->offset += after->start - tmp->start;
+ after->offset += (after->start - tmp->start) << PAGE_SHIFT;
after->flags = tmp->flags;
after->bo_va = tmp->bo_va;
list_add(&after->list, &tmp->bo_va->invalids);
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 22164300122d..a2b4463d8480 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -7,12 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan.o
-obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-y += dev.o
-can-dev-y += rx-offload.o
-
-can-dev-$(CONFIG_CAN_LEDS) += led.o
-
+obj-y += dev/
obj-y += rcar/
obj-y += spi/
obj-y += usb/
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
deleted file mode 100644
index 1e0c1a05df82..000000000000
--- a/drivers/net/can/dev.c
+++ /dev/null
@@ -1,1310 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
- * Copyright (C) 2006 Andrey Volkov, Varma Electronics
- * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@...ndegger.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/workqueue.h>
-#include <linux/can.h>
-#include <linux/can/can-ml.h>
-#include <linux/can/dev.h>
-#include <linux/can/skb.h>
-#include <linux/can/netlink.h>
-#include <linux/can/led.h>
-#include <linux/of.h>
-#include <net/rtnetlink.h>
-
-#define MOD_DESC "CAN device driver interface"
-
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Wolfgang Grandegger <wg@...ndegger.com>");
-
-/* CAN DLC to real data length conversion helpers */
-
-static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
- 8, 12, 16, 20, 24, 32, 48, 64};
-
-/* get data length from can_dlc with sanitized can_dlc */
-u8 can_dlc2len(u8 can_dlc)
-{
- return dlc2len[can_dlc & 0x0F];
-}
-EXPORT_SYMBOL_GPL(can_dlc2len);
-
-static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
- 9, 9, 9, 9, /* 9 - 12 */
- 10, 10, 10, 10, /* 13 - 16 */
- 11, 11, 11, 11, /* 17 - 20 */
- 12, 12, 12, 12, /* 21 - 24 */
- 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
- 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
- 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
- 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
- 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
-
-/* map the sanitized data length to an appropriate data length code */
-u8 can_len2dlc(u8 len)
-{
- if (unlikely(len > 64))
- return 0xF;
-
- return len2dlc[len];
-}
-EXPORT_SYMBOL_GPL(can_len2dlc);
-
-#ifdef CONFIG_CAN_CALC_BITTIMING
-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
-#define CAN_CALC_SYNC_SEG 1
-
-/* Bit-timing calculation derived from:
- *
- * Code based on LinCAN sources and H8S2638 project
- * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
- * Copyright 2005 Stanislav Marek
- * email: pisa@....felk.cvut.cz
- *
- * Calculates proper bit-timing parameters for a specified bit-rate
- * and sample-point, which can then be used to set the bit-timing
- * registers of the CAN controller. You can find more information
- * in the header file linux/can/netlink.h.
- */
-static int
-can_update_sample_point(const struct can_bittiming_const *btc,
- unsigned int sample_point_nominal, unsigned int tseg,
- unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
- unsigned int *sample_point_error_ptr)
-{
- unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
- unsigned int sample_point, best_sample_point = 0;
- unsigned int tseg1, tseg2;
- int i;
-
- for (i = 0; i <= 1; i++) {
- tseg2 = tseg + CAN_CALC_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) /
- 1000 - i;
- tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
- tseg1 = tseg - tseg2;
- if (tseg1 > btc->tseg1_max) {
- tseg1 = btc->tseg1_max;
- tseg2 = tseg - tseg1;
- }
-
- sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) /
- (tseg + CAN_CALC_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
-
- if (sample_point <= sample_point_nominal &&
- sample_point_error < best_sample_point_error) {
- best_sample_point = sample_point;
- best_sample_point_error = sample_point_error;
- *tseg1_ptr = tseg1;
- *tseg2_ptr = tseg2;
- }
- }
-
- if (sample_point_error_ptr)
- *sample_point_error_ptr = best_sample_point_error;
-
- return best_sample_point;
-}
-
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
- unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
- unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
- unsigned int best_tseg = 0; /* current best value for tseg */
- unsigned int best_brp = 0; /* current best value for brp */
- unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
- u64 v64;
-
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800000)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500000)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
-
- /* tseg even = round down, odd = round up */
- for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
- tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = CAN_CALC_SYNC_SEG + tseg / 2;
-
- /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
- brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
-
- /* choose brp step which is possible in system */
- brp = (brp / btc->brp_inc) * btc->brp_inc;
- if (brp < btc->brp_min || brp > btc->brp_max)
- continue;
-
- bitrate = priv->clock.freq / (brp * tsegall);
- bitrate_error = abs(bt->bitrate - bitrate);
-
- /* tseg brp biterror */
- if (bitrate_error > best_bitrate_error)
- continue;
-
- /* reset sample point error if we have a better bitrate */
- if (bitrate_error < best_bitrate_error)
- best_sample_point_error = UINT_MAX;
-
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
- &tseg1, &tseg2, &sample_point_error);
- if (sample_point_error > best_sample_point_error)
- continue;
-
- best_sample_point_error = sample_point_error;
- best_bitrate_error = bitrate_error;
- best_tseg = tseg / 2;
- best_brp = brp;
-
- if (bitrate_error == 0 && sample_point_error == 0)
- break;
- }
-
- if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
- do_div(v64, bt->bitrate);
- bitrate_error = (u32)v64;
- if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
- }
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
- }
-
- /* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
- best_tseg, &tseg1, &tseg2,
- NULL);
-
- v64 = (u64)best_brp * 1000 * 1000 * 1000;
- do_div(v64, priv->clock.freq);
- bt->tq = (u32)v64;
- bt->prop_seg = tseg1 / 2;
- bt->phase_seg1 = tseg1 - bt->prop_seg;
- bt->phase_seg2 = tseg2;
-
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
- }
-
- bt->brp = best_brp;
-
- /* real bitrate */
- bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2));
-
- return 0;
-}
-#else /* !CONFIG_CAN_CALC_BITTIMING */
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- netdev_err(dev, "bit-timing calculation not available\n");
- return -EINVAL;
-}
-#endif /* CONFIG_CAN_CALC_BITTIMING */
-
-/* Checks the validity of the specified bit-timing parameters prop_seg,
- * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
- * prescaler value brp. You can find more information in the header
- * file linux/can/netlink.h.
- */
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- struct can_priv *priv = netdev_priv(dev);
- int tseg1, alltseg;
- u64 brp64;
-
- tseg1 = bt->prop_seg + bt->phase_seg1;
- if (!bt->sjw)
- bt->sjw = 1;
- if (bt->sjw > btc->sjw_max ||
- tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
- bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
- return -ERANGE;
-
- brp64 = (u64)priv->clock.freq * (u64)bt->tq;
- if (btc->brp_inc > 1)
- do_div(brp64, btc->brp_inc);
- brp64 += 500000000UL - 1;
- do_div(brp64, 1000000000UL); /* the practicable BRP */
- if (btc->brp_inc > 1)
- brp64 *= btc->brp_inc;
- bt->brp = (u32)brp64;
-
- if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
- return -EINVAL;
-
- alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
- bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
- bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
-
- return 0;
-}
-
-/* Checks the validity of predefined bitrate settings */
-static int
-can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
- const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
-{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int i;
-
- for (i = 0; i < bitrate_const_cnt; i++) {
- if (bt->bitrate == bitrate_const[i])
- break;
- }
-
- if (i >= priv->bitrate_const_cnt)
- return -EINVAL;
-
- return 0;
-}
-
-static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc,
- const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
-{
- int err;
-
- /* Depending on the given can_bittiming parameter structure the CAN
- * timing parameters are calculated based on the provided bitrate OR
- * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
- * provided directly which are then checked and fixed up.
- */
- if (!bt->tq && bt->bitrate && btc)
- err = can_calc_bittiming(dev, bt, btc);
- else if (bt->tq && !bt->bitrate && btc)
- err = can_fixup_bittiming(dev, bt, btc);
- else if (!bt->tq && bt->bitrate && bitrate_const)
- err = can_validate_bitrate(dev, bt, bitrate_const,
- bitrate_const_cnt);
- else
- err = -EINVAL;
-
- return err;
-}
-
-static void can_update_state_error_stats(struct net_device *dev,
- enum can_state new_state)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (new_state <= priv->state)
- return;
-
- switch (new_state) {
- case CAN_STATE_ERROR_WARNING:
- priv->can_stats.error_warning++;
- break;
- case CAN_STATE_ERROR_PASSIVE:
- priv->can_stats.error_passive++;
- break;
- case CAN_STATE_BUS_OFF:
- priv->can_stats.bus_off++;
- break;
- default:
- break;
- }
-}
-
-static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
-{
- switch (state) {
- case CAN_STATE_ERROR_ACTIVE:
- return CAN_ERR_CRTL_ACTIVE;
- case CAN_STATE_ERROR_WARNING:
- return CAN_ERR_CRTL_TX_WARNING;
- case CAN_STATE_ERROR_PASSIVE:
- return CAN_ERR_CRTL_TX_PASSIVE;
- default:
- return 0;
- }
-}
-
-static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
-{
- switch (state) {
- case CAN_STATE_ERROR_ACTIVE:
- return CAN_ERR_CRTL_ACTIVE;
- case CAN_STATE_ERROR_WARNING:
- return CAN_ERR_CRTL_RX_WARNING;
- case CAN_STATE_ERROR_PASSIVE:
- return CAN_ERR_CRTL_RX_PASSIVE;
- default:
- return 0;
- }
-}
-
-void can_change_state(struct net_device *dev, struct can_frame *cf,
- enum can_state tx_state, enum can_state rx_state)
-{
- struct can_priv *priv = netdev_priv(dev);
- enum can_state new_state = max(tx_state, rx_state);
-
- if (unlikely(new_state == priv->state)) {
- netdev_warn(dev, "%s: oops, state did not change", __func__);
- return;
- }
-
- netdev_dbg(dev, "New error state: %d\n", new_state);
-
- can_update_state_error_stats(dev, new_state);
- priv->state = new_state;
-
- if (!cf)
- return;
-
- if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
- cf->can_id |= CAN_ERR_BUSOFF;
- return;
- }
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= tx_state >= rx_state ?
- can_tx_state_to_frame(dev, tx_state) : 0;
- cf->data[1] |= tx_state <= rx_state ?
- can_rx_state_to_frame(dev, rx_state) : 0;
-}
-EXPORT_SYMBOL_GPL(can_change_state);
-
-/* Local echo of CAN messages
- *
- * CAN network devices *should* support a local echo functionality
- * (see Documentation/networking/can.rst). To test the handling of CAN
- * interfaces that do not support the local echo both driver types are
- * implemented. In the case that the driver does not support the echo
- * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
- * to perform the echo as a fallback solution.
- */
-static void can_flush_echo_skb(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- int i;
-
- for (i = 0; i < priv->echo_skb_max; i++) {
- if (priv->echo_skb[i]) {
- kfree_skb(priv->echo_skb[i]);
- priv->echo_skb[i] = NULL;
- stats->tx_dropped++;
- stats->tx_aborted_errors++;
- }
- }
-}
-
-/* Put the skb on the stack to be looped backed locally lateron
- *
- * The function is typically called in the start_xmit function
- * of the device driver. The driver must protect access to
- * priv->echo_skb, if necessary.
- */
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- BUG_ON(idx >= priv->echo_skb_max);
-
- /* check flag whether this packet has to be looped back */
- if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
- (skb->protocol != htons(ETH_P_CAN) &&
- skb->protocol != htons(ETH_P_CANFD))) {
- kfree_skb(skb);
- return;
- }
-
- if (!priv->echo_skb[idx]) {
- skb = can_create_echo_skb(skb);
- if (!skb)
- return;
-
- /* make settings for echo to reduce code in irq context */
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->dev = dev;
-
- /* save this skb for tx interrupt echo handling */
- priv->echo_skb[idx] = skb;
- } else {
- /* locking problem with netif_stop_queue() ?? */
- netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__);
- kfree_skb(skb);
- }
-}
-EXPORT_SYMBOL_GPL(can_put_echo_skb);
-
-struct sk_buff *
-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (idx >= priv->echo_skb_max) {
- netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
- __func__, idx, priv->echo_skb_max);
- return NULL;
- }
-
- if (priv->echo_skb[idx]) {
- /* Using "struct canfd_frame::len" for the frame
- * length is supported on both CAN and CANFD frames.
- */
- struct sk_buff *skb = priv->echo_skb[idx];
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
-
- /* get the real payload length for netdev statistics */
- if (cf->can_id & CAN_RTR_FLAG)
- *len_ptr = 0;
- else
- *len_ptr = cf->len;
-
- priv->echo_skb[idx] = NULL;
-
- return skb;
- }
-
- return NULL;
-}
-
-/* Get the skb from the stack and loop it back locally
- *
- * The function is typically called when the TX done interrupt
- * is handled in the device driver. The driver must protect
- * access to priv->echo_skb, if necessary.
- */
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
-{
- struct sk_buff *skb;
- u8 len;
-
- skb = __can_get_echo_skb(dev, idx, &len);
- if (!skb)
- return 0;
-
- skb_get(skb);
- if (netif_rx(skb) == NET_RX_SUCCESS)
- dev_consume_skb_any(skb);
- else
- dev_kfree_skb_any(skb);
-
- return len;
-}
-EXPORT_SYMBOL_GPL(can_get_echo_skb);
-
-/* Remove the skb from the stack and free it.
- *
- * The function is typically called when TX failed.
- */
-void can_free_echo_skb(struct net_device *dev, unsigned int idx)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- BUG_ON(idx >= priv->echo_skb_max);
-
- if (priv->echo_skb[idx]) {
- dev_kfree_skb_any(priv->echo_skb[idx]);
- priv->echo_skb[idx] = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(can_free_echo_skb);
-
-/* CAN device restart for bus-off recovery */
-static void can_restart(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct sk_buff *skb;
- struct can_frame *cf;
- int err;
-
- BUG_ON(netif_carrier_ok(dev));
-
- /* No synchronization needed because the device is bus-off and
- * no messages can come in or go out.
- */
- can_flush_echo_skb(dev);
-
- /* send restart message upstream */
- skb = alloc_can_err_skb(dev, &cf);
- if (!skb) {
- err = -ENOMEM;
- goto restart;
- }
- cf->can_id |= CAN_ERR_RESTARTED;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
-
- netif_rx_ni(skb);
-
-restart:
- netdev_dbg(dev, "restarted\n");
- priv->can_stats.restarts++;
-
- /* Now restart the device */
- err = priv->do_set_mode(dev, CAN_MODE_START);
-
- netif_carrier_on(dev);
- if (err)
- netdev_err(dev, "Error %d during restart", err);
-}
-
-static void can_restart_work(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct can_priv *priv = container_of(dwork, struct can_priv,
- restart_work);
-
- can_restart(priv->dev);
-}
-
-int can_restart_now(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* A manual restart is only permitted if automatic restart is
- * disabled and the device is in the bus-off state
- */
- if (priv->restart_ms)
- return -EINVAL;
- if (priv->state != CAN_STATE_BUS_OFF)
- return -EBUSY;
-
- cancel_delayed_work_sync(&priv->restart_work);
- can_restart(dev);
-
- return 0;
-}
-
-/* CAN bus-off
- *
- * This functions should be called when the device goes bus-off to
- * tell the netif layer that no more packets can be sent or received.
- * If enabled, a timer is started to trigger bus-off recovery.
- */
-void can_bus_off(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- netdev_info(dev, "bus-off\n");
-
- netif_carrier_off(dev);
-
- if (priv->restart_ms)
- schedule_delayed_work(&priv->restart_work,
- msecs_to_jiffies(priv->restart_ms));
-}
-EXPORT_SYMBOL_GPL(can_bus_off);
-
-static void can_setup(struct net_device *dev)
-{
- dev->type = ARPHRD_CAN;
- dev->mtu = CAN_MTU;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->tx_queue_len = 10;
-
- /* New-style flags. */
- dev->flags = IFF_NOARP;
- dev->features = NETIF_F_HW_CSUM;
-}
-
-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
-{
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
- sizeof(struct can_frame));
- if (unlikely(!skb))
- return NULL;
-
- skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- *cf = skb_put_zero(skb, sizeof(struct can_frame));
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_can_skb);
-
-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
- struct canfd_frame **cfd)
-{
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
- sizeof(struct canfd_frame));
- if (unlikely(!skb))
- return NULL;
-
- skb->protocol = htons(ETH_P_CANFD);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_canfd_skb);
-
-struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
-{
- struct sk_buff *skb;
-
- skb = alloc_can_skb(dev, cf);
- if (unlikely(!skb))
- return NULL;
-
- (*cf)->can_id = CAN_ERR_FLAG;
- (*cf)->can_dlc = CAN_ERR_DLC;
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_can_err_skb);
-
-/* Allocate and setup space for the CAN network device */
-struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
- unsigned int txqs, unsigned int rxqs)
-{
- struct net_device *dev;
- struct can_priv *priv;
- int size;
-
- /* We put the driver's priv, the CAN mid layer priv and the
- * echo skb into the netdevice's priv. The memory layout for
- * the netdev_priv is like this:
- *
- * +-------------------------+
- * | driver's priv |
- * +-------------------------+
- * | struct can_ml_priv |
- * +-------------------------+
- * | array of struct sk_buff |
- * +-------------------------+
- */
-
- size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
-
- if (echo_skb_max)
- size = ALIGN(size, sizeof(struct sk_buff *)) +
- echo_skb_max * sizeof(struct sk_buff *);
-
- dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
- txqs, rxqs);
- if (!dev)
- return NULL;
-
- priv = netdev_priv(dev);
- priv->dev = dev;
-
- dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
-
- if (echo_skb_max) {
- priv->echo_skb_max = echo_skb_max;
- priv->echo_skb = (void *)priv +
- (size - echo_skb_max * sizeof(struct sk_buff *));
- }
-
- priv->state = CAN_STATE_STOPPED;
-
- INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
-
- return dev;
-}
-EXPORT_SYMBOL_GPL(alloc_candev_mqs);
-
-/* Free space of the CAN network device */
-void free_candev(struct net_device *dev)
-{
- free_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(free_candev);
-
-/* changing MTU and control mode for CAN/CANFD devices */
-int can_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* Do not allow changing the MTU while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* allow change of MTU according to the CANFD ability of the device */
- switch (new_mtu) {
- case CAN_MTU:
- /* 'CANFD-only' controllers can not switch to CAN_MTU */
- if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
- return -EINVAL;
-
- priv->ctrlmode &= ~CAN_CTRLMODE_FD;
- break;
-
- case CANFD_MTU:
- /* check for potential CANFD ability */
- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
- !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
- return -EINVAL;
-
- priv->ctrlmode |= CAN_CTRLMODE_FD;
- break;
-
- default:
- return -EINVAL;
- }
-
- dev->mtu = new_mtu;
- return 0;
-}
-EXPORT_SYMBOL_GPL(can_change_mtu);
-
-/* Common open function when the device gets opened.
- *
- * This function should be called in the open function of the device
- * driver.
- */
-int open_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (!priv->bittiming.bitrate) {
- netdev_err(dev, "bit-timing not yet defined\n");
- return -EINVAL;
- }
-
- /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
- if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
- (!priv->data_bittiming.bitrate ||
- priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
- netdev_err(dev, "incorrect/missing data bit-timing\n");
- return -EINVAL;
- }
-
- /* Switch carrier on if device was stopped while in bus-off state */
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(open_candev);
-
-#ifdef CONFIG_OF
-/* Common function that can be used to understand the limitation of
- * a transceiver when it provides no means to determine these limitations
- * at runtime.
- */
-void of_can_transceiver(struct net_device *dev)
-{
- struct device_node *dn;
- struct can_priv *priv = netdev_priv(dev);
- struct device_node *np = dev->dev.parent->of_node;
- int ret;
-
- dn = of_get_child_by_name(np, "can-transceiver");
- if (!dn)
- return;
-
- ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
- of_node_put(dn);
- if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
- netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
-}
-EXPORT_SYMBOL_GPL(of_can_transceiver);
-#endif
-
-/* Common close function for cleanup before the device gets closed.
- *
- * This function should be called in the close function of the device
- * driver.
- */
-void close_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- cancel_delayed_work_sync(&priv->restart_work);
- can_flush_echo_skb(dev);
-}
-EXPORT_SYMBOL_GPL(close_candev);
-
-/* CAN netlink interface */
-static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
- [IFLA_CAN_STATE] = { .type = NLA_U32 },
- [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
- [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
- [IFLA_CAN_RESTART] = { .type = NLA_U32 },
- [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_BITTIMING_CONST]
- = { .len = sizeof(struct can_bittiming_const) },
- [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
- [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
- [IFLA_CAN_DATA_BITTIMING]
- = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_DATA_BITTIMING_CONST]
- = { .len = sizeof(struct can_bittiming_const) },
- [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
-};
-
-static int can_validate(struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- bool is_can_fd = false;
-
- /* Make sure that valid CAN FD configurations always consist of
- * - nominal/arbitration bittiming
- * - data bittiming
- * - control mode with CAN_CTRLMODE_FD set
- */
-
- if (!data)
- return 0;
-
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
-
- is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
- }
-
- if (is_can_fd) {
- if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
- return -EOPNOTSUPP;
- }
-
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int can_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- struct can_priv *priv = netdev_priv(dev);
- int err;
-
- /* We need synchronization with dev->stop() */
- ASSERT_RTNL();
-
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->bittiming_const && !priv->do_set_bittiming)
- return -EOPNOTSUPP;
-
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_get_bittiming(dev, &bt,
- priv->bittiming_const,
- priv->bitrate_const,
- priv->bitrate_const_cnt);
- if (err)
- return err;
-
- if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
- return -EINVAL;
- }
-
- memcpy(&priv->bittiming, &bt, sizeof(bt));
-
- if (priv->do_set_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_bittiming(dev);
- if (err)
- return err;
- }
- }
-
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm;
- u32 ctrlstatic;
- u32 maskedflags;
-
- /* Do not allow changing controller mode while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- ctrlstatic = priv->ctrlmode_static;
- maskedflags = cm->flags & cm->mask;
-
- /* check whether provided bits are allowed to be passed */
- if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
- return -EOPNOTSUPP;
-
- /* do not check for static fd-non-iso if 'fd' is disabled */
- if (!(maskedflags & CAN_CTRLMODE_FD))
- ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
-
- /* make sure static options are provided by configuration */
- if ((maskedflags & ctrlstatic) != ctrlstatic)
- return -EOPNOTSUPP;
-
- /* clear bits to be modified and copy the flag values */
- priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= maskedflags;
-
- /* CAN_CTRLMODE_FD can only be set when driver supports FD */
- if (priv->ctrlmode & CAN_CTRLMODE_FD)
- dev->mtu = CANFD_MTU;
- else
- dev->mtu = CAN_MTU;
- }
-
- if (data[IFLA_CAN_RESTART_MS]) {
- /* Do not allow changing restart delay while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
- }
-
- if (data[IFLA_CAN_RESTART]) {
- /* Do not allow a restart while not running */
- if (!(dev->flags & IFF_UP))
- return -EINVAL;
- err = can_restart_now(dev);
- if (err)
- return err;
- }
-
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming dbt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * data_bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
- return -EOPNOTSUPP;
-
- memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
- sizeof(dbt));
- err = can_get_bittiming(dev, &dbt,
- priv->data_bittiming_const,
- priv->data_bitrate_const,
- priv->data_bitrate_const_cnt);
- if (err)
- return err;
-
- if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
- return -EINVAL;
- }
-
- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
-
- if (priv->do_set_data_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_data_bittiming(dev);
- if (err)
- return err;
- }
- }
-
- if (data[IFLA_CAN_TERMINATION]) {
- const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
- const unsigned int num_term = priv->termination_const_cnt;
- unsigned int i;
-
- if (!priv->do_set_termination)
- return -EOPNOTSUPP;
-
- /* check whether given value is supported by the interface */
- for (i = 0; i < num_term; i++) {
- if (termval == priv->termination_const[i])
- break;
- }
- if (i >= num_term)
- return -EINVAL;
-
- /* Finally, set the termination value */
- err = priv->do_set_termination(dev, termval);
- if (err)
- return err;
-
- priv->termination = termval;
- }
-
- return 0;
-}
-
-static size_t can_get_size(const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- size_t size = 0;
-
- if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
- size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
- size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
- if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
- size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
- if (priv->termination_const) {
- size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
- size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
- priv->termination_const_cnt);
- }
- if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt);
- if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt);
- size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
-
- return size;
-}
-
-static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct can_ctrlmode cm = {.flags = priv->ctrlmode};
- struct can_berr_counter bec = { };
- enum can_state state = priv->state;
-
- if (priv->do_get_state)
- priv->do_get_state(dev, &state);
-
- if ((priv->bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_BITTIMING,
- sizeof(priv->bittiming), &priv->bittiming)) ||
-
- (priv->bittiming_const &&
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
-
- nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
- nla_put_u32(skb, IFLA_CAN_STATE, state) ||
- nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
- nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
-
- (priv->do_get_berr_counter &&
- !priv->do_get_berr_counter(dev, &bec) &&
- nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
-
- (priv->data_bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING,
- sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
-
- (priv->data_bittiming_const &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
- sizeof(*priv->data_bittiming_const),
- priv->data_bittiming_const)) ||
-
- (priv->termination_const &&
- (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
- nla_put(skb, IFLA_CAN_TERMINATION_CONST,
- sizeof(*priv->termination_const) *
- priv->termination_const_cnt,
- priv->termination_const))) ||
-
- (priv->bitrate_const &&
- nla_put(skb, IFLA_CAN_BITRATE_CONST,
- sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt,
- priv->bitrate_const)) ||
-
- (priv->data_bitrate_const &&
- nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
- sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt,
- priv->data_bitrate_const)) ||
-
- (nla_put(skb, IFLA_CAN_BITRATE_MAX,
- sizeof(priv->bitrate_max),
- &priv->bitrate_max))
- )
-
- return -EMSGSIZE;
-
- return 0;
-}
-
-static size_t can_get_xstats_size(const struct net_device *dev)
-{
- return sizeof(struct can_device_stats);
-}
-
-static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (nla_put(skb, IFLA_INFO_XSTATS,
- sizeof(priv->can_stats), &priv->can_stats))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-
-static int can_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- return -EOPNOTSUPP;
-}
-
-static void can_dellink(struct net_device *dev, struct list_head *head)
-{
-}
-
-static struct rtnl_link_ops can_link_ops __read_mostly = {
- .kind = "can",
- .netns_refund = true,
- .maxtype = IFLA_CAN_MAX,
- .policy = can_policy,
- .setup = can_setup,
- .validate = can_validate,
- .newlink = can_newlink,
- .changelink = can_changelink,
- .dellink = can_dellink,
- .get_size = can_get_size,
- .fill_info = can_fill_info,
- .get_xstats_size = can_get_xstats_size,
- .fill_xstats = can_fill_xstats,
-};
-
-/* Register the CAN network device */
-int register_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* Ensure termination_const, termination_const_cnt and
- * do_set_termination consistency. All must be either set or
- * unset.
- */
- if ((!priv->termination_const != !priv->termination_const_cnt) ||
- (!priv->termination_const != !priv->do_set_termination))
- return -EINVAL;
-
- if (!priv->bitrate_const != !priv->bitrate_const_cnt)
- return -EINVAL;
-
- if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
- return -EINVAL;
-
- dev->rtnl_link_ops = &can_link_ops;
- netif_carrier_off(dev);
-
- return register_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(register_candev);
-
-/* Unregister the CAN network device */
-void unregister_candev(struct net_device *dev)
-{
- unregister_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(unregister_candev);
-
-/* Test if a network device is a candev based device
- * and return the can_priv* if so.
- */
-struct can_priv *safe_candev_priv(struct net_device *dev)
-{
- if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
- return NULL;
-
- return netdev_priv(dev);
-}
-EXPORT_SYMBOL_GPL(safe_candev_priv);
-
-static __init int can_dev_init(void)
-{
- int err;
-
- can_led_notifier_init();
-
- err = rtnl_link_register(&can_link_ops);
- if (!err)
- pr_info(MOD_DESC "\n");
-
- return err;
-}
-module_init(can_dev_init);
-
-static __exit void can_dev_exit(void)
-{
- rtnl_link_unregister(&can_link_ops);
-
- can_led_notifier_exit();
-}
-module_exit(can_dev_exit);
-
-MODULE_ALIAS_RTNL_LINK("can");
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
new file mode 100644
index 000000000000..cba92e6bcf6f
--- /dev/null
+++ b/drivers/net/can/dev/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_DEV) += can-dev.o
+can-dev-y += dev.o
+can-dev-y += rx-offload.o
+
+can-dev-$(CONFIG_CAN_LEDS) += led.o
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
new file mode 100644
index 000000000000..322da89cb9c6
--- /dev/null
+++ b/drivers/net/can/dev/dev.c
@@ -0,0 +1,1312 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@...ndegger.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/workqueue.h>
+#include <linux/can.h>
+#include <linux/can/can-ml.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+#include <linux/can/netlink.h>
+#include <linux/can/led.h>
+#include <linux/of.h>
+#include <net/rtnetlink.h>
+
+#define MOD_DESC "CAN device driver interface"
+
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@...ndegger.com>");
+
+/* CAN DLC to real data length conversion helpers */
+
+static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 12, 16, 20, 24, 32, 48, 64};
+
+/* get data length from can_dlc with sanitized can_dlc */
+u8 can_dlc2len(u8 can_dlc)
+{
+ return dlc2len[can_dlc & 0x0F];
+}
+EXPORT_SYMBOL_GPL(can_dlc2len);
+
+static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
+ 9, 9, 9, 9, /* 9 - 12 */
+ 10, 10, 10, 10, /* 13 - 16 */
+ 11, 11, 11, 11, /* 17 - 20 */
+ 12, 12, 12, 12, /* 21 - 24 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
+ 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_len2dlc(u8 len)
+{
+ if (unlikely(len > 64))
+ return 0xF;
+
+ return len2dlc[len];
+}
+EXPORT_SYMBOL_GPL(can_len2dlc);
+
+#ifdef CONFIG_CAN_CALC_BITTIMING
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+#define CAN_CALC_SYNC_SEG 1
+
+/* Bit-timing calculation derived from:
+ *
+ * Code based on LinCAN sources and H8S2638 project
+ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+ * Copyright 2005 Stanislav Marek
+ * email: pisa@....felk.cvut.cz
+ *
+ * Calculates proper bit-timing parameters for a specified bit-rate
+ * and sample-point, which can then be used to set the bit-timing
+ * registers of the CAN controller. You can find more information
+ * in the header file linux/can/netlink.h.
+ */
+static int
+can_update_sample_point(const struct can_bittiming_const *btc,
+ unsigned int sample_point_nominal, unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
+{
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_CALC_SYNC_SEG -
+ (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) /
+ 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) /
+ (tseg + CAN_CALC_SYNC_SEG);
+ sample_point_error = abs(sample_point_nominal - sample_point);
+
+ if (sample_point <= sample_point_nominal &&
+ sample_point_error < best_sample_point_error) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
+ }
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
+}
+
+static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+ u64 v64;
+
+ /* Use CiA recommended sample points */
+ if (bt->sample_point) {
+ sample_point_nominal = bt->sample_point;
+ } else {
+ if (bt->bitrate > 800000)
+ sample_point_nominal = 750;
+ else if (bt->bitrate > 500000)
+ sample_point_nominal = 800;
+ else
+ sample_point_nominal = 875;
+ }
+
+ /* tseg even = round down, odd = round up */
+ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+ tsegall = CAN_CALC_SYNC_SEG + tseg / 2;
+
+ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+
+ /* choose brp step which is possible in system */
+ brp = (brp / btc->brp_inc) * btc->brp_inc;
+ if (brp < btc->brp_min || brp > btc->brp_max)
+ continue;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
+ /* tseg brp biterror */
+ if (bitrate_error > best_bitrate_error)
+ continue;
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+ &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error > best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
+ best_tseg = tseg / 2;
+ best_brp = brp;
+
+ if (bitrate_error == 0 && sample_point_error == 0)
+ break;
+ }
+
+ if (best_bitrate_error) {
+ /* Error in one-tenth of a percent */
+ v64 = (u64)best_bitrate_error * 1000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
+ netdev_err(dev,
+ "bitrate error %d.%d%% too high\n",
+ bitrate_error / 10, bitrate_error % 10);
+ return -EDOM;
+ }
+ netdev_warn(dev, "bitrate error %d.%d%%\n",
+ bitrate_error / 10, bitrate_error % 10);
+ }
+
+ /* real sample point */
+ bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+ best_tseg, &tseg1, &tseg2,
+ NULL);
+
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
+ do_div(v64, priv->clock.freq);
+ bt->tq = (u32)v64;
+ bt->prop_seg = tseg1 / 2;
+ bt->phase_seg1 = tseg1 - bt->prop_seg;
+ bt->phase_seg2 = tseg2;
+
+ /* check for sjw user settings */
+ if (!bt->sjw || !btc->sjw_max) {
+ bt->sjw = 1;
+ } else {
+ /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
+ if (bt->sjw > btc->sjw_max)
+ bt->sjw = btc->sjw_max;
+ /* bt->sjw must not be higher than tseg2 */
+ if (tseg2 < bt->sjw)
+ bt->sjw = tseg2;
+ }
+
+ bt->brp = best_brp;
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq /
+ (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2));
+
+ return 0;
+}
+#else /* !CONFIG_CAN_CALC_BITTIMING */
+static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ netdev_err(dev, "bit-timing calculation not available\n");
+ return -EINVAL;
+}
+#endif /* CONFIG_CAN_CALC_BITTIMING */
+
+/* Checks the validity of the specified bit-timing parameters prop_seg,
+ * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
+ * prescaler value brp. You can find more information in the header
+ * file linux/can/netlink.h.
+ */
+static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int tseg1, alltseg;
+ u64 brp64;
+
+ tseg1 = bt->prop_seg + bt->phase_seg1;
+ if (!bt->sjw)
+ bt->sjw = 1;
+ if (bt->sjw > btc->sjw_max ||
+ tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
+ bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
+ return -ERANGE;
+
+ brp64 = (u64)priv->clock.freq * (u64)bt->tq;
+ if (btc->brp_inc > 1)
+ do_div(brp64, btc->brp_inc);
+ brp64 += 500000000UL - 1;
+ do_div(brp64, 1000000000UL); /* the practicable BRP */
+ if (btc->brp_inc > 1)
+ brp64 *= btc->brp_inc;
+ bt->brp = (u32)brp64;
+
+ if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+ return -EINVAL;
+
+ alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
+ bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
+ bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+
+ return 0;
+}
+
+/* Checks the validity of predefined bitrate settings */
+static int
+can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int i;
+
+ for (i = 0; i < bitrate_const_cnt; i++) {
+ if (bt->bitrate == bitrate_const[i])
+ break;
+ }
+
+ if (i >= priv->bitrate_const_cnt)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
+{
+ int err;
+
+ /* Depending on the given can_bittiming parameter structure the CAN
+ * timing parameters are calculated based on the provided bitrate OR
+ * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
+ * provided directly which are then checked and fixed up.
+ */
+ if (!bt->tq && bt->bitrate && btc)
+ err = can_calc_bittiming(dev, bt, btc);
+ else if (bt->tq && !bt->bitrate && btc)
+ err = can_fixup_bittiming(dev, bt, btc);
+ else if (!bt->tq && bt->bitrate && bitrate_const)
+ err = can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt);
+ else
+ err = -EINVAL;
+
+ return err;
+}
+
+static void can_update_state_error_stats(struct net_device *dev,
+ enum can_state new_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (new_state <= priv->state)
+ return;
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_WARNING:
+ priv->can_stats.error_warning++;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can_stats.error_passive++;
+ break;
+ case CAN_STATE_BUS_OFF:
+ priv->can_stats.bus_off++;
+ break;
+ default:
+ break;
+ }
+}
+
+static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_TX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_TX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_RX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_RX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+void can_change_state(struct net_device *dev, struct can_frame *cf,
+ enum can_state tx_state, enum can_state rx_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ enum can_state new_state = max(tx_state, rx_state);
+
+ if (unlikely(new_state == priv->state)) {
+ netdev_warn(dev, "%s: oops, state did not change", __func__);
+ return;
+ }
+
+ netdev_dbg(dev, "New error state: %d\n", new_state);
+
+ can_update_state_error_stats(dev, new_state);
+ priv->state = new_state;
+
+ if (!cf)
+ return;
+
+ if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
+ cf->can_id |= CAN_ERR_BUSOFF;
+ return;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= tx_state >= rx_state ?
+ can_tx_state_to_frame(dev, tx_state) : 0;
+ cf->data[1] |= tx_state <= rx_state ?
+ can_rx_state_to_frame(dev, rx_state) : 0;
+}
+EXPORT_SYMBOL_GPL(can_change_state);
+
+/* Local echo of CAN messages
+ *
+ * CAN network devices *should* support a local echo functionality
+ * (see Documentation/networking/can.rst). To test the handling of CAN
+ * interfaces that do not support the local echo both driver types are
+ * implemented. In the case that the driver does not support the echo
+ * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
+ * to perform the echo as a fallback solution.
+ */
+static void can_flush_echo_skb(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ int i;
+
+ for (i = 0; i < priv->echo_skb_max; i++) {
+ if (priv->echo_skb[i]) {
+ kfree_skb(priv->echo_skb[i]);
+ priv->echo_skb[i] = NULL;
+ stats->tx_dropped++;
+ stats->tx_aborted_errors++;
+ }
+ }
+}
+
+/* Put the skb on the stack to be looped backed locally lateron
+ *
+ * The function is typically called in the start_xmit function
+ * of the device driver. The driver must protect access to
+ * priv->echo_skb, if necessary.
+ */
+void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ BUG_ON(idx >= priv->echo_skb_max);
+
+ /* check flag whether this packet has to be looped back */
+ if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
+ (skb->protocol != htons(ETH_P_CAN) &&
+ skb->protocol != htons(ETH_P_CANFD))) {
+ kfree_skb(skb);
+ return;
+ }
+
+ if (!priv->echo_skb[idx]) {
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return;
+
+ /* make settings for echo to reduce code in irq context */
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->dev = dev;
+
+ /* save this skb for tx interrupt echo handling */
+ priv->echo_skb[idx] = skb;
+ } else {
+ /* locking problem with netif_stop_queue() ?? */
+ netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__);
+ kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL_GPL(can_put_echo_skb);
+
+struct sk_buff *
+__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+ __func__, idx, priv->echo_skb_max);
+ return NULL;
+ }
+
+ if (priv->echo_skb[idx]) {
+ /* Using "struct canfd_frame::len" for the frame
+ * length is supported on both CAN and CANFD frames.
+ */
+ struct sk_buff *skb = priv->echo_skb[idx];
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+
+ /* get the real payload length for netdev statistics */
+ if (cf->can_id & CAN_RTR_FLAG)
+ *len_ptr = 0;
+ else
+ *len_ptr = cf->len;
+
+ priv->echo_skb[idx] = NULL;
+
+ return skb;
+ }
+
+ return NULL;
+}
+
+/* Get the skb from the stack and loop it back locally
+ *
+ * The function is typically called when the TX done interrupt
+ * is handled in the device driver. The driver must protect
+ * access to priv->echo_skb, if necessary.
+ */
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
+{
+ struct sk_buff *skb;
+ u8 len;
+
+ skb = __can_get_echo_skb(dev, idx, &len);
+ if (!skb)
+ return 0;
+
+ skb_get(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS)
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_get_echo_skb);
+
+/* Remove the skb from the stack and free it.
+ *
+ * The function is typically called when TX failed.
+ */
+void can_free_echo_skb(struct net_device *dev, unsigned int idx)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ BUG_ON(idx >= priv->echo_skb_max);
+
+ if (priv->echo_skb[idx]) {
+ dev_kfree_skb_any(priv->echo_skb[idx]);
+ priv->echo_skb[idx] = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(can_free_echo_skb);
+
+/* CAN device restart for bus-off recovery */
+static void can_restart(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ int err;
+
+ BUG_ON(netif_carrier_ok(dev));
+
+ /* No synchronization needed because the device is bus-off and
+ * no messages can come in or go out.
+ */
+ can_flush_echo_skb(dev);
+
+ /* send restart message upstream */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (!skb) {
+ err = -ENOMEM;
+ goto restart;
+ }
+ cf->can_id |= CAN_ERR_RESTARTED;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ netif_rx_ni(skb);
+
+restart:
+ netdev_dbg(dev, "restarted\n");
+ priv->can_stats.restarts++;
+
+ /* Now restart the device */
+ err = priv->do_set_mode(dev, CAN_MODE_START);
+
+ netif_carrier_on(dev);
+ if (err)
+ netdev_err(dev, "Error %d during restart", err);
+}
+
+static void can_restart_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct can_priv *priv = container_of(dwork, struct can_priv,
+ restart_work);
+
+ can_restart(priv->dev);
+}
+
+int can_restart_now(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* A manual restart is only permitted if automatic restart is
+ * disabled and the device is in the bus-off state
+ */
+ if (priv->restart_ms)
+ return -EINVAL;
+ if (priv->state != CAN_STATE_BUS_OFF)
+ return -EBUSY;
+
+ cancel_delayed_work_sync(&priv->restart_work);
+ can_restart(dev);
+
+ return 0;
+}
+
+/* CAN bus-off
+ *
+ * This function should be called when the device goes bus-off to
+ * tell the netif layer that no more packets can be sent or received.
+ * If enabled, a timer is started to trigger bus-off recovery.
+ */
+void can_bus_off(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ netdev_info(dev, "bus-off\n");
+
+ netif_carrier_off(dev);
+
+ if (priv->restart_ms)
+ schedule_delayed_work(&priv->restart_work,
+ msecs_to_jiffies(priv->restart_ms));
+}
+EXPORT_SYMBOL_GPL(can_bus_off);
+
+static void can_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_CAN;
+ dev->mtu = CAN_MTU;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 10;
+
+ /* New-style flags. */
+ dev->flags = IFF_NOARP;
+ dev->features = NETIF_F_HW_CSUM;
+}
+
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct can_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CAN);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ *cf = skb_put_zero(skb, sizeof(struct can_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_skb);
+
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct canfd_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CANFD);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+
+struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_can_skb(dev, cf);
+ if (unlikely(!skb))
+ return NULL;
+
+ (*cf)->can_id = CAN_ERR_FLAG;
+ (*cf)->can_dlc = CAN_ERR_DLC;
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_err_skb);
+
+/* Allocate and setup space for the CAN network device */
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs)
+{
+ struct can_ml_priv *can_ml;
+ struct net_device *dev;
+ struct can_priv *priv;
+ int size;
+
+ /* We put the driver's priv, the CAN mid layer priv and the
+ * echo skb into the netdevice's priv. The memory layout for
+ * the netdev_priv is like this:
+ *
+ * +-------------------------+
+ * | driver's priv |
+ * +-------------------------+
+ * | struct can_ml_priv |
+ * +-------------------------+
+ * | array of struct sk_buff |
+ * +-------------------------+
+ */
+
+ size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
+
+ if (echo_skb_max)
+ size = ALIGN(size, sizeof(struct sk_buff *)) +
+ echo_skb_max * sizeof(struct sk_buff *);
+
+ dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
+ txqs, rxqs);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+
+ can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
+ can_set_ml_priv(dev, can_ml);
+
+ if (echo_skb_max) {
+ priv->echo_skb_max = echo_skb_max;
+ priv->echo_skb = (void *)priv +
+ (size - echo_skb_max * sizeof(struct sk_buff *));
+ }
+
+ priv->state = CAN_STATE_STOPPED;
+
+ INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_candev_mqs);
+
+/* Free space of the CAN network device */
+void free_candev(struct net_device *dev)
+{
+ free_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(free_candev);
+
+/* changing MTU and control mode for CAN/CANFD devices */
+int can_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Do not allow changing the MTU while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* allow change of MTU according to the CANFD ability of the device */
+ switch (new_mtu) {
+ case CAN_MTU:
+ /* 'CANFD-only' controllers can not switch to CAN_MTU */
+ if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
+ return -EINVAL;
+
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+ break;
+
+ case CANFD_MTU:
+ /* check for potential CANFD ability */
+ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
+ !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
+ return -EINVAL;
+
+ priv->ctrlmode |= CAN_CTRLMODE_FD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_change_mtu);
+
+/* Common open function when the device gets opened.
+ *
+ * This function should be called in the open function of the device
+ * driver.
+ */
+int open_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (!priv->bittiming.bitrate) {
+ netdev_err(dev, "bit-timing not yet defined\n");
+ return -EINVAL;
+ }
+
+ /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
+ if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
+ (!priv->data_bittiming.bitrate ||
+ priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
+ netdev_err(dev, "incorrect/missing data bit-timing\n");
+ return -EINVAL;
+ }
+
+ /* Switch carrier on if device was stopped while in bus-off state */
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(open_candev);
+
+#ifdef CONFIG_OF
+/* Common function that can be used to understand the limitation of
+ * a transceiver when it provides no means to determine these limitations
+ * at runtime.
+ */
+void of_can_transceiver(struct net_device *dev)
+{
+ struct device_node *dn;
+ struct can_priv *priv = netdev_priv(dev);
+ struct device_node *np = dev->dev.parent->of_node;
+ int ret;
+
+ dn = of_get_child_by_name(np, "can-transceiver");
+ if (!dn)
+ return;
+
+ ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+ of_node_put(dn);
+ if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
+ netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
+}
+EXPORT_SYMBOL_GPL(of_can_transceiver);
+#endif
+
+/* Common close function for cleanup before the device gets closed.
+ *
+ * This function should be called in the close function of the device
+ * driver.
+ */
+void close_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ cancel_delayed_work_sync(&priv->restart_work);
+ can_flush_echo_skb(dev);
+}
+EXPORT_SYMBOL_GPL(close_candev);
+
+/* CAN netlink interface */
+static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
+ [IFLA_CAN_STATE] = { .type = NLA_U32 },
+ [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
+ [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
+ [IFLA_CAN_RESTART] = { .type = NLA_U32 },
+ [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_BITTIMING_CONST]
+ = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
+ [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
+ [IFLA_CAN_DATA_BITTIMING]
+ = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_DATA_BITTIMING_CONST]
+ = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
+};
+
+static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ bool is_can_fd = false;
+
+ /* Make sure that valid CAN FD configurations always consist of
+ * - nominal/arbitration bittiming
+ * - data bittiming
+ * - control mode with CAN_CTRLMODE_FD set
+ */
+
+ if (!data)
+ return 0;
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+ is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+ }
+
+ if (is_can_fd) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
+ return -EOPNOTSUPP;
+ }
+
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* We need synchronization with dev->stop() */
+ ASSERT_RTNL();
+
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * bittiming_const if set, otherwise pass bitrate
+ * directly via do_set_bitrate(). Bail out if neither
+ * is given.
+ */
+ if (!priv->bittiming_const && !priv->do_set_bittiming)
+ return -EOPNOTSUPP;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_get_bittiming(dev, &bt,
+ priv->bittiming_const,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memcpy(&priv->bittiming, &bt, sizeof(bt));
+
+ if (priv->do_set_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm;
+ u32 ctrlstatic;
+ u32 maskedflags;
+
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ ctrlstatic = priv->ctrlmode_static;
+ maskedflags = cm->flags & cm->mask;
+
+ /* check whether provided bits are allowed to be passed */
+ if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
+ return -EOPNOTSUPP;
+
+ /* do not check for static fd-non-iso if 'fd' is disabled */
+ if (!(maskedflags & CAN_CTRLMODE_FD))
+ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+
+ /* make sure static options are provided by configuration */
+ if ((maskedflags & ctrlstatic) != ctrlstatic)
+ return -EOPNOTSUPP;
+
+ /* clear bits to be modified and copy the flag values */
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= maskedflags;
+
+ /* CAN_CTRLMODE_FD can only be set when driver supports FD */
+ if (priv->ctrlmode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
+ else
+ dev->mtu = CAN_MTU;
+ }
+
+ if (data[IFLA_CAN_RESTART_MS]) {
+ /* Do not allow changing restart delay while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ }
+
+ if (data[IFLA_CAN_RESTART]) {
+ /* Do not allow a restart while not running */
+ if (!(dev->flags & IFF_UP))
+ return -EINVAL;
+ err = can_restart_now(dev);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ struct can_bittiming dbt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * data_bittiming_const if set, otherwise pass bitrate
+ * directly via do_set_bitrate(). Bail out if neither
+ * is given.
+ */
+ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+ return -EOPNOTSUPP;
+
+ memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
+ sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt,
+ priv->data_bittiming_const,
+ priv->data_bitrate_const,
+ priv->data_bitrate_const_cnt);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+
+ if (priv->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_data_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
+ if (data[IFLA_CAN_TERMINATION]) {
+ const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
+ const unsigned int num_term = priv->termination_const_cnt;
+ unsigned int i;
+
+ if (!priv->do_set_termination)
+ return -EOPNOTSUPP;
+
+ /* check whether given value is supported by the interface */
+ for (i = 0; i < num_term; i++) {
+ if (termval == priv->termination_const[i])
+ break;
+ }
+ if (i >= num_term)
+ return -EINVAL;
+
+ /* Finally, set the termination value */
+ err = priv->do_set_termination(dev, termval);
+ if (err)
+ return err;
+
+ priv->termination = termval;
+ }
+
+ return 0;
+}
+
+static size_t can_get_size(const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ size_t size = 0;
+
+ if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
+ if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
+ size += nla_total_size(sizeof(struct can_bittiming_const));
+ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
+ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+ size += nla_total_size(sizeof(struct can_berr_counter));
+ if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
+ if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(struct can_bittiming_const));
+ if (priv->termination_const) {
+ size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
+ size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
+ priv->termination_const_cnt);
+ }
+ if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt);
+ if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt);
+ size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
+
+ return size;
+}
+
+static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+ struct can_berr_counter bec = { };
+ enum can_state state = priv->state;
+
+ if (priv->do_get_state)
+ priv->do_get_state(dev, &state);
+
+ if ((priv->bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_BITTIMING,
+ sizeof(priv->bittiming), &priv->bittiming)) ||
+
+ (priv->bittiming_const &&
+ nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+ sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+
+ nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
+ nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+ nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+ nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+
+ (priv->do_get_berr_counter &&
+ !priv->do_get_berr_counter(dev, &bec) &&
+ nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+
+ (priv->data_bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING,
+ sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+
+ (priv->data_bittiming_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ sizeof(*priv->data_bittiming_const),
+ priv->data_bittiming_const)) ||
+
+ (priv->termination_const &&
+ (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
+ nla_put(skb, IFLA_CAN_TERMINATION_CONST,
+ sizeof(*priv->termination_const) *
+ priv->termination_const_cnt,
+ priv->termination_const))) ||
+
+ (priv->bitrate_const &&
+ nla_put(skb, IFLA_CAN_BITRATE_CONST,
+ sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt,
+ priv->bitrate_const)) ||
+
+ (priv->data_bitrate_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt,
+ priv->data_bitrate_const)) ||
+
+ (nla_put(skb, IFLA_CAN_BITRATE_MAX,
+ sizeof(priv->bitrate_max),
+ &priv->bitrate_max))
+ )
+
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static size_t can_get_xstats_size(const struct net_device *dev)
+{
+ return sizeof(struct can_device_stats);
+}
+
+static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (nla_put(skb, IFLA_INFO_XSTATS,
+ sizeof(priv->can_stats), &priv->can_stats))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int can_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+}
+
+static struct rtnl_link_ops can_link_ops __read_mostly = {
+ .kind = "can",
+ .netns_refund = true,
+ .maxtype = IFLA_CAN_MAX,
+ .policy = can_policy,
+ .setup = can_setup,
+ .validate = can_validate,
+ .newlink = can_newlink,
+ .changelink = can_changelink,
+ .dellink = can_dellink,
+ .get_size = can_get_size,
+ .fill_info = can_fill_info,
+ .get_xstats_size = can_get_xstats_size,
+ .fill_xstats = can_fill_xstats,
+};
+
+/* Register the CAN network device */
+int register_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Ensure termination_const, termination_const_cnt and
+ * do_set_termination consistency. All must be either set or
+ * unset.
+ */
+ if ((!priv->termination_const != !priv->termination_const_cnt) ||
+ (!priv->termination_const != !priv->do_set_termination))
+ return -EINVAL;
+
+ if (!priv->bitrate_const != !priv->bitrate_const_cnt)
+ return -EINVAL;
+
+ if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+ return -EINVAL;
+
+ dev->rtnl_link_ops = &can_link_ops;
+ netif_carrier_off(dev);
+
+ return register_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(register_candev);
+
+/* Unregister the CAN network device */
+void unregister_candev(struct net_device *dev)
+{
+ unregister_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_candev);
+
+/* Test if a network device is a candev based device
+ * and return the can_priv* if so.
+ */
+struct can_priv *safe_candev_priv(struct net_device *dev)
+{
+ if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
+ return NULL;
+
+ return netdev_priv(dev);
+}
+EXPORT_SYMBOL_GPL(safe_candev_priv);
+
+static __init int can_dev_init(void)
+{
+ int err;
+
+ can_led_notifier_init();
+
+ err = rtnl_link_register(&can_link_ops);
+ if (!err)
+ pr_info(MOD_DESC "\n");
+
+ return err;
+}
+module_init(can_dev_init);
+
+static __exit void can_dev_exit(void)
+{
+ rtnl_link_unregister(&can_link_ops);
+
+ can_led_notifier_exit();
+}
+module_exit(can_dev_exit);
+
+MODULE_ALIAS_RTNL_LINK("can");
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
new file mode 100644
index 000000000000..7e75a87a8a6a
--- /dev/null
+++ b/drivers/net/can/dev/rx-offload.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 David Jander, Protonic Holland
+ * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@...gutronix.de>
+ */
+
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+
+struct can_rx_offload_cb {
+ u32 timestamp;
+};
+
+static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
+
+ return (struct can_rx_offload_cb *)skb->cb;
+}
+
+static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
+{
+ if (offload->inc)
+ return a <= b;
+ else
+ return a >= b;
+}
+
+static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
+{
+ if (offload->inc)
+ return (*val)++;
+ else
+ return (*val)--;
+}
+
+static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
+{
+ struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
+ struct net_device *dev = offload->dev;
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ int work_done = 0;
+
+ while ((work_done < quota) &&
+ (skb = skb_dequeue(&offload->skb_queue))) {
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ work_done++;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
+ }
+
+ if (work_done < quota) {
+ napi_complete_done(napi, work_done);
+
+ /* Check if there was another interrupt */
+ if (!skb_queue_empty(&offload->skb_queue))
+ napi_reschedule(&offload->napi);
+ }
+
+ can_led_event(offload->dev, CAN_LED_EVENT_RX);
+
+ return work_done;
+}
+
+static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
+ int (*compare)(struct sk_buff *a, struct sk_buff *b))
+{
+ struct sk_buff *pos, *insert = NULL;
+
+ skb_queue_reverse_walk(head, pos) {
+ const struct can_rx_offload_cb *cb_pos, *cb_new;
+
+ cb_pos = can_rx_offload_get_cb(pos);
+ cb_new = can_rx_offload_get_cb(new);
+
+ netdev_dbg(new->dev,
+ "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
+ __func__,
+ cb_pos->timestamp, cb_new->timestamp,
+ cb_new->timestamp - cb_pos->timestamp,
+ skb_queue_len(head));
+
+ if (compare(pos, new) < 0)
+ continue;
+ insert = pos;
+ break;
+ }
+ if (!insert)
+ __skb_queue_head(head, new);
+ else
+ __skb_queue_after(head, insert, new);
+}
+
+static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
+{
+ const struct can_rx_offload_cb *cb_a, *cb_b;
+
+ cb_a = can_rx_offload_get_cb(a);
+ cb_b = can_rx_offload_get_cb(b);
+
+ /* Subtract two u32 and return result as int, to keep
+ * difference steady around the u32 overflow.
+ */
+ return cb_b->timestamp - cb_a->timestamp;
+}
+
+/**
+ * can_rx_offload_offload_one() - Read one CAN frame from HW
+ * @offload: pointer to rx_offload context
+ * @n: number of mailbox to read
+ *
+ * The task of this function is to read a CAN frame from mailbox @n
+ * from the device and return the mailbox's content as a struct
+ * sk_buff.
+ *
+ * If the struct can_rx_offload::skb_queue exceeds the maximal queue
+ * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
+ * allocated, the mailbox contents are discarded by reading them into an
+ * overflow buffer. This way the mailbox is marked as free by the
+ * driver.
+ *
+ * Return: A pointer to skb containing the CAN frame on success.
+ *
+ * NULL if the mailbox @n is empty.
+ *
+ * ERR_PTR() in case of an error
+ */
+static struct sk_buff *
+can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
+{
+ struct sk_buff *skb = NULL, *skb_error = NULL;
+ struct can_rx_offload_cb *cb;
+ struct can_frame *cf;
+ int ret;
+
+ if (likely(skb_queue_len(&offload->skb_queue) <
+ offload->skb_queue_len_max)) {
+ skb = alloc_can_skb(offload->dev, &cf);
+ if (unlikely(!skb))
+ skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */
+ } else {
+ skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */
+ }
+
+ /* If queue is full or skb not available, drop by reading into
+ * overflow buffer.
+ */
+ if (unlikely(skb_error)) {
+ struct can_frame cf_overflow;
+ u32 timestamp;
+
+ ret = offload->mailbox_read(offload, &cf_overflow,
+ &timestamp, n);
+
+ /* Mailbox was empty. */
+ if (unlikely(!ret))
+ return NULL;
+
+ /* Mailbox has been read and we're dropping it or
+ * there was a problem reading the mailbox.
+ *
+ * Increment error counters in any case.
+ */
+ offload->dev->stats.rx_dropped++;
+ offload->dev->stats.rx_fifo_errors++;
+
+ /* There was a problem reading the mailbox, propagate
+ * error value.
+ */
+ if (unlikely(ret < 0))
+ return ERR_PTR(ret);
+
+ return skb_error;
+ }
+
+ cb = can_rx_offload_get_cb(skb);
+ ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
+
+ /* Mailbox was empty. */
+ if (unlikely(!ret)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ /* There was a problem reading the mailbox, propagate error value. */
+ if (unlikely(ret < 0)) {
+ kfree_skb(skb);
+
+ offload->dev->stats.rx_dropped++;
+ offload->dev->stats.rx_fifo_errors++;
+
+ return ERR_PTR(ret);
+ }
+
+ /* Mailbox was read. */
+ return skb;
+}
+
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
+{
+ struct sk_buff_head skb_queue;
+ unsigned int i;
+
+ __skb_queue_head_init(&skb_queue);
+
+ for (i = offload->mb_first;
+ can_rx_offload_le(offload, i, offload->mb_last);
+ can_rx_offload_inc(offload, &i)) {
+ struct sk_buff *skb;
+
+ if (!(pending & BIT_ULL(i)))
+ continue;
+
+ skb = can_rx_offload_offload_one(offload, i);
+ if (IS_ERR_OR_NULL(skb))
+ continue;
+
+ __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
+ }
+
+ if (!skb_queue_empty(&skb_queue)) {
+ unsigned long flags;
+ u32 queue_len;
+
+ spin_lock_irqsave(&offload->skb_queue.lock, flags);
+ skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
+ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+ if ((queue_len = skb_queue_len(&offload->skb_queue)) >
+ (offload->skb_queue_len_max / 8))
+ netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+ __func__, queue_len);
+
+ can_rx_offload_schedule(offload);
+ }
+
+ return skb_queue_len(&skb_queue);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
+
+int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
+{
+ struct sk_buff *skb;
+ int received = 0;
+
+ while (1) {
+ skb = can_rx_offload_offload_one(offload, 0);
+ if (IS_ERR(skb))
+ continue;
+ if (!skb)
+ break;
+
+ skb_queue_tail(&offload->skb_queue, skb);
+ received++;
+ }
+
+ if (received)
+ can_rx_offload_schedule(offload);
+
+ return received;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
+
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp)
+{
+ struct can_rx_offload_cb *cb;
+ unsigned long flags;
+
+ if (skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max) {
+ dev_kfree_skb_any(skb);
+ return -ENOBUFS;
+ }
+
+ cb = can_rx_offload_get_cb(skb);
+ cb->timestamp = timestamp;
+
+ spin_lock_irqsave(&offload->skb_queue.lock, flags);
+ __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
+ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+ can_rx_offload_schedule(offload);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp)
+{
+ struct net_device *dev = offload->dev;
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ u8 len;
+ int err;
+
+ skb = __can_get_echo_skb(dev, idx, &len);
+ if (!skb)
+ return 0;
+
+ err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+ if (err) {
+ stats->rx_errors++;
+ stats->tx_fifo_errors++;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
+
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+ struct sk_buff *skb)
+{
+ if (skb_queue_len(&offload->skb_queue) >
+ offload->skb_queue_len_max) {
+ dev_kfree_skb_any(skb);
+ return -ENOBUFS;
+ }
+
+ skb_queue_tail(&offload->skb_queue, skb);
+ can_rx_offload_schedule(offload);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
+
+static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+{
+ offload->dev = dev;
+
+ /* Limit queue len to 4x the weight (rounded to next power of two) */
+ offload->skb_queue_len_max = 2 << fls(weight);
+ offload->skb_queue_len_max *= 4;
+ skb_queue_head_init(&offload->skb_queue);
+
+ can_rx_offload_reset(offload);
+ netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
+
+ dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
+ __func__, offload->skb_queue_len_max);
+
+ return 0;
+}
+
+int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
+{
+ unsigned int weight;
+
+ if (offload->mb_first > BITS_PER_LONG_LONG ||
+ offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
+ return -EINVAL;
+
+ if (offload->mb_first < offload->mb_last) {
+ offload->inc = true;
+ weight = offload->mb_last - offload->mb_first;
+ } else {
+ offload->inc = false;
+ weight = offload->mb_first - offload->mb_last;
+ }
+
+ return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
+
+int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+{
+ if (!offload->mailbox_read)
+ return -EINVAL;
+
+ return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
+
+void can_rx_offload_enable(struct can_rx_offload *offload)
+{
+ can_rx_offload_reset(offload);
+ napi_enable(&offload->napi);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_enable);
+
+void can_rx_offload_del(struct can_rx_offload *offload)
+{
+ netif_napi_del(&offload->napi);
+ skb_queue_purge(&offload->skb_queue);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_del);
+
+void can_rx_offload_reset(struct can_rx_offload *offload)
+{
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_reset);
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
index 32cb479fe6ac..0d66582bd356 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -88,7 +88,7 @@
#define TCAN4X5X_MRAM_START 0x8000
#define TCAN4X5X_MCAN_OFFSET 0x1000
-#define TCAN4X5X_MAX_REGISTER 0x8fff
+#define TCAN4X5X_MAX_REGISTER 0x8ffc
#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff
#define TCAN4X5X_SET_ALL_INT 0xffffffff
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
deleted file mode 100644
index 7e75a87a8a6a..000000000000
--- a/drivers/net/can/rx-offload.c
+++ /dev/null
@@ -1,395 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014 David Jander, Protonic Holland
- * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@...gutronix.de>
- */
-
-#include <linux/can/dev.h>
-#include <linux/can/rx-offload.h>
-
-struct can_rx_offload_cb {
- u32 timestamp;
-};
-
-static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
-{
- BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
-
- return (struct can_rx_offload_cb *)skb->cb;
-}
-
-static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
-{
- if (offload->inc)
- return a <= b;
- else
- return a >= b;
-}
-
-static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
-{
- if (offload->inc)
- return (*val)++;
- else
- return (*val)--;
-}
-
-static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
-{
- struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
- struct net_device *dev = offload->dev;
- struct net_device_stats *stats = &dev->stats;
- struct sk_buff *skb;
- int work_done = 0;
-
- while ((work_done < quota) &&
- (skb = skb_dequeue(&offload->skb_queue))) {
- struct can_frame *cf = (struct can_frame *)skb->data;
-
- work_done++;
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_receive_skb(skb);
- }
-
- if (work_done < quota) {
- napi_complete_done(napi, work_done);
-
- /* Check if there was another interrupt */
- if (!skb_queue_empty(&offload->skb_queue))
- napi_reschedule(&offload->napi);
- }
-
- can_led_event(offload->dev, CAN_LED_EVENT_RX);
-
- return work_done;
-}
-
-static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
- int (*compare)(struct sk_buff *a, struct sk_buff *b))
-{
- struct sk_buff *pos, *insert = NULL;
-
- skb_queue_reverse_walk(head, pos) {
- const struct can_rx_offload_cb *cb_pos, *cb_new;
-
- cb_pos = can_rx_offload_get_cb(pos);
- cb_new = can_rx_offload_get_cb(new);
-
- netdev_dbg(new->dev,
- "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
- __func__,
- cb_pos->timestamp, cb_new->timestamp,
- cb_new->timestamp - cb_pos->timestamp,
- skb_queue_len(head));
-
- if (compare(pos, new) < 0)
- continue;
- insert = pos;
- break;
- }
- if (!insert)
- __skb_queue_head(head, new);
- else
- __skb_queue_after(head, insert, new);
-}
-
-static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
-{
- const struct can_rx_offload_cb *cb_a, *cb_b;
-
- cb_a = can_rx_offload_get_cb(a);
- cb_b = can_rx_offload_get_cb(b);
-
- /* Substract two u32 and return result as int, to keep
- * difference steady around the u32 overflow.
- */
- return cb_b->timestamp - cb_a->timestamp;
-}
-
-/**
- * can_rx_offload_offload_one() - Read one CAN frame from HW
- * @offload: pointer to rx_offload context
- * @n: number of mailbox to read
- *
- * The task of this function is to read a CAN frame from mailbox @n
- * from the device and return the mailbox's content as a struct
- * sk_buff.
- *
- * If the struct can_rx_offload::skb_queue exceeds the maximal queue
- * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
- * allocated, the mailbox contents is discarded by reading it into an
- * overflow buffer. This way the mailbox is marked as free by the
- * driver.
- *
- * Return: A pointer to skb containing the CAN frame on success.
- *
- * NULL if the mailbox @n is empty.
- *
- * ERR_PTR() in case of an error
- */
-static struct sk_buff *
-can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
-{
- struct sk_buff *skb = NULL, *skb_error = NULL;
- struct can_rx_offload_cb *cb;
- struct can_frame *cf;
- int ret;
-
- if (likely(skb_queue_len(&offload->skb_queue) <
- offload->skb_queue_len_max)) {
- skb = alloc_can_skb(offload->dev, &cf);
- if (unlikely(!skb))
- skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */
- } else {
- skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */
- }
-
- /* If queue is full or skb not available, drop by reading into
- * overflow buffer.
- */
- if (unlikely(skb_error)) {
- struct can_frame cf_overflow;
- u32 timestamp;
-
- ret = offload->mailbox_read(offload, &cf_overflow,
- &timestamp, n);
-
- /* Mailbox was empty. */
- if (unlikely(!ret))
- return NULL;
-
- /* Mailbox has been read and we're dropping it or
- * there was a problem reading the mailbox.
- *
- * Increment error counters in any case.
- */
- offload->dev->stats.rx_dropped++;
- offload->dev->stats.rx_fifo_errors++;
-
- /* There was a problem reading the mailbox, propagate
- * error value.
- */
- if (unlikely(ret < 0))
- return ERR_PTR(ret);
-
- return skb_error;
- }
-
- cb = can_rx_offload_get_cb(skb);
- ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
-
- /* Mailbox was empty. */
- if (unlikely(!ret)) {
- kfree_skb(skb);
- return NULL;
- }
-
- /* There was a problem reading the mailbox, propagate error value. */
- if (unlikely(ret < 0)) {
- kfree_skb(skb);
-
- offload->dev->stats.rx_dropped++;
- offload->dev->stats.rx_fifo_errors++;
-
- return ERR_PTR(ret);
- }
-
- /* Mailbox was read. */
- return skb;
-}
-
-int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
-{
- struct sk_buff_head skb_queue;
- unsigned int i;
-
- __skb_queue_head_init(&skb_queue);
-
- for (i = offload->mb_first;
- can_rx_offload_le(offload, i, offload->mb_last);
- can_rx_offload_inc(offload, &i)) {
- struct sk_buff *skb;
-
- if (!(pending & BIT_ULL(i)))
- continue;
-
- skb = can_rx_offload_offload_one(offload, i);
- if (IS_ERR_OR_NULL(skb))
- continue;
-
- __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
- }
-
- if (!skb_queue_empty(&skb_queue)) {
- unsigned long flags;
- u32 queue_len;
-
- spin_lock_irqsave(&offload->skb_queue.lock, flags);
- skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
- spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
- if ((queue_len = skb_queue_len(&offload->skb_queue)) >
- (offload->skb_queue_len_max / 8))
- netdev_dbg(offload->dev, "%s: queue_len=%d\n",
- __func__, queue_len);
-
- can_rx_offload_schedule(offload);
- }
-
- return skb_queue_len(&skb_queue);
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
-
-int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
-{
- struct sk_buff *skb;
- int received = 0;
-
- while (1) {
- skb = can_rx_offload_offload_one(offload, 0);
- if (IS_ERR(skb))
- continue;
- if (!skb)
- break;
-
- skb_queue_tail(&offload->skb_queue, skb);
- received++;
- }
-
- if (received)
- can_rx_offload_schedule(offload);
-
- return received;
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
-
-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
- struct sk_buff *skb, u32 timestamp)
-{
- struct can_rx_offload_cb *cb;
- unsigned long flags;
-
- if (skb_queue_len(&offload->skb_queue) >
- offload->skb_queue_len_max) {
- dev_kfree_skb_any(skb);
- return -ENOBUFS;
- }
-
- cb = can_rx_offload_get_cb(skb);
- cb->timestamp = timestamp;
-
- spin_lock_irqsave(&offload->skb_queue.lock, flags);
- __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
- spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
- can_rx_offload_schedule(offload);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
-
-unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
- unsigned int idx, u32 timestamp)
-{
- struct net_device *dev = offload->dev;
- struct net_device_stats *stats = &dev->stats;
- struct sk_buff *skb;
- u8 len;
- int err;
-
- skb = __can_get_echo_skb(dev, idx, &len);
- if (!skb)
- return 0;
-
- err = can_rx_offload_queue_sorted(offload, skb, timestamp);
- if (err) {
- stats->rx_errors++;
- stats->tx_fifo_errors++;
- }
-
- return len;
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
-
-int can_rx_offload_queue_tail(struct can_rx_offload *offload,
- struct sk_buff *skb)
-{
- if (skb_queue_len(&offload->skb_queue) >
- offload->skb_queue_len_max) {
- dev_kfree_skb_any(skb);
- return -ENOBUFS;
- }
-
- skb_queue_tail(&offload->skb_queue, skb);
- can_rx_offload_schedule(offload);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
-
-static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
-{
- offload->dev = dev;
-
- /* Limit queue len to 4x the weight (rounted to next power of two) */
- offload->skb_queue_len_max = 2 << fls(weight);
- offload->skb_queue_len_max *= 4;
- skb_queue_head_init(&offload->skb_queue);
-
- can_rx_offload_reset(offload);
- netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
-
- dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
- __func__, offload->skb_queue_len_max);
-
- return 0;
-}
-
-int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
-{
- unsigned int weight;
-
- if (offload->mb_first > BITS_PER_LONG_LONG ||
- offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
- return -EINVAL;
-
- if (offload->mb_first < offload->mb_last) {
- offload->inc = true;
- weight = offload->mb_last - offload->mb_first;
- } else {
- offload->inc = false;
- weight = offload->mb_first - offload->mb_last;
- }
-
- return can_rx_offload_init_queue(dev, offload, weight);
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
-
-int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
-{
- if (!offload->mailbox_read)
- return -EINVAL;
-
- return can_rx_offload_init_queue(dev, offload, weight);
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
-
-void can_rx_offload_enable(struct can_rx_offload *offload)
-{
- can_rx_offload_reset(offload);
- napi_enable(&offload->napi);
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_enable);
-
-void can_rx_offload_del(struct can_rx_offload *offload)
-{
- netif_napi_del(&offload->napi);
- skb_queue_purge(&offload->skb_queue);
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_del);
-
-void can_rx_offload_reset(struct can_rx_offload *offload)
-{
-}
-EXPORT_SYMBOL_GPL(can_rx_offload_reset);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 4dfa459ef5c7..95fefb1eef36 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -519,6 +519,7 @@ static struct slcan *slc_alloc(void)
int i;
char name[IFNAMSIZ];
struct net_device *dev = NULL;
+ struct can_ml_priv *can_ml;
struct slcan *sl;
int size;
@@ -541,7 +542,8 @@ static struct slcan *slc_alloc(void)
dev->base_addr = i;
sl = netdev_priv(dev);
- dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
+ can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
+ can_set_ml_priv(dev, can_ml);
/* Initialize channel control data */
sl->magic = SLCAN_MAGIC;
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 39ca14b0585d..067705e2850b 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -153,7 +153,7 @@ static void vcan_setup(struct net_device *dev)
dev->addr_len = 0;
dev->tx_queue_len = 0;
dev->flags = IFF_NOARP;
- dev->ml_priv = netdev_priv(dev);
+ can_set_ml_priv(dev, netdev_priv(dev));
/* set flags according to driver capabilities */
if (echo)
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index b1baa4ac1d53..7000c6cd1e48 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -141,6 +141,8 @@ static const struct net_device_ops vxcan_netdev_ops = {
static void vxcan_setup(struct net_device *dev)
{
+ struct can_ml_priv *can_ml;
+
dev->type = ARPHRD_CAN;
dev->mtu = CANFD_MTU;
dev->hard_header_len = 0;
@@ -149,7 +151,9 @@ static void vxcan_setup(struct net_device *dev)
dev->flags = (IFF_NOARP|IFF_ECHO);
dev->netdev_ops = &vxcan_netdev_ops;
dev->needs_free_netdev = true;
- dev->ml_priv = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
+
+ can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
+ can_set_ml_priv(dev, can_ml);
}
/* forward declaration for rtnl_create_link() */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index bb65dd39f847..72c7404ae6c5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -66,8 +66,10 @@ static int aq_ndev_open(struct net_device *ndev)
goto err_exit;
err = aq_nic_start(aq_nic);
- if (err < 0)
+ if (err < 0) {
+ aq_nic_stop(aq_nic);
goto err_exit;
+ }
err_exit:
if (err < 0)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 9a77b70ad601..491bcfd36ac2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1073,7 +1073,7 @@ static void mvpp2_interrupts_unmask(void *arg)
u32 val;
/* If the thread isn't used, don't do anything */
- if (smp_processor_id() > port->priv->nthreads)
+ if (smp_processor_id() >= port->priv->nthreads)
return;
val = MVPP2_CAUSE_MISC_SUM_MASK |
@@ -2078,7 +2078,7 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
int queue;
/* If the thread isn't used, don't do anything */
- if (smp_processor_id() > port->priv->nthreads)
+ if (smp_processor_id() >= port->priv->nthreads)
return;
for (queue = 0; queue < port->ntxqs; queue++) {
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 0e6a51525d91..f3deb2a2fa47 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -912,6 +912,8 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
default:
printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
+ unregister_hdlc_device(dev);
+ return -EIO;
break;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 3ec71f52e8fe..d38276ac375e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -445,13 +445,13 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
case WMI_TDLS_TEARDOWN_REASON_TX:
case WMI_TDLS_TEARDOWN_REASON_RSSI:
case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+ rcu_read_lock();
station = ieee80211_find_sta_by_ifaddr(ar->hw,
ev->peer_macaddr.addr,
NULL);
if (!station) {
ath10k_warn(ar, "did not find station from tdls peer event");
- kfree(tb);
- return;
+ goto exit;
}
arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
ieee80211_tdls_oper_request(
@@ -462,6 +462,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
);
break;
}
+
+exit:
+ rcu_read_unlock();
kfree(tb);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 4ca50353538e..cd813c69a178 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5382,7 +5382,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
return false;
}
-static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
+static bool brcmf_is_linkdown(struct brcmf_cfg80211_vif *vif,
+ const struct brcmf_event_msg *e)
{
u32 event = e->event_code;
u16 flags = e->flags;
@@ -5391,6 +5392,8 @@ static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
(event == BRCMF_E_DISASSOC_IND) ||
((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
brcmf_dbg(CONN, "Processing link down\n");
+ clear_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state);
return true;
}
return false;
@@ -5683,7 +5686,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
} else
brcmf_bss_connect_done(cfg, ndev, e, true);
brcmf_net_setcarrier(ifp, true);
- } else if (brcmf_is_linkdown(e)) {
+ } else if (brcmf_is_linkdown(ifp->vif, e)) {
brcmf_dbg(CONN, "Linkdown\n");
if (!brcmf_is_ibssmode(ifp->vif)) {
brcmf_bss_connect_done(cfg, ndev, e, false);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 930edfc32f59..59fe3204e965 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -3386,12 +3386,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
{
struct rockchip_pinctrl *info = dev_get_drvdata(dev);
- int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
- rk3288_grf_gpio6c_iomux |
- GPIO6C6_SEL_WRITE_ENABLE);
+ int ret;
- if (ret)
- return ret;
+ if (info->ctrl->type == RK3288) {
+ ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
+ rk3288_grf_gpio6c_iomux |
+ GPIO6C6_SEL_WRITE_ENABLE);
+ if (ret)
+ return ret;
+ }
return pinctrl_force_default(info->pctl_dev);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d006f0a97b8c..2236751a3a56 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -116,7 +116,6 @@
(min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
#endif
-#endif
#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
? le16_to_cpu((iocb)->u.isp2x.target.extended) \
@@ -244,6 +243,7 @@ struct ctio_to_2xxx {
#ifndef CTIO_RET_TYPE
#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+#endif
struct fcp_hdr {
uint8_t r_ctl;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e3266a64a477..2121e44c342f 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1267,8 +1267,8 @@ static int st_open(struct inode *inode, struct file *filp)
spin_lock(&st_use_lock);
if (STp->in_use) {
spin_unlock(&st_use_lock);
- scsi_tape_put(STp);
DEBC_printk(STp, "Device already in use.\n");
+ scsi_tape_put(STp);
return (-EBUSY);
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 1893c70de0b9..10f097a7d847 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -1281,7 +1281,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
devpriv->amcc + AMCC_OP_REG_INTCSR);
ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED,
- dev->board_name, dev);
+ "cb_pcidas", dev);
if (ret) {
dev_dbg(dev->class_dev, "unable to allocate irq %d\n",
pcidev->irq);
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index e1774e09a320..9fe8b65cd9e3 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -4035,7 +4035,7 @@ static int auto_attach(struct comedi_device *dev,
init_stc_registers(dev);
retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
- dev->board_name, dev);
+ "cb_pcidas64", dev);
if (retval) {
dev_dbg(dev->class_dev, "unable to allocate irq %u\n",
pcidev->irq);
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 328f410daa03..2eeb9a43734e 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1105,7 +1105,7 @@ struct rtllib_network {
bool bWithAironetIE;
bool bCkipSupported;
bool bCcxRmEnable;
- u16 CcxRmState[2];
+ u8 CcxRmState[2];
bool bMBssidValid;
u8 MBssidMask;
u8 MBssid[ETH_ALEN];
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 0bae0a0a4cbe..83c30e2d82f5 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1968,7 +1968,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
info_element->data[2] == 0x96 &&
info_element->data[3] == 0x01) {
if (info_element->len == 6) {
- memcpy(network->CcxRmState, &info_element[4], 2);
+ memcpy(network->CcxRmState, &info_element->data[4], 2);
if (network->CcxRmState[0] != 0)
network->bCcxRmEnable = true;
else
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index aa99edb4dff7..4dce4a8f71ed 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -770,6 +770,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
{
struct cooling_dev_stats *stats = cdev->stats;
+ if (!stats)
+ return;
+
spin_lock(&stats->lock);
if (stats->state == new_state)
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 00bfc81f2470..2b672840dfd9 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control)
#define acm_send_break(acm, ms) \
acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
-static void acm_kill_urbs(struct acm *acm)
+static void acm_poison_urbs(struct acm *acm)
{
int i;
- usb_kill_urb(acm->ctrlurb);
+ usb_poison_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
- usb_kill_urb(acm->wb[i].urb);
+ usb_poison_urb(acm->wb[i].urb);
for (i = 0; i < acm->rx_buflimit; i++)
- usb_kill_urb(acm->read_urbs[i]);
+ usb_poison_urb(acm->read_urbs[i]);
}
+static void acm_unpoison_urbs(struct acm *acm)
+{
+ int i;
+
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_unpoison_urb(acm->read_urbs[i]);
+ for (i = 0; i < ACM_NW; i++)
+ usb_unpoison_urb(acm->wb[i].urb);
+ usb_unpoison_urb(acm->ctrlurb);
+}
+
+
/*
* Write buffer management.
* All of these assume proper locks taken by the caller.
@@ -225,9 +237,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
if (rc < 0) {
- dev_err(&acm->data->dev,
- "%s - usb_submit_urb(write bulk) failed: %d\n",
- __func__, rc);
+ if (rc != -EPERM)
+ dev_err(&acm->data->dev,
+ "%s - usb_submit_urb(write bulk) failed: %d\n",
+ __func__, rc);
acm_write_done(acm, wb);
}
return rc;
@@ -312,8 +325,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
acm->iocount.dsr++;
if (difference & ACM_CTRL_DCD)
acm->iocount.dcd++;
- if (newctrl & ACM_CTRL_BRK)
+ if (newctrl & ACM_CTRL_BRK) {
acm->iocount.brk++;
+ tty_insert_flip_char(&acm->port, 0, TTY_BREAK);
+ }
if (newctrl & ACM_CTRL_RI)
acm->iocount.rng++;
if (newctrl & ACM_CTRL_FRAMING)
@@ -479,11 +494,6 @@ static void acm_read_bulk_callback(struct urb *urb)
dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
rb->index, urb->actual_length, status);
- if (!acm->dev) {
- dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
- return;
- }
-
switch (status) {
case 0:
usb_mark_last_busy(acm->dev);
@@ -648,7 +658,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise)
res = acm_set_control(acm, val);
if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE))
- dev_err(&acm->control->dev, "failed to set dtr/rts\n");
+ /* This is broken in too many devices to spam the logs */
+ dev_dbg(&acm->control->dev, "failed to set dtr/rts\n");
}
static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
@@ -730,6 +741,7 @@ static void acm_port_shutdown(struct tty_port *port)
* Need to grab write_lock to prevent race with resume, but no need to
* hold it due to the tty-port initialised flag.
*/
+ acm_poison_urbs(acm);
spin_lock_irq(&acm->write_lock);
spin_unlock_irq(&acm->write_lock);
@@ -746,7 +758,8 @@ static void acm_port_shutdown(struct tty_port *port)
usb_autopm_put_interface_async(acm->control);
}
- acm_kill_urbs(acm);
+ acm_unpoison_urbs(acm);
+
}
static void acm_tty_cleanup(struct tty_struct *tty)
@@ -1516,12 +1529,16 @@ static int acm_probe(struct usb_interface *intf,
return 0;
alloc_fail6:
+ if (!acm->combined_interfaces) {
+ /* Clear driver data so that disconnect() returns early. */
+ usb_set_intfdata(data_interface, NULL);
+ usb_driver_release_interface(&acm_driver, data_interface);
+ }
if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
device_remove_file(&acm->control->dev,
&dev_attr_iCountryCodeRelDate);
- kfree(acm->country_codes);
}
device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
alloc_fail5:
@@ -1553,8 +1570,14 @@ static void acm_disconnect(struct usb_interface *intf)
if (!acm)
return;
- mutex_lock(&acm->mutex);
acm->disconnected = true;
+ /*
+ * there is a circular dependency. acm_softint() can resubmit
+ * the URBs in error handling so we need to block any
+ * submission right away
+ */
+ acm_poison_urbs(acm);
+ mutex_lock(&acm->mutex);
if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
@@ -1573,7 +1596,6 @@ static void acm_disconnect(struct usb_interface *intf)
tty_kref_put(tty);
}
- acm_kill_urbs(acm);
cancel_delayed_work_sync(&acm->dwork);
tty_unregister_device(acm_tty_driver, acm->minor);
@@ -1615,7 +1637,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
if (cnt)
return 0;
- acm_kill_urbs(acm);
+ acm_poison_urbs(acm);
cancel_delayed_work_sync(&acm->dwork);
acm->urbs_in_error_delay = 0;
@@ -1628,6 +1650,7 @@ static int acm_resume(struct usb_interface *intf)
struct urb *urb;
int rv = 0;
+ acm_unpoison_urbs(acm);
spin_lock_irq(&acm->write_lock);
if (--acm->susp_count)
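
The acm_kill_urbs() to acm_poison_urbs() switch above matters because a poisoned URB stays cancelled: usb_submit_urb() on it keeps failing with -EPERM until usb_unpoison_urb() is called, which is also why acm_start_wb() now suppresses the error print for -EPERM. A minimal sketch of that poison/unpoison pattern, with a hypothetical driver-private struct that is not part of this patch:

#include <linux/usb.h>

struct demo_priv {
        struct urb *urb;                /* hypothetical single URB */
};

static void demo_block_io(struct demo_priv *priv)
{
        /* Cancel the URB and make any later usb_submit_urb() on it
         * fail with -EPERM, even from a completion handler. */
        usb_poison_urb(priv->urb);
}

static int demo_resume_io(struct demo_priv *priv)
{
        /* Allow submissions again, e.g. on resume. */
        usb_unpoison_urb(priv->urb);
        return usb_submit_urb(priv->urb, GFP_KERNEL);
}
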
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 239443ce5200..b8a4707dfafa 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -498,6 +498,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* Fibocom L850-GL LTE Modem */
+ { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
+ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index a91f2aa24118..258056c82206 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4322,7 +4322,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
goto unlock;
- if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL)
+ if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL ||
+ hsotg->flags.b.port_connect_status == 0)
goto skip_power_saving;
/*
@@ -5398,7 +5399,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
dwc2_writel(hsotg, hprt0, HPRT0);
/* Wait for the HPRT0.PrtSusp register field to be set */
- if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
+ if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
dev_warn(hsotg->dev, "Suspend wasn't generated\n");
/*
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index 57b6f66331cf..362284057d30 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -154,6 +154,11 @@ static int udc_pci_probe(
pci_set_master(pdev);
pci_try_set_mwi(pdev);
+ dev->phys_addr = resource;
+ dev->irq = pdev->irq;
+ dev->pdev = pdev;
+ dev->dev = &pdev->dev;
+
/* init dma pools */
if (use_dma) {
retval = init_dma_pools(dev);
@@ -161,11 +166,6 @@ static int udc_pci_probe(
goto err_dma;
}
- dev->phys_addr = resource;
- dev->irq = pdev->irq;
- dev->pdev = pdev;
- dev->dev = &pdev->dev;
-
/* general probing */
if (udc_probe(dev)) {
retval = -ENODEV;
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 09b67219fd14..5fc3ea6d46c5 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -397,6 +397,13 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
if (mtk->lpm_support)
xhci->quirks |= XHCI_LPM_SUPPORT;
+
+ /*
+ * MTK xHCI 0.96: the PSA field is 1 by default even when streams are not
+ * supported, and 3 when they are.

+ */
+ if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
}
/* called during probe() after chip reset completes */
@@ -553,7 +560,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
if (ret)
goto put_usb3_hcd;
- if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
+ !(xhci->quirks & XHCI_BROKEN_STREAMS))
xhci->shared_hcd->can_do_streams = 1;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 9fcff4e94484..166f68f639c2 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1866,10 +1866,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
case MUSB_QUIRK_B_DISCONNECT_99:
- musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
- schedule_delayed_work(&musb->irq_work,
- msecs_to_jiffies(1000));
- break;
+ if (musb->quirk_retries && !musb->flush_irq_work) {
+ musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+ musb->quirk_retries--;
+ break;
+ }
+ fallthrough;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index fee511437abe..1e0b618e2e6e 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -595,6 +595,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
pr_err("invalid port number %d\n", wIndex);
goto error;
}
+ if (wValue >= 32)
+ goto error;
if (hcd->speed == HCD_USB3) {
if ((vhci_hcd->port_status[rhport] &
USB_SS_PORT_STAT_POWER) != 0) {
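
The new wValue bound above guards the feature selector before it is used as a bit position (1 << wValue) in the 32-bit per-port status word later in vhci_hub_control(); shifting a 32-bit value by 32 or more is undefined behaviour. A stand-alone userspace illustration of the same guard, using a hypothetical helper:

#include <stdint.h>
#include <stdio.h>

static int set_port_feature(uint32_t *status, unsigned int wValue)
{
        if (wValue >= 32)               /* would shift out of bounds below */
                return -1;
        *status |= UINT32_C(1) << wValue;
        return 0;
}

int main(void)
{
        uint32_t status = 0;

        printf("%d\n", set_port_feature(&status, 5));   /* 0: bit 5 set */
        printf("%d\n", set_port_feature(&status, 60));  /* -1: rejected */
        return 0;
}
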
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index ac3c1dd3edef..4abddbebd4b2 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -42,6 +42,6 @@ config VFIO_PCI_IGD
config VFIO_PCI_NVLINK2
def_bool y
- depends on VFIO_PCI && PPC_POWERNV
+ depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU
help
VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 57ab79fbcee9..a279ecacbf60 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -320,8 +320,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->kick = NULL;
vq->call_ctx = NULL;
vq->log_ctx = NULL;
- vhost_reset_is_le(vq);
vhost_disable_cross_endian(vq);
+ vhost_reset_is_le(vq);
vq->busyloop_timeout = 0;
vq->umem = NULL;
vq->iotlb = NULL;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 4cf71ee0965a..0a9202176ffc 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -1339,6 +1339,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
+ if (!ops->cursor)
+ return;
+
ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
}
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 5aba67a504cf..031ff3f19018 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -612,27 +612,41 @@ int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
/**
* ext4_should_retry_alloc() - check if a block allocation should be retried
- * @sb: super block
- * @retries: number of attemps has been made
+ * @sb: superblock
+ * @retries: number of retry attempts made so far
*
- * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
- * it is profitable to retry the operation, this function will wait
- * for the current or committing transaction to complete, and then
- * return TRUE. We will only retry once.
+ * ext4_should_retry_alloc() is called when ENOSPC is returned while
+ * attempting to allocate blocks. If there's an indication that a pending
+ * journal transaction might free some space and allow another attempt to
+ * succeed, this function will wait for the current or committing transaction
+ * to complete and then return TRUE.
*/
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
- if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
- (*retries)++ > 1 ||
- !EXT4_SB(sb)->s_journal)
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (!sbi->s_journal)
return 0;
- smp_mb();
- if (EXT4_SB(sb)->s_mb_free_pending == 0)
+ if (++(*retries) > 3) {
+ percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit);
return 0;
+ }
+ /*
+ * if there's no indication that blocks are about to be freed it's
+ * possible we just missed a transaction commit that did so
+ */
+ smp_mb();
+ if (sbi->s_mb_free_pending == 0)
+ return ext4_has_free_clusters(sbi, 1, 0);
+
+ /*
+ * it's possible we've just missed a transaction commit here,
+ * so ignore the returned status
+ */
jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
- jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
+ (void) jbd2_journal_force_commit_nested(sbi->s_journal);
return 1;
}
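
ext4_should_retry_alloc() is meant to sit in a retry loop around an allocation that failed with ENOSPC: after the rework above it waits for a journal commit, allows up to three retries, and bumps the new s_sra_exceeded_retry_limit counter (exported read-only through ext4's sysfs attributes later in this patch) once the limit is exceeded. A sketch of the usual caller shape, with a hypothetical allocation helper; the retries counter must live across iterations:

#include "ext4.h"       /* for ext4_should_retry_alloc(); sketch assumes fs/ext4 context */

static int demo_do_allocation(struct super_block *sb);  /* hypothetical */

static int demo_alloc_with_retry(struct super_block *sb)
{
        int retries = 0;
        int err;

retry:
        err = demo_do_allocation(sb);
        if (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
                goto retry;             /* helper waited for a commit first */
        return err;
}
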
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 1c558b554788..bf3eaa903033 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1420,6 +1420,7 @@ struct ext4_sb_info {
struct percpu_counter s_freeinodes_counter;
struct percpu_counter s_dirs_counter;
struct percpu_counter s_dirtyclusters_counter;
+ struct percpu_counter s_sra_exceeded_retry_limit;
struct blockgroup_lock *s_blockgroup_lock;
struct proc_dir_entry *s_proc;
struct kobject s_kobj;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index efce97b938b7..1429d01d836b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2076,13 +2076,13 @@ static int __ext4_journalled_writepage(struct page *page,
if (!ret)
ret = err;
- if (!ext4_has_inline_data(inode))
- ext4_walk_page_buffers(NULL, page_bufs, 0, len,
- NULL, bput_one);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
unlock_page(page);
out_no_pagelock:
+ if (!inline_data && page_bufs)
+ ext4_walk_page_buffers(NULL, page_bufs, 0, len,
+ NULL, bput_one);
brelse(inode_bh);
return ret;
}
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index e992a9f15671..4c37abe76851 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3731,14 +3731,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
retval = -ENOENT;
if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
- goto end_rename;
+ goto release_bh;
new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
&new.de, &new.inlined);
if (IS_ERR(new.bh)) {
retval = PTR_ERR(new.bh);
new.bh = NULL;
- goto end_rename;
+ goto release_bh;
}
if (new.bh) {
if (!new.inode) {
@@ -3755,15 +3755,13 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
- handle = NULL;
- goto end_rename;
+ goto release_bh;
}
} else {
whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
if (IS_ERR(whiteout)) {
retval = PTR_ERR(whiteout);
- whiteout = NULL;
- goto end_rename;
+ goto release_bh;
}
}
@@ -3871,16 +3869,18 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
ext4_resetent(handle, &old,
old.inode->i_ino, old_file_type);
drop_nlink(whiteout);
+ ext4_orphan_add(handle, whiteout);
}
unlock_new_inode(whiteout);
+ ext4_journal_stop(handle);
iput(whiteout);
-
+ } else {
+ ext4_journal_stop(handle);
}
+release_bh:
brelse(old.dir_bh);
brelse(old.bh);
brelse(new.bh);
- if (handle)
- ext4_journal_stop(handle);
return retval;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 06568467b0c2..2ecf4594a20d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1017,6 +1017,7 @@ static void ext4_put_super(struct super_block *sb)
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
percpu_free_rwsem(&sbi->s_writepages_rwsem);
#ifdef CONFIG_QUOTA
for (i = 0; i < EXT4_MAXQUOTAS; i++)
@@ -4597,6 +4598,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (!err)
err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
GFP_KERNEL);
+ if (!err)
+ err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
+ GFP_KERNEL);
if (!err)
err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
@@ -4699,6 +4703,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
percpu_free_rwsem(&sbi->s_writepages_rwsem);
failed_mount5:
ext4_ext_release(sb);
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index eb1efad0e20a..9394360ff137 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -23,6 +23,7 @@ typedef enum {
attr_session_write_kbytes,
attr_lifetime_write_kbytes,
attr_reserved_clusters,
+ attr_sra_exceeded_retry_limit,
attr_inode_readahead,
attr_trigger_test_error,
attr_first_error_time,
@@ -176,6 +177,7 @@ EXT4_ATTR_FUNC(delayed_allocation_blocks, 0444);
EXT4_ATTR_FUNC(session_write_kbytes, 0444);
EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444);
EXT4_ATTR_FUNC(reserved_clusters, 0644);
+EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
ext4_sb_info, s_inode_readahead_blks);
@@ -207,6 +209,7 @@ static struct attribute *ext4_attrs[] = {
ATTR_LIST(session_write_kbytes),
ATTR_LIST(lifetime_write_kbytes),
ATTR_LIST(reserved_clusters),
+ ATTR_LIST(sra_exceeded_retry_limit),
ATTR_LIST(inode_readahead_blks),
ATTR_LIST(inode_goal),
ATTR_LIST(mb_stats),
@@ -308,6 +311,10 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)
atomic64_read(&sbi->s_resv_clusters));
+ case attr_sra_exceeded_retry_limit:
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)
+ percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
case attr_inode_readahead:
case attr_pointer_ui:
if (!ptr)
diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
index 152a230f668d..bd0cc3dcc980 100644
--- a/fs/iomap/swapfile.c
+++ b/fs/iomap/swapfile.c
@@ -169,6 +169,16 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
return ret;
}
+ /*
+ * If this swapfile doesn't contain even a single page-aligned
+ * contiguous range of blocks, reject this useless swapfile to
+ * prevent confusion later on.
+ */
+ if (isi.nr_pages == 0) {
+ pr_warn("swapon: Cannot find a single usable page in file.\n");
+ return -EINVAL;
+ }
+
*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
sis->max = isi.nr_pages;
sis->pages = isi.nr_pages - 1;
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index f2f81561ebb6..4d6e71335bce 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -73,6 +73,7 @@ config NFSD_V4
select NFSD_V3
select FS_POSIX_ACL
select SUNRPC_GSS
+ select CRYPTO
select CRYPTO_MD5
select CRYPTO_SHA256
select GRACE_PERIOD
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index efe55d101b0e..3c50d18fe8a9 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1121,6 +1121,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
switch (task->tk_status) {
case -EIO:
case -ETIMEDOUT:
+ case -EACCES:
nfsd4_mark_cb_down(clp, task->tk_status);
}
break;
diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
index c764352447ba..81bec2c80b25 100644
--- a/fs/reiserfs/xattr.h
+++ b/fs/reiserfs/xattr.h
@@ -43,7 +43,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec);
static inline int reiserfs_xattrs_initialized(struct super_block *sb)
{
- return REISERFS_SB(sb)->priv_root != NULL;
+ return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root;
}
#define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
index 2f5d731ae251..8afa92d15a66 100644
--- a/include/linux/can/can-ml.h
+++ b/include/linux/can/can-ml.h
@@ -44,6 +44,7 @@
#include <linux/can.h>
#include <linux/list.h>
+#include <linux/netdevice.h>
#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
#define CAN_EFF_RCV_HASH_BITS 10
@@ -65,4 +66,15 @@ struct can_ml_priv {
#endif
};
+static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
+{
+ return netdev_get_ml_priv(dev, ML_PRIV_CAN);
+}
+
+static inline void can_set_ml_priv(struct net_device *dev,
+ struct can_ml_priv *ml_priv)
+{
+ netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
+}
+
#endif /* CAN_ML_H */
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 2bdf643d8593..47d3e19a49ae 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -271,6 +271,29 @@ static inline void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb) { }
+static inline int extcon_register_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int devm_extcon_register_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void devm_extcon_unregister_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb) { }
+
static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
return ERR_PTR(-ENODEV);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index db1b9623977c..11a52f2fa35d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1555,6 +1555,12 @@ enum netdev_priv_flags {
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
+/* Specifies the type of the struct net_device::ml_priv pointer */
+enum netdev_ml_priv_type {
+ ML_PRIV_NONE,
+ ML_PRIV_CAN,
+};
+
/**
* struct net_device - The DEVICE structure.
*
@@ -1732,6 +1738,7 @@ enum netdev_priv_flags {
* @nd_net: Network namespace this network device is inside
*
* @ml_priv: Mid-layer private
+ * @ml_priv_type: Mid-layer private type
* @lstats: Loopback statistics
* @tstats: Tunnel statistics
* @dstats: Dummy statistics
@@ -2019,8 +2026,10 @@ struct net_device {
possible_net_t nd_net;
/* mid-layer private */
+ void *ml_priv;
+ enum netdev_ml_priv_type ml_priv_type;
+
union {
- void *ml_priv;
struct pcpu_lstats __percpu *lstats;
struct pcpu_sw_netstats __percpu *tstats;
struct pcpu_dstats __percpu *dstats;
@@ -2167,6 +2176,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev)
netdev_set_rx_headroom(dev, -1);
}
+static inline void *netdev_get_ml_priv(struct net_device *dev,
+ enum netdev_ml_priv_type type)
+{
+ if (dev->ml_priv_type != type)
+ return NULL;
+
+ return dev->ml_priv;
+}
+
+static inline void netdev_set_ml_priv(struct net_device *dev,
+ void *ml_priv,
+ enum netdev_ml_priv_type type)
+{
+ WARN(dev->ml_priv_type && dev->ml_priv_type != type,
+ "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
+ dev->ml_priv_type, type);
+ WARN(!dev->ml_priv_type && dev->ml_priv,
+ "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
+
+ dev->ml_priv = ml_priv;
+ dev->ml_priv_type = type;
+}
+
/*
* Net namespace inlines
*/
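
With ml_priv now tagged by ml_priv_type, netdev_get_ml_priv() returns NULL unless the stored type matches, so a mid layer can no longer pick up another layer's private pointer by accident. A minimal sketch using the CAN wrappers added earlier in this patch; the attach helper is hypothetical and only stands in for what the CAN core does when it allocates the device:

#include <linux/can/can-ml.h>
#include <linux/netdevice.h>

/* Hypothetical attach helper; stands in for the CAN core's device setup. */
static void demo_attach_can_ml(struct net_device *dev, struct can_ml_priv *ml)
{
        can_set_ml_priv(dev, ml);       /* records ML_PRIV_CAN as the type */
}

static struct can_dev_rcv_lists *demo_rcv_lists(struct net_device *dev)
{
        struct can_ml_priv *ml = can_get_ml_priv(dev);

        /* NULL for any netdev whose ml_priv is not CAN-typed */
        return ml ? &ml->dev_rcv_lists : NULL;
}
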
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 468a9b8422e3..07a9f9f46e03 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -636,7 +636,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
*/
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
- const bool use_ww_ctx, struct mutex_waiter *waiter)
+ struct mutex_waiter *waiter)
{
if (!waiter) {
/*
@@ -712,7 +712,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
- const bool use_ww_ctx, struct mutex_waiter *waiter)
+ struct mutex_waiter *waiter)
{
return false;
}
@@ -932,6 +932,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct ww_mutex *ww;
int ret;
+ if (!use_ww_ctx)
+ ww_ctx = NULL;
+
might_sleep();
#ifdef CONFIG_DEBUG_MUTEXES
@@ -939,7 +942,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
#endif
ww = container_of(lock, struct ww_mutex, base);
- if (use_ww_ctx && ww_ctx) {
+ if (ww_ctx) {
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
return -EALREADY;
@@ -956,10 +959,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
if (__mutex_trylock(lock) ||
- mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
+ mutex_optimistic_spin(lock, ww_ctx, NULL)) {
/* got the lock, yay! */
lock_acquired(&lock->dep_map, ip);
- if (use_ww_ctx && ww_ctx)
+ if (ww_ctx)
ww_mutex_set_context_fastpath(ww, ww_ctx);
preempt_enable();
return 0;
@@ -970,7 +973,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* After waiting to acquire the wait_lock, try again.
*/
if (__mutex_trylock(lock)) {
- if (use_ww_ctx && ww_ctx)
+ if (ww_ctx)
__ww_mutex_check_waiters(lock, ww_ctx);
goto skip_wait;
@@ -1023,7 +1026,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
goto err;
}
- if (use_ww_ctx && ww_ctx) {
+ if (ww_ctx) {
ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
if (ret)
goto err;
@@ -1036,7 +1039,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* ww_mutex needs to always recheck its position since its waiter
* list is not FIFO ordered.
*/
- if ((use_ww_ctx && ww_ctx) || !first) {
+ if (ww_ctx || !first) {
first = __mutex_waiter_is_first(lock, &waiter);
if (first)
__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@@ -1049,7 +1052,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* or we must see its unlock and acquire.
*/
if (__mutex_trylock(lock) ||
- (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
+ (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
break;
spin_lock(&lock->wait_lock);
@@ -1058,7 +1061,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
acquired:
__set_current_state(TASK_RUNNING);
- if (use_ww_ctx && ww_ctx) {
+ if (ww_ctx) {
/*
* Wound-Wait; we stole the lock (!first_waiter), check the
* waiters as anyone might want to wound us.
@@ -1078,7 +1081,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
/* got the lock - cleanup and rejoice! */
lock_acquired(&lock->dep_map, ip);
- if (use_ww_ctx && ww_ctx)
+ if (ww_ctx)
ww_mutex_lock_acquired(ww, ww_ctx);
spin_unlock(&lock->wait_lock);
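
Dropping the use_ww_ctx argument from mutex_optimistic_spin() works because __mutex_lock_common() now nullifies ww_ctx up front when use_ww_ctx is false, so every later test collapses from "use_ww_ctx && ww_ctx" to just "ww_ctx"; since use_ww_ctx is a compile-time constant at the call sites of this always-inline function, the compiler can still discard the dead path. A userspace sketch of that folding idea, with hypothetical names:

/* Sketch: a constant flag passed into an always-inline helper lets the
 * compiler drop the unused path, so later code only needs to test ctx. */
static inline __attribute__((always_inline))
int lock_common(void *ctx, const int use_ctx)
{
        if (!use_ctx)
                ctx = (void *)0;        /* folded away when use_ctx is 1 */

        return ctx != (void *)0;        /* later code only tests ctx */
}

int lock_plain(void)     { return lock_common((void *)0, 0); }
int lock_with(void *ctx) { return lock_common(ctx, 1); }
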
diff --git a/kernel/module.c b/kernel/module.c
index ab1f97cfe18d..c60559b5bf10 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2908,20 +2908,14 @@ static int module_sig_check(struct load_info *info, int flags)
* enforcing, certain errors are non-fatal.
*/
case -ENODATA:
- reason = "Loading of unsigned module";
- goto decide;
+ reason = "unsigned module";
+ break;
case -ENOPKG:
- reason = "Loading of module with unsupported crypto";
- goto decide;
+ reason = "module with unsupported crypto";
+ break;
case -ENOKEY:
- reason = "Loading of module with unavailable key";
- decide:
- if (is_module_sig_enforced()) {
- pr_notice("%s is rejected\n", reason);
- return -EKEYREJECTED;
- }
-
- return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
+ reason = "module with unavailable key";
+ break;
/* All other errors are fatal, including nomem, unparseable
* signatures and signature check failures - even if signatures
@@ -2930,6 +2924,13 @@ static int module_sig_check(struct load_info *info, int flags)
default:
return err;
}
+
+ if (is_module_sig_enforced()) {
+ pr_notice("Loading of %s is rejected\n", reason);
+ return -EKEYREJECTED;
+ }
+
+ return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
}
#else /* !CONFIG_MODULE_SIG */
static int module_sig_check(struct load_info *info, int flags)
@@ -2938,9 +2939,33 @@ static int module_sig_check(struct load_info *info, int flags)
}
#endif /* !CONFIG_MODULE_SIG */
-/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
-static int elf_header_check(struct load_info *info)
+static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
+{
+ unsigned long secend;
+
+ /*
+ * Check for both overflow and offset/size being
+ * too large.
+ */
+ secend = shdr->sh_offset + shdr->sh_size;
+ if (secend < shdr->sh_offset || secend > info->len)
+ return -ENOEXEC;
+
+ return 0;
+}
+
+/*
+ * Sanity checks against invalid binaries, wrong arch, weird elf version.
+ *
+ * Also do basic validity checks against section offsets and sizes, the
+ * section name string table, and the indices used for it (sh_name).
+ */
+static int elf_validity_check(struct load_info *info)
{
+ unsigned int i;
+ Elf_Shdr *shdr, *strhdr;
+ int err;
+
if (info->len < sizeof(*(info->hdr)))
return -ENOEXEC;
@@ -2950,11 +2975,78 @@ static int elf_header_check(struct load_info *info)
|| info->hdr->e_shentsize != sizeof(Elf_Shdr))
return -ENOEXEC;
+ /*
+ * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
+ * known and small. So e_shnum * sizeof(Elf_Shdr)
+ * will not overflow unsigned long on any platform.
+ */
if (info->hdr->e_shoff >= info->len
|| (info->hdr->e_shnum * sizeof(Elf_Shdr) >
info->len - info->hdr->e_shoff))
return -ENOEXEC;
+ info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
+
+ /*
+ * Verify if the section name table index is valid.
+ */
+ if (info->hdr->e_shstrndx == SHN_UNDEF
+ || info->hdr->e_shstrndx >= info->hdr->e_shnum)
+ return -ENOEXEC;
+
+ strhdr = &info->sechdrs[info->hdr->e_shstrndx];
+ err = validate_section_offset(info, strhdr);
+ if (err < 0)
+ return err;
+
+ /*
+ * The section name table must be NUL-terminated, as required
+ * by the spec. This makes strcmp and pr_* calls that access
+ * strings in the section safe.
+ */
+ info->secstrings = (void *)info->hdr + strhdr->sh_offset;
+ if (info->secstrings[strhdr->sh_size - 1] != '\0')
+ return -ENOEXEC;
+
+ /*
+ * The code assumes that section 0 has a length of zero and
+ * an addr of zero, so check for it.
+ */
+ if (info->sechdrs[0].sh_type != SHT_NULL
+ || info->sechdrs[0].sh_size != 0
+ || info->sechdrs[0].sh_addr != 0)
+ return -ENOEXEC;
+
+ for (i = 1; i < info->hdr->e_shnum; i++) {
+ shdr = &info->sechdrs[i];
+ switch (shdr->sh_type) {
+ case SHT_NULL:
+ case SHT_NOBITS:
+ continue;
+ case SHT_SYMTAB:
+ if (shdr->sh_link == SHN_UNDEF
+ || shdr->sh_link >= info->hdr->e_shnum)
+ return -ENOEXEC;
+ fallthrough;
+ default:
+ err = validate_section_offset(info, shdr);
+ if (err < 0) {
+ pr_err("Invalid ELF section in module (section %u type %u)\n",
+ i, shdr->sh_type);
+ return err;
+ }
+
+ if (shdr->sh_flags & SHF_ALLOC) {
+ if (shdr->sh_name >= strhdr->sh_size) {
+ pr_err("Invalid ELF section name in module (section %u type %u)\n",
+ i, shdr->sh_type);
+ return -ENOEXEC;
+ }
+ }
+ break;
+ }
+ }
+
return 0;
}
@@ -3051,11 +3143,6 @@ static int rewrite_section_headers(struct load_info *info, int flags)
for (i = 1; i < info->hdr->e_shnum; i++) {
Elf_Shdr *shdr = &info->sechdrs[i];
- if (shdr->sh_type != SHT_NOBITS
- && info->len < shdr->sh_offset + shdr->sh_size) {
- pr_err("Module len %lu truncated\n", info->len);
- return -ENOEXEC;
- }
/* Mark all sections sh_addr with their address in the
temporary image. */
@@ -3087,11 +3174,6 @@ static int setup_load_info(struct load_info *info, int flags)
{
unsigned int i;
- /* Set up the convenience variables */
- info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
- info->secstrings = (void *)info->hdr
- + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
-
/* Try to find a name early so we can log errors with a module name */
info->index.info = find_sec(info, ".modinfo");
if (info->index.info)
@@ -3819,23 +3901,49 @@ static int load_module(struct load_info *info, const char __user *uargs,
long err = 0;
char *after_dashes;
- err = elf_header_check(info);
+ /*
+ * Do the signature check (if any) first. All that
+ * the signature check needs is info->len, it does
+ * not need any of the section info. That can be
+ * set up later. This will minimize the chances
+ * of a corrupt module causing problems before
+ * we even get to the signature check.
+ *
+ * The check will also adjust info->len by stripping
+ * off the sig length at the end of the module, making
+ * checks against info->len more correct.
+ */
+ err = module_sig_check(info, flags);
if (err)
goto free_copy;
+ /*
+ * Do basic sanity checks against the ELF header and
+ * sections.
+ */
+ err = elf_validity_check(info);
+ if (err) {
+ pr_err("Module has invalid ELF structures\n");
+ goto free_copy;
+ }
+
+ /*
+ * Everything checks out, so set up the section info
+ * in the info structure.
+ */
err = setup_load_info(info, flags);
if (err)
goto free_copy;
+ /*
+ * Now that we know we have the correct module name, check
+ * if it's blacklisted.
+ */
if (blacklisted(info->name)) {
err = -EPERM;
goto free_copy;
}
- err = module_sig_check(info, flags);
- if (err)
- goto free_copy;
-
err = rewrite_section_headers(info, flags);
if (err)
goto free_copy;
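
The heart of validate_section_offset() above is the wrap-around-safe range check: sh_offset + sh_size may overflow, so the sum is compared back against sh_offset before being compared to the module length. A stand-alone userspace illustration of the same check, using a hypothetical helper:

#include <stdio.h>

static int range_ok(unsigned long off, unsigned long size, unsigned long len)
{
        unsigned long end = off + size;

        /* reject both wrap-around and ranges that run past the buffer */
        return end >= off && end <= len;
}

int main(void)
{
        printf("%d\n", range_ok(16, 32, 4096));                 /* 1: fits         */
        printf("%d\n", range_ok(4000, 200, 4096));              /* 0: past the end */
        printf("%d\n", range_ok(16, (unsigned long)-8, 4096));  /* 0: wrapped      */
        return 0;
}
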
diff --git a/kernel/module_signature.c b/kernel/module_signature.c
index 4224a1086b7d..00132d12487c 100644
--- a/kernel/module_signature.c
+++ b/kernel/module_signature.c
@@ -25,7 +25,7 @@ int mod_check_sig(const struct module_signature *ms, size_t file_len,
return -EBADMSG;
if (ms->id_type != PKEY_ID_PKCS7) {
- pr_err("%s: Module is not signed with expected PKCS#7 message\n",
+ pr_err("%s: not signed with expected PKCS#7 message\n",
name);
return -ENOPKG;
}
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index 9d9fc678c91d..8723ae70ea1f 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -30,7 +30,7 @@ int mod_verify_sig(const void *mod, struct load_info *info)
memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms));
- ret = mod_check_sig(&ms, modlen, info->name);
+ ret = mod_check_sig(&ms, modlen, "module");
if (ret)
return ret;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1a75610f5f57..1b5f54b309be 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2857,7 +2857,8 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
size = nr_entries * sizeof(unsigned long);
event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
- sizeof(*entry) + size, flags, pc);
+ (sizeof(*entry) - sizeof(entry->caller)) + size,
+ flags, pc);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
diff --git a/mm/memory.c b/mm/memory.c
index c432e7c76445..13a575ce2ec8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -150,7 +150,7 @@ static int __init init_zero_pfn(void)
zero_pfn = page_to_pfn(ZERO_PAGE(0));
return 0;
}
-core_initcall(init_zero_pfn);
+early_initcall(init_zero_pfn);
#if defined(SPLIT_RSS_COUNTING)
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index b41375d4d295..4610c352849b 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1568,8 +1568,8 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
struct sk_buff *skb;
struct net_device *dev;
struct ddpehdr *ddp;
- int size;
- struct atalk_route *rt;
+ int size, hard_header_len;
+ struct atalk_route *rt, *rt_lo = NULL;
int err;
if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
@@ -1632,7 +1632,22 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n",
sk, size, dev->name);
- size += dev->hard_header_len;
+ hard_header_len = dev->hard_header_len;
+ /* Leave room for loopback hardware header if necessary */
+ if (usat->sat_addr.s_node == ATADDR_BCAST &&
+ (dev->flags & IFF_LOOPBACK || !(rt->flags & RTF_GATEWAY))) {
+ struct atalk_addr at_lo;
+
+ at_lo.s_node = 0;
+ at_lo.s_net = 0;
+
+ rt_lo = atrtr_find(&at_lo);
+
+ if (rt_lo && rt_lo->dev->hard_header_len > hard_header_len)
+ hard_header_len = rt_lo->dev->hard_header_len;
+ }
+
+ size += hard_header_len;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
lock_sock(sk);
@@ -1640,7 +1655,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
goto out;
skb_reserve(skb, ddp_dl->header_length);
- skb_reserve(skb, dev->hard_header_len);
+ skb_reserve(skb, hard_header_len);
skb->dev = dev;
SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);
@@ -1691,18 +1706,12 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
/* loop back */
skb_orphan(skb);
if (ddp->deh_dnode == ATADDR_BCAST) {
- struct atalk_addr at_lo;
-
- at_lo.s_node = 0;
- at_lo.s_net = 0;
-
- rt = atrtr_find(&at_lo);
- if (!rt) {
+ if (!rt_lo) {
kfree_skb(skb);
err = -ENETUNREACH;
goto out;
}
- dev = rt->dev;
+ dev = rt_lo->dev;
skb->dev = dev;
}
ddp_dl->request(ddp_dl, skb, dev->dev_addr);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 306d3584a441..c758a12ffe46 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -304,8 +304,8 @@ static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net,
struct net_device *dev)
{
if (dev) {
- struct can_ml_priv *ml_priv = dev->ml_priv;
- return &ml_priv->dev_rcv_lists;
+ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
+ return &can_ml->dev_rcv_lists;
} else {
return net->can.rx_alldev_list;
}
@@ -788,25 +788,6 @@ void can_proto_unregister(const struct can_proto *cp)
}
EXPORT_SYMBOL(can_proto_unregister);
-/* af_can notifier to create/remove CAN netdevice specific structs */
-static int can_notifier(struct notifier_block *nb, unsigned long msg,
- void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-
- if (dev->type != ARPHRD_CAN)
- return NOTIFY_DONE;
-
- switch (msg) {
- case NETDEV_REGISTER:
- WARN(!dev->ml_priv,
- "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
- break;
- }
-
- return NOTIFY_DONE;
-}
-
static int can_pernet_init(struct net *net)
{
spin_lock_init(&net->can.rcvlists_lock);
@@ -874,11 +855,6 @@ static const struct net_proto_family can_family_ops = {
.owner = THIS_MODULE,
};
-/* notifier block for netdevice event */
-static struct notifier_block can_netdev_notifier __read_mostly = {
- .notifier_call = can_notifier,
-};
-
static struct pernet_operations can_pernet_ops __read_mostly = {
.init = can_pernet_init,
.exit = can_pernet_exit,
@@ -909,17 +885,12 @@ static __init int can_init(void)
err = sock_register(&can_family_ops);
if (err)
goto out_sock;
- err = register_netdevice_notifier(&can_netdev_notifier);
- if (err)
- goto out_notifier;
dev_add_pack(&can_packet);
dev_add_pack(&canfd_packet);
return 0;
-out_notifier:
- sock_unregister(PF_CAN);
out_sock:
unregister_pernet_subsys(&can_pernet_ops);
out_pernet:
@@ -933,7 +904,6 @@ static __exit void can_exit(void)
/* protocol unregister */
dev_remove_pack(&canfd_packet);
dev_remove_pack(&can_packet);
- unregister_netdevice_notifier(&can_netdev_notifier);
sock_unregister(PF_CAN);
unregister_pernet_subsys(&can_pernet_ops);
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index 137054bff9ec..e52330f628c9 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -140,9 +140,9 @@ static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
static inline void j1939_priv_set(struct net_device *ndev,
struct j1939_priv *priv)
{
- struct can_ml_priv *can_ml_priv = ndev->ml_priv;
+ struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
- can_ml_priv->j1939_priv = priv;
+ can_ml->j1939_priv = priv;
}
static void __j1939_priv_release(struct kref *kref)
@@ -211,12 +211,9 @@ static void __j1939_rx_release(struct kref *kref)
/* get pointer to priv without increasing ref counter */
static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev)
{
- struct can_ml_priv *can_ml_priv = ndev->ml_priv;
+ struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
- if (!can_ml_priv)
- return NULL;
-
- return can_ml_priv->j1939_priv;
+ return can_ml->j1939_priv;
}
static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev)
@@ -225,9 +222,6 @@ static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev)
lockdep_assert_held(&j1939_netdev_lock);
- if (ndev->type != ARPHRD_CAN)
- return NULL;
-
priv = j1939_ndev_to_priv(ndev);
if (priv)
j1939_priv_get(priv);
@@ -348,15 +342,16 @@ static int j1939_netdev_notify(struct notifier_block *nb,
unsigned long msg, void *data)
{
struct net_device *ndev = netdev_notifier_info_to_dev(data);
+ struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
struct j1939_priv *priv;
+ if (!can_ml)
+ goto notify_done;
+
priv = j1939_priv_get_by_ndev(ndev);
if (!priv)
goto notify_done;
- if (ndev->type != ARPHRD_CAN)
- goto notify_put;
-
switch (msg) {
case NETDEV_DOWN:
j1939_cancel_active_session(priv, NULL);
@@ -365,7 +360,6 @@ static int j1939_netdev_notify(struct notifier_block *nb,
break;
}
-notify_put:
j1939_priv_put(priv);
notify_done:
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 047090960539..d57475c8ba07 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/can/can-ml.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/errqueue.h>
@@ -453,6 +454,7 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
j1939_jsk_del(priv, jsk);
j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
} else {
+ struct can_ml_priv *can_ml;
struct net_device *ndev;
ndev = dev_get_by_index(net, addr->can_ifindex);
@@ -461,15 +463,8 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
goto out_release_sock;
}
- if (ndev->type != ARPHRD_CAN) {
- dev_put(ndev);
- ret = -ENODEV;
- goto out_release_sock;
- }
-
- if (!ndev->ml_priv) {
- netdev_warn_once(ndev,
- "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
+ can_ml = can_get_ml_priv(ndev);
+ if (!can_ml) {
dev_put(ndev);
ret = -ENODEV;
goto out_release_sock;
diff --git a/net/can/proc.c b/net/can/proc.c
index 077af42c26ba..a5fc63c78370 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -329,8 +329,11 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v)
/* receive list for registered CAN devices */
for_each_netdev_rcu(net, dev) {
- if (dev->type == ARPHRD_CAN && dev->ml_priv)
- can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
+ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
+
+ if (can_ml)
+ can_rcvlist_proc_show_one(m, idx, dev,
+ &can_ml->dev_rcv_lists);
}
rcu_read_unlock();
@@ -382,8 +385,10 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
/* sff receive list for registered CAN devices */
for_each_netdev_rcu(net, dev) {
- if (dev->type == ARPHRD_CAN && dev->ml_priv) {
- dev_rcv_lists = dev->ml_priv;
+ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
+
+ if (can_ml) {
+ dev_rcv_lists = &can_ml->dev_rcv_lists;
can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_sff,
ARRAY_SIZE(dev_rcv_lists->rx_sff));
}
@@ -413,8 +418,10 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
/* eff receive list for registered CAN devices */
for_each_netdev_rcu(net, dev) {
- if (dev->type == ARPHRD_CAN && dev->ml_priv) {
- dev_rcv_lists = dev->ml_priv;
+ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
+
+ if (can_ml) {
+ dev_rcv_lists = &can_ml->dev_rcv_lists;
can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_eff,
ARRAY_SIZE(dev_rcv_lists->rx_eff));
}
diff --git a/net/core/filter.c b/net/core/filter.c
index 524f3364f8a0..7fbb274b7fe3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3146,18 +3146,14 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
return 0;
}
-static u32 __bpf_skb_max_len(const struct sk_buff *skb)
-{
- return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
- SKB_MAX_ALLOC;
-}
+#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC
BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
u32, mode, u64, flags)
{
u32 len_cur, len_diff_abs = abs(len_diff);
u32 len_min = bpf_skb_net_base_len(skb);
- u32 len_max = __bpf_skb_max_len(skb);
+ u32 len_max = BPF_SKB_MAX_LEN;
__be16 proto = skb->protocol;
bool shrink = len_diff < 0;
u32 off;
@@ -3237,7 +3233,7 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
u64 flags)
{
- u32 max_len = __bpf_skb_max_len(skb);
+ u32 max_len = BPF_SKB_MAX_LEN;
u32 min_len = __bpf_skb_min_len(skb);
int ret;
@@ -3313,7 +3309,7 @@ static const struct bpf_func_proto sk_skb_change_tail_proto = {
static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
u64 flags)
{
- u32 max_len = __bpf_skb_max_len(skb);
+ u32 max_len = BPF_SKB_MAX_LEN;
u32 new_len = skb->len + head_room;
int ret;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index e3bdd859c895..da86c0e1b677 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1028,6 +1028,9 @@ bool __skb_flow_dissect(const struct net *net,
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
+ __skb_flow_dissect_ipv4(skb, flow_dissector,
+ target_container, data, iph);
+
if (ip_is_fragment(iph)) {
key_control->flags |= FLOW_DIS_IS_FRAGMENT;
@@ -1044,9 +1047,6 @@ bool __skb_flow_dissect(const struct net *net,
}
}
- __skb_flow_dissect_ipv4(skb, flow_dissector,
- target_container, data, iph);
-
break;
}
case htons(ETH_P_IPV6): {
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 1e5e08cc0bfc..9f73ccf46c9b 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -319,6 +319,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!ipv6_unicast_destination(skb))
return 0; /* discard, don't send a reset here */
+ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+ return 0;
+ }
+
if (dccp_bad_service_code(sk, service)) {
dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
goto drop;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 3d71c7d6102c..7e5df23cbe7b 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -223,16 +223,6 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
if (ipv6_addr_is_multicast(&hdr->saddr))
goto err;
- /* While RFC4291 is not explicit about v4mapped addresses
- * in IPv6 headers, it seems clear linux dual-stack
- * model can not deal properly with these.
- * Security models could be fooled by ::ffff:127.0.0.1 for example.
- *
- * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
- */
- if (ipv6_addr_v4mapped(&hdr->saddr))
- goto err;
-
skb->transport_header = skb->network_header + sizeof(*hdr);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b42fa41cfceb..2f061a911bc2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1093,6 +1093,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!ipv6_unicast_destination(skb))
goto drop;
+ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+ return 0;
+ }
+
return tcp_conn_request(&tcp6_request_sock_ops,
&tcp_request_sock_ipv6_ops, sk, skb);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index cf4d6d7e7282..d5470c7fe879 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1782,11 +1782,14 @@ static int
svcauth_gss_release(struct svc_rqst *rqstp)
{
struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
- struct rpc_gss_wire_cred *gc = &gsd->clcred;
+ struct rpc_gss_wire_cred *gc;
struct xdr_buf *resbuf = &rqstp->rq_res;
int stat = -EINVAL;
struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
+ if (!gsd)
+ goto out;
+ gc = &gsd->clcred;
if (gc->gc_proc != RPC_GSS_PROC_DATA)
goto out;
/* Release can be called twice, but we only wrap once. */
@@ -1827,10 +1830,10 @@ svcauth_gss_release(struct svc_rqst *rqstp)
if (rqstp->rq_cred.cr_group_info)
put_group_info(rqstp->rq_cred.cr_group_info);
rqstp->rq_cred.cr_group_info = NULL;
- if (gsd->rsci)
+ if (gsd && gsd->rsci) {
cache_put(&gsd->rsci->h, sn->rsc_cache);
- gsd->rsci = NULL;
-
+ gsd->rsci = NULL;
+ }
return stat;
}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 5d323574d04f..c82e7b52ab1f 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -620,6 +620,7 @@ struct sock *__vsock_create(struct net *net,
vsk->trusted = psk->trusted;
vsk->owner = get_cred(psk->owner);
vsk->connect_timeout = psk->connect_timeout;
+ security_sk_clone(parent, sk);
} else {
vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
vsk->owner = get_current_cred();
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index febd16c9efd7..ebb1ee69dd0c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1022,8 +1022,12 @@ static int azx_prepare(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
+ if (!azx_is_pm_ready(card))
+ return 0;
+
chip = card->private_data;
chip->pm_prepared = 1;
+ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
flush_work(&azx_bus(chip)->unsol_work);
@@ -1038,7 +1042,11 @@ static void azx_complete(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
+ if (!azx_is_pm_ready(card))
+ return;
+
chip = card->private_data;
+ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
chip->pm_prepared = 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 73580e8208ed..3c9e072db353 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5192,7 +5192,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
case 0x10ec0274:
case 0x10ec0294:
alc_process_coef_fw(codec, coef0274);
- msleep(80);
+ msleep(850);
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x00f0) == 0x00f0;
break;
@@ -5376,6 +5376,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec,
struct hda_jack_callback *jack)
{
snd_hda_gen_hp_automute(codec, jack);
+ alc_update_headset_mode(codec);
}
static void alc_probe_headset_mode(struct hda_codec *codec)
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
index 5125bb9b37b5..dcd2acb2c3ce 100644
--- a/sound/soc/codecs/cs42l42.c
+++ b/sound/soc/codecs/cs42l42.c
@@ -401,7 +401,7 @@ static const struct regmap_config cs42l42_regmap = {
};
static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
-static DECLARE_TLV_DB_SCALE(mixer_tlv, -6200, 100, false);
+static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);
static const char * const cs42l42_hpf_freq_text[] = {
"1.86Hz", "120Hz", "235Hz", "466Hz"
@@ -458,7 +458,7 @@ static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
CS42L42_DAC_HPF_EN_SHIFT, true, false),
SOC_DOUBLE_R_TLV("Mixer Volume", CS42L42_MIXER_CHA_VOL,
CS42L42_MIXER_CHB_VOL, CS42L42_MIXER_CH_VOL_SHIFT,
- 0x3e, 1, mixer_tlv)
+ 0x3f, 1, mixer_tlv)
};
static int cs42l42_hpdrv_evt(struct snd_soc_dapm_widget *w,
@@ -691,24 +691,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
CS42L42_CLK_OASRC_SEL_MASK,
CS42L42_CLK_OASRC_SEL_12 <<
CS42L42_CLK_OASRC_SEL_SHIFT);
- /* channel 1 on low LRCLK, 32 bit */
- snd_soc_component_update_bits(component,
- CS42L42_ASP_RX_DAI0_CH1_AP_RES,
- CS42L42_ASP_RX_CH_AP_MASK |
- CS42L42_ASP_RX_CH_RES_MASK,
- (CS42L42_ASP_RX_CH_AP_LOW <<
- CS42L42_ASP_RX_CH_AP_SHIFT) |
- (CS42L42_ASP_RX_CH_RES_32 <<
- CS42L42_ASP_RX_CH_RES_SHIFT));
- /* Channel 2 on high LRCLK, 32 bit */
- snd_soc_component_update_bits(component,
- CS42L42_ASP_RX_DAI0_CH2_AP_RES,
- CS42L42_ASP_RX_CH_AP_MASK |
- CS42L42_ASP_RX_CH_RES_MASK,
- (CS42L42_ASP_RX_CH_AP_HI <<
- CS42L42_ASP_RX_CH_AP_SHIFT) |
- (CS42L42_ASP_RX_CH_RES_32 <<
- CS42L42_ASP_RX_CH_RES_SHIFT));
if (pll_ratio_table[i].mclk_src_sel == 0) {
/* Pass the clock straight through */
snd_soc_component_update_bits(component,
@@ -797,27 +779,23 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
/* Bitclock/frame inversion */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
+ asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
break;
case SND_SOC_DAIFMT_NB_IF:
- asp_cfg_val |= CS42L42_ASP_POL_INV <<
- CS42L42_ASP_LCPOL_IN_SHIFT;
+ asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
+ asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
break;
case SND_SOC_DAIFMT_IB_NF:
- asp_cfg_val |= CS42L42_ASP_POL_INV <<
- CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
break;
case SND_SOC_DAIFMT_IB_IF:
- asp_cfg_val |= CS42L42_ASP_POL_INV <<
- CS42L42_ASP_LCPOL_IN_SHIFT;
- asp_cfg_val |= CS42L42_ASP_POL_INV <<
- CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
+ asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
break;
}
- snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG,
- CS42L42_ASP_MODE_MASK |
- CS42L42_ASP_SCPOL_IN_DAC_MASK |
- CS42L42_ASP_LCPOL_IN_MASK, asp_cfg_val);
+ snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG, CS42L42_ASP_MODE_MASK |
+ CS42L42_ASP_SCPOL_MASK |
+ CS42L42_ASP_LCPOL_MASK,
+ asp_cfg_val);
return 0;
}
@@ -828,14 +806,29 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_component *component = dai->component;
struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
- int retval;
+ unsigned int width = (params_width(params) / 8) - 1;
+ unsigned int val = 0;
cs42l42->srate = params_rate(params);
- cs42l42->swidth = params_width(params);
- retval = cs42l42_pll_config(component);
+ switch(substream->stream) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ val |= width << CS42L42_ASP_RX_CH_RES_SHIFT;
+ /* channel 1 on low LRCLK */
+ snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH1_AP_RES,
+ CS42L42_ASP_RX_CH_AP_MASK |
+ CS42L42_ASP_RX_CH_RES_MASK, val);
+ /* Channel 2 on high LRCLK */
+ val |= CS42L42_ASP_RX_CH_AP_HI << CS42L42_ASP_RX_CH_AP_SHIFT;
+ snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES,
+ CS42L42_ASP_RX_CH_AP_MASK |
+ CS42L42_ASP_RX_CH_RES_MASK, val);
+ break;
+ default:
+ break;
+ }
- return retval;
+ return cs42l42_pll_config(component);
}
static int cs42l42_set_sysclk(struct snd_soc_dai *dai,
@@ -900,9 +893,9 @@ static int cs42l42_digital_mute(struct snd_soc_dai *dai, int mute)
return 0;
}
-#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \
- SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE | \
- SNDRV_PCM_FMTBIT_S32_LE)
+#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE )
static const struct snd_soc_dai_ops cs42l42_ops = {
@@ -1803,7 +1796,7 @@ static int cs42l42_i2c_probe(struct i2c_client *i2c_client,
dev_dbg(&i2c_client->dev, "Found reset GPIO\n");
gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
}
- mdelay(3);
+ usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
/* Request IRQ */
ret = devm_request_threaded_irq(&i2c_client->dev,
@@ -1928,6 +1921,7 @@ static int cs42l42_runtime_resume(struct device *dev)
}
gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+ usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
regcache_cache_only(cs42l42->regmap, false);
regcache_sync(cs42l42->regmap);
diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
index 9e3cc528dcff..866d7c873e3c 100644
--- a/sound/soc/codecs/cs42l42.h
+++ b/sound/soc/codecs/cs42l42.h
@@ -258,11 +258,12 @@
#define CS42L42_ASP_SLAVE_MODE 0x00
#define CS42L42_ASP_MODE_SHIFT 4
#define CS42L42_ASP_MODE_MASK (1 << CS42L42_ASP_MODE_SHIFT)
-#define CS42L42_ASP_SCPOL_IN_DAC_SHIFT 2
-#define CS42L42_ASP_SCPOL_IN_DAC_MASK (1 << CS42L42_ASP_SCPOL_IN_DAC_SHIFT)
-#define CS42L42_ASP_LCPOL_IN_SHIFT 0
-#define CS42L42_ASP_LCPOL_IN_MASK (1 << CS42L42_ASP_LCPOL_IN_SHIFT)
-#define CS42L42_ASP_POL_INV 1
+#define CS42L42_ASP_SCPOL_SHIFT 2
+#define CS42L42_ASP_SCPOL_MASK (3 << CS42L42_ASP_SCPOL_SHIFT)
+#define CS42L42_ASP_SCPOL_NOR 3
+#define CS42L42_ASP_LCPOL_SHIFT 0
+#define CS42L42_ASP_LCPOL_MASK (3 << CS42L42_ASP_LCPOL_SHIFT)
+#define CS42L42_ASP_LCPOL_INV 3
#define CS42L42_ASP_FRM_CFG (CS42L42_PAGE_12 + 0x08)
#define CS42L42_ASP_STP_SHIFT 4
@@ -739,6 +740,7 @@
#define CS42L42_FRAC2_VAL(val) (((val) & 0xff0000) >> 16)
#define CS42L42_NUM_SUPPLIES 5
+#define CS42L42_BOOT_TIME_US 3000
static const char *const cs42l42_supply_names[CS42L42_NUM_SUPPLIES] = {
"VA",
@@ -756,7 +758,6 @@ struct cs42l42_private {
struct completion pdn_done;
u32 sclk;
u32 srate;
- u32 swidth;
u8 plug_state;
u8 hs_type;
u8 ts_inv;
diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
index 36eef1fb3d18..b781b28de012 100644
--- a/sound/soc/codecs/es8316.c
+++ b/sound/soc/codecs/es8316.c
@@ -63,13 +63,8 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
1, 1, TLV_DB_SCALE_ITEM(0, 0, 0),
2, 2, TLV_DB_SCALE_ITEM(250, 0, 0),
3, 3, TLV_DB_SCALE_ITEM(450, 0, 0),
- 4, 4, TLV_DB_SCALE_ITEM(700, 0, 0),
- 5, 5, TLV_DB_SCALE_ITEM(1000, 0, 0),
- 6, 6, TLV_DB_SCALE_ITEM(1300, 0, 0),
- 7, 7, TLV_DB_SCALE_ITEM(1600, 0, 0),
- 8, 8, TLV_DB_SCALE_ITEM(1800, 0, 0),
- 9, 9, TLV_DB_SCALE_ITEM(2100, 0, 0),
- 10, 10, TLV_DB_SCALE_ITEM(2400, 0, 0),
+ 4, 7, TLV_DB_SCALE_ITEM(700, 300, 0),
+ 8, 10, TLV_DB_SCALE_ITEM(1800, 300, 0),
);
static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpout_vol_tlv,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 747ca248bf10..3bc63fbcb188 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -339,9 +339,9 @@ static bool rt5640_readable_register(struct device *dev, unsigned int reg)
}
static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index c506c9305043..829cf552fe3e 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -285,9 +285,9 @@ static bool rt5651_readable_register(struct device *dev, unsigned int reg)
}
static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index e66d08398f74..afd61599d94c 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -3463,12 +3463,17 @@ static int rt5659_set_component_sysclk(struct snd_soc_component *component, int
{
struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
unsigned int reg_val = 0;
+ int ret;
if (freq == rt5659->sysclk && clk_id == rt5659->sysclk_src)
return 0;
switch (clk_id) {
case RT5659_SCLK_S_MCLK:
+ ret = clk_set_rate(rt5659->mclk, freq);
+ if (ret)
+ return ret;
+
reg_val |= RT5659_SCLK_SRC_MCLK;
break;
case RT5659_SCLK_S_PLL1:
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index f5b59305c957..8a1e485982d8 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -71,7 +71,7 @@ static const struct reg_default sgtl5000_reg_defaults[] = {
{ SGTL5000_DAP_EQ_BASS_BAND4, 0x002f },
{ SGTL5000_DAP_MAIN_CHAN, 0x8000 },
{ SGTL5000_DAP_MIX_CHAN, 0x0000 },
- { SGTL5000_DAP_AVC_CTRL, 0x0510 },
+ { SGTL5000_DAP_AVC_CTRL, 0x5100 },
{ SGTL5000_DAP_AVC_THRESHOLD, 0x1473 },
{ SGTL5000_DAP_AVC_ATTACK, 0x0028 },
{ SGTL5000_DAP_AVC_DECAY, 0x0050 },
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 9fb03c646a88..3d1585c12b07 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1453,6 +1453,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
+ case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */
return true;
}
diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
index 058c746ee300..b11d8e6b5bc1 100755
--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
+++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
@@ -3,7 +3,7 @@
ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \
- match_ip_tos_test match_indev_test"
+ match_ip_tos_test match_indev_test match_ip_ttl_test"
NUM_NETIFS=2
source tc_common.sh
source lib.sh
@@ -310,6 +310,42 @@ match_ip_tos_test()
log_test "ip_tos match ($tcflags)"
}
+match_ip_ttl_test()
+{
+ RET=0
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.2.2 ip_ttl 63 action drop
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip "ttl=63" -q
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip "ttl=63,mf,frag=256" -q
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_fail $? "Matched on the wrong filter (no check on ttl)"
+
+ tc_check_packets "dev $h2 ingress" 101 2
+ check_err $? "Did not match on correct filter (ttl=63)"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip "ttl=255" -q
+
+ tc_check_packets "dev $h2 ingress" 101 3
+ check_fail $? "Matched on a wrong filter (ttl=63)"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter (no check on ttl)"
+
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ log_test "ip_ttl match ($tcflags)"
+}
+
match_indev_test()
{
RET=0