[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220517105526.114421-1-duoming@zju.edu.cn>
Date: Tue, 17 May 2022 18:55:26 +0800
From: Duoming Zhou <duoming@....edu.cn>
To: linux-kernel@...r.kernel.org, krzysztof.kozlowski@...aro.org
Cc: kuba@...nel.org, davem@...emloft.net, edumazet@...gle.com,
pabeni@...hat.com, gregkh@...uxfoundation.org,
alexander.deucher@....com, broonie@...nel.org,
netdev@...r.kernel.org, Duoming Zhou <duoming@....edu.cn>
Subject: [PATCH net v2] NFC: hci: fix sleep in atomic context bugs in nfc_hci_hcp_message_tx
There are sleep-in-atomic-context bugs when the request to the secure
element of st21nfca times out. The root cause is that kzalloc and
alloc_skb with the GFP_KERNEL flag, as well as mutex_lock, are called in
st21nfca_se_wt_timeout, which is a timer handler. The call tree shows
the execution paths that could lead to the bugs:
(Timer handler, softirq/atomic context)
st21nfca_se_wt_timeout
nfc_hci_send_event
nfc_hci_hcp_message_tx
kzalloc(..., GFP_KERNEL) //may sleep
alloc_skb(..., GFP_KERNEL) //may sleep
mutex_lock() //may sleep
This patch changes the allocation mode of kzalloc and alloc_skb from
GFP_KERNEL to GFP_ATOMIC, and replaces mutex_lock with spin_lock, in
order to prevent sleeping in atomic context.
Fixes: 2130fb97fecf ("NFC: st21nfca: Adding support for secure element")
Signed-off-by: Duoming Zhou <duoming@....edu.cn>
---
Changes in v2:
- Change mutex_lock to spin_lock.
include/net/nfc/hci.h | 3 ++-
net/nfc/hci/core.c | 18 +++++++++---------
net/nfc/hci/hcp.c | 10 +++++-----
3 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 756c11084f6..8f66e6e6b91 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -103,7 +103,8 @@ struct nfc_hci_dev {
bool shutting_down;
- struct mutex msg_tx_mutex;
+ /* The spinlock is used to protect resources related to HCI message TX */
+ spinlock_t msg_tx_spin;
struct list_head msg_tx_queue;
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index ceb87db57cd..fa22f9fe5fc 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -68,7 +68,7 @@ static void nfc_hci_msg_tx_work(struct work_struct *work)
struct sk_buff *skb;
int r = 0;
- mutex_lock(&hdev->msg_tx_mutex);
+ spin_lock(&hdev->msg_tx_spin);
if (hdev->shutting_down)
goto exit;
@@ -120,7 +120,7 @@ static void nfc_hci_msg_tx_work(struct work_struct *work)
msecs_to_jiffies(hdev->cmd_pending_msg->completion_delay));
exit:
- mutex_unlock(&hdev->msg_tx_mutex);
+ spin_unlock(&hdev->msg_tx_spin);
}
static void nfc_hci_msg_rx_work(struct work_struct *work)
@@ -165,7 +165,7 @@ static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err,
void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
struct sk_buff *skb)
{
- mutex_lock(&hdev->msg_tx_mutex);
+ spin_lock(&hdev->msg_tx_spin);
if (hdev->cmd_pending_msg == NULL) {
kfree_skb(skb);
@@ -175,7 +175,7 @@ void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
__nfc_hci_cmd_completion(hdev, nfc_hci_result_to_errno(result), skb);
exit:
- mutex_unlock(&hdev->msg_tx_mutex);
+ spin_unlock(&hdev->msg_tx_spin);
}
void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
@@ -833,7 +833,7 @@ static int hci_se_io(struct nfc_dev *nfc_dev, u32 se_idx,
static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
{
- mutex_lock(&hdev->msg_tx_mutex);
+ spin_lock(&hdev->msg_tx_spin);
if (hdev->cmd_pending_msg == NULL) {
nfc_driver_failure(hdev->ndev, err);
@@ -843,7 +843,7 @@ static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
__nfc_hci_cmd_completion(hdev, err, NULL);
exit:
- mutex_unlock(&hdev->msg_tx_mutex);
+ spin_unlock(&hdev->msg_tx_spin);
}
static void nfc_hci_llc_failure(struct nfc_hci_dev *hdev, int err)
@@ -1009,7 +1009,7 @@ EXPORT_SYMBOL(nfc_hci_free_device);
int nfc_hci_register_device(struct nfc_hci_dev *hdev)
{
- mutex_init(&hdev->msg_tx_mutex);
+ spin_lock_init(&hdev->msg_tx_spin);
INIT_LIST_HEAD(&hdev->msg_tx_queue);
@@ -1031,7 +1031,7 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
{
struct hci_msg *msg, *n;
- mutex_lock(&hdev->msg_tx_mutex);
+ spin_lock(&hdev->msg_tx_spin);
if (hdev->cmd_pending_msg) {
if (hdev->cmd_pending_msg->cb)
@@ -1044,7 +1044,7 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
hdev->shutting_down = true;
- mutex_unlock(&hdev->msg_tx_mutex);
+ spin_unlock(&hdev->msg_tx_spin);
del_timer_sync(&hdev->cmd_timer);
cancel_work_sync(&hdev->msg_tx_work);
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
index 05c60988f59..f7eccb4ce35 100644
--- a/net/nfc/hci/hcp.c
+++ b/net/nfc/hci/hcp.c
@@ -30,7 +30,7 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
int hci_len, err;
bool firstfrag = true;
- cmd = kzalloc(sizeof(struct hci_msg), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (cmd == NULL)
return -ENOMEM;
@@ -58,7 +58,7 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
data_link_len + ndev->tx_tailroom;
hci_len -= data_link_len;
- skb = alloc_skb(skb_len, GFP_KERNEL);
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
if (skb == NULL) {
err = -ENOMEM;
goto out_skb_err;
@@ -90,16 +90,16 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
skb_queue_tail(&cmd->msg_frags, skb);
}
- mutex_lock(&hdev->msg_tx_mutex);
+ spin_lock(&hdev->msg_tx_spin);
if (hdev->shutting_down) {
err = -ESHUTDOWN;
- mutex_unlock(&hdev->msg_tx_mutex);
+ spin_unlock(&hdev->msg_tx_spin);
goto out_skb_err;
}
list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue);
- mutex_unlock(&hdev->msg_tx_mutex);
+ spin_unlock(&hdev->msg_tx_spin);
schedule_work(&hdev->msg_tx_work);
--
2.17.1
Powered by blists - more mailing lists