Message-Id: <20190712121623.716535924@linuxfoundation.org>
Date: Fri, 12 Jul 2019 14:20:08 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
Nicolas Saenz Julienne <nsaenzjulienne@...e.de>,
Stefan Wahren <stefan.wahren@...e.com>
Subject: [PATCH 5.2 55/61] staging: vchiq: revert "switch to wait_for_completion_killable"
From: Nicolas Saenz Julienne <nsaenzjulienne@...e.de>
commit 086efbabdc04563268372aaef4d66039d85ee76c upstream.
The killable version of wait_for_completion() is meant to be used in
situations where the wait must not fail at all costs, yet still offers
the convenience of being killable if really necessary. VCHIQ doesn't
fit this criterion, as it's mainly used as an interface to V4L2 and
ALSA devices.
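
To illustrate the difference, the following minimal sketch (not vchiq
code; the completion and the two wrapper names are made up for the
example) contrasts the two wait flavours: the interruptible variant
returns -ERESTARTSYS as soon as any signal is pending, while the
killable variant only wakes early for a fatal signal such as SIGKILL.

/*
 * Sketch only -- "example_event" and both wrappers are hypothetical,
 * not part of vchiq.
 */
#include <linux/completion.h>

static DECLARE_COMPLETION(example_event);

/* Woken by complete() or by *any* pending signal (-ERESTARTSYS). */
static int wait_interruptible_example(void)
{
	return wait_for_completion_interruptible(&example_event);
}

/* Woken by complete(), or early only by a fatal signal (SIGKILL). */
static int wait_killable_example(void)
{
	return wait_for_completion_killable(&example_event);
}

In vchiq the callers treat a non-zero return as VCHIQ_RETRY (or flush
the signal and keep waiting), so reacting to ordinary signals is the
behaviour the driver actually wants.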
Fixes: a772f116702e ("staging: vchiq: switch to wait_for_completion_killable")
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@...e.de>
Acked-by: Stefan Wahren <stefan.wahren@...e.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 21 +++++-----
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c | 21 +++++-----
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c | 6 +-
3 files changed, 25 insertions(+), 23 deletions(-)
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -532,7 +532,8 @@ add_completion(VCHIQ_INSTANCE_T instance
vchiq_log_trace(vchiq_arm_log_level,
"%s - completion queue full", __func__);
DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
- if (wait_for_completion_killable(&instance->remove_event)) {
+ if (wait_for_completion_interruptible(
+ &instance->remove_event)) {
vchiq_log_info(vchiq_arm_log_level,
"service_callback interrupted");
return VCHIQ_RETRY;
@@ -643,7 +644,7 @@ service_callback(VCHIQ_REASON_T reason,
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
- if (wait_for_completion_killable(
+ if (wait_for_completion_interruptible(
&user_service->remove_event)
!= 0) {
vchiq_log_info(vchiq_arm_log_level,
@@ -978,7 +979,7 @@ vchiq_ioctl(struct file *file, unsigned
has been closed until the client library calls the
CLOSE_DELIVERED ioctl, signalling close_event. */
if (user_service->close_pending &&
- wait_for_completion_killable(
+ wait_for_completion_interruptible(
&user_service->close_event))
status = VCHIQ_RETRY;
break;
@@ -1154,7 +1155,7 @@ vchiq_ioctl(struct file *file, unsigned
DEBUG_TRACE(AWAIT_COMPLETION_LINE);
mutex_unlock(&instance->completion_mutex);
- rc = wait_for_completion_killable(
+ rc = wait_for_completion_interruptible(
&instance->insert_event);
mutex_lock(&instance->completion_mutex);
if (rc != 0) {
@@ -1324,7 +1325,7 @@ vchiq_ioctl(struct file *file, unsigned
do {
spin_unlock(&msg_queue_spinlock);
DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
- if (wait_for_completion_killable(
+ if (wait_for_completion_interruptible(
&user_service->insert_event)) {
vchiq_log_info(vchiq_arm_log_level,
"DEQUEUE_MESSAGE interrupted");
@@ -2328,7 +2329,7 @@ vchiq_keepalive_thread_func(void *v)
while (1) {
long rc = 0, uc = 0;
- if (wait_for_completion_killable(&arm_state->ka_evt)
+ if (wait_for_completion_interruptible(&arm_state->ka_evt)
!= 0) {
vchiq_log_error(vchiq_susp_log_level,
"%s interrupted", __func__);
@@ -2579,7 +2580,7 @@ block_resume(struct vchiq_arm_state *arm
write_unlock_bh(&arm_state->susp_res_lock);
vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
"blocked clients", __func__);
- if (wait_for_completion_killable_timeout(
+ if (wait_for_completion_interruptible_timeout(
&arm_state->blocked_blocker, timeout_val)
<= 0) {
vchiq_log_error(vchiq_susp_log_level, "%s wait for "
@@ -2605,7 +2606,7 @@ block_resume(struct vchiq_arm_state *arm
write_unlock_bh(&arm_state->susp_res_lock);
vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
__func__);
- if (wait_for_completion_killable_timeout(
+ if (wait_for_completion_interruptible_timeout(
&arm_state->vc_resume_complete, timeout_val)
<= 0) {
vchiq_log_error(vchiq_susp_log_level, "%s wait for "
@@ -2812,7 +2813,7 @@ vchiq_arm_force_suspend(struct vchiq_sta
do {
write_unlock_bh(&arm_state->susp_res_lock);
- rc = wait_for_completion_killable_timeout(
+ rc = wait_for_completion_interruptible_timeout(
&arm_state->vc_suspend_complete,
msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
@@ -2908,7 +2909,7 @@ vchiq_arm_allow_resume(struct vchiq_stat
write_unlock_bh(&arm_state->susp_res_lock);
if (resume) {
- if (wait_for_completion_killable(
+ if (wait_for_completion_interruptible(
&arm_state->vc_resume_complete) < 0) {
vchiq_log_error(vchiq_susp_log_level,
"%s interrupted", __func__);
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -568,7 +568,7 @@ reserve_space(struct vchiq_state *state,
remote_event_signal(&state->remote->trigger);
if (!is_blocking ||
- (wait_for_completion_killable(
+ (wait_for_completion_interruptible(
&state->slot_available_event)))
return NULL; /* No space available */
}
@@ -838,7 +838,7 @@ queue_message(struct vchiq_state *state,
spin_unlock("a_spinlock);
mutex_unlock(&state->slot_mutex);
- if (wait_for_completion_killable(
+ if (wait_for_completion_interruptible(
&state->data_quota_event))
return VCHIQ_RETRY;
@@ -869,7 +869,7 @@ queue_message(struct vchiq_state *state,
service_quota->slot_use_count);
VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
mutex_unlock(&state->slot_mutex);
- if (wait_for_completion_killable(
+ if (wait_for_completion_interruptible(
&service_quota->quota_event))
return VCHIQ_RETRY;
if (service->closing)
@@ -1718,7 +1718,8 @@ parse_rx_slots(struct vchiq_state *state
&service->bulk_rx : &service->bulk_tx;
DEBUG_TRACE(PARSE_LINE);
- if (mutex_lock_killable(&service->bulk_mutex)) {
+ if (mutex_lock_killable(
+ &service->bulk_mutex) != 0) {
DEBUG_TRACE(PARSE_LINE);
goto bail_not_ready;
}
@@ -2436,7 +2437,7 @@ vchiq_open_service_internal(struct vchiq
QMFLAGS_IS_BLOCKING);
if (status == VCHIQ_SUCCESS) {
/* Wait for the ACK/NAK */
- if (wait_for_completion_killable(&service->remove_event)) {
+ if (wait_for_completion_interruptible(&service->remove_event)) {
status = VCHIQ_RETRY;
vchiq_release_service_internal(service);
} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
@@ -2803,7 +2804,7 @@ vchiq_connect_internal(struct vchiq_stat
}
if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
- if (wait_for_completion_killable(&state->connect))
+ if (wait_for_completion_interruptible(&state->connect))
return VCHIQ_RETRY;
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
@@ -2902,7 +2903,7 @@ vchiq_close_service(VCHIQ_SERVICE_HANDLE
}
while (1) {
- if (wait_for_completion_killable(&service->remove_event)) {
+ if (wait_for_completion_interruptible(&service->remove_event)) {
status = VCHIQ_RETRY;
break;
}
@@ -2963,7 +2964,7 @@ vchiq_remove_service(VCHIQ_SERVICE_HANDL
request_poll(service->state, service, VCHIQ_POLL_REMOVE);
}
while (1) {
- if (wait_for_completion_killable(&service->remove_event)) {
+ if (wait_for_completion_interruptible(&service->remove_event)) {
status = VCHIQ_RETRY;
break;
}
@@ -3046,7 +3047,7 @@ VCHIQ_STATUS_T vchiq_bulk_transfer(VCHIQ
VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
do {
mutex_unlock(&service->bulk_mutex);
- if (wait_for_completion_killable(
+ if (wait_for_completion_interruptible(
&service->bulk_remove_event)) {
status = VCHIQ_RETRY;
goto error_exit;
@@ -3123,7 +3124,7 @@ waiting:
if (bulk_waiter) {
bulk_waiter->bulk = bulk;
- if (wait_for_completion_killable(&bulk_waiter->event))
+ if (wait_for_completion_interruptible(&bulk_waiter->event))
status = VCHIQ_RETRY;
else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
status = VCHIQ_ERROR;
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -50,7 +50,7 @@ void vchiu_queue_push(struct vchiu_queue
return;
while (queue->write == queue->read + queue->size) {
- if (wait_for_completion_killable(&queue->pop))
+ if (wait_for_completion_interruptible(&queue->pop))
flush_signals(current);
}
@@ -63,7 +63,7 @@ void vchiu_queue_push(struct vchiu_queue
struct vchiq_header *vchiu_queue_peek(struct vchiu_queue *queue)
{
while (queue->write == queue->read) {
- if (wait_for_completion_killable(&queue->push))
+ if (wait_for_completion_interruptible(&queue->push))
flush_signals(current);
}
@@ -77,7 +77,7 @@ struct vchiq_header *vchiu_queue_pop(str
struct vchiq_header *header;
while (queue->write == queue->read) {
- if (wait_for_completion_killable(&queue->push))
+ if (wait_for_completion_interruptible(&queue->push))
flush_signals(current);
}