[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240817204639.132794-4-axboe@kernel.dk>
Date: Sat, 17 Aug 2024 14:45:12 -0600
From: Jens Axboe <axboe@...nel.dk>
To: linux-kernel@...r.kernel.org
Cc: peterz@...radead.org,
tglx@...utronix.de,
Jens Axboe <axboe@...nel.dk>
Subject: [PATCH 3/4] sched/core: have io_schedule_prepare() return a long
In preparation for needing more state than 32-bit on 64-bit archs,
switch it to a long instead.
Signed-off-by: Jens Axboe <axboe@...nel.dk>
---
block/blk-cgroup.c | 2 +-
include/linux/sched.h | 4 ++--
kernel/locking/mutex.c | 4 ++--
kernel/locking/rtmutex_api.c | 4 ++--
kernel/sched/core.c | 6 +++---
5 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 69e70964398c..f8e6220c66a7 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1884,7 +1884,7 @@ static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
u64 now = blk_time_get_ns();
u64 exp;
u64 delay_nsec = 0;
- int tok;
+ long tok;
while (blkg->parent) {
int use_delay = atomic_read(&blkg->use_delay);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c1b4ee3234f..c1a65e19a3ac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -319,8 +319,8 @@ asmlinkage void preempt_schedule_irq(void);
extern void schedule_rtlock(void);
#endif
-extern int __must_check io_schedule_prepare(void);
-extern void io_schedule_finish(int token);
+extern long __must_check io_schedule_prepare(void);
+extern void io_schedule_finish(long token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index cbae8c0b89ab..4a86ea6c7f19 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -830,7 +830,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
- int token;
+ long token;
might_sleep();
@@ -1026,7 +1026,7 @@ EXPORT_SYMBOL(mutex_lock_killable);
*/
void __sched mutex_lock_io(struct mutex *lock)
{
- int token;
+ long token;
token = io_schedule_prepare();
mutex_lock(lock);
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index a6974d044593..ddf7f7f3f0b5 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -547,7 +547,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
- int token;
+ long token;
might_sleep();
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(mutex_lock_killable);
void __sched mutex_lock_io(struct mutex *lock)
{
- int token = io_schedule_prepare();
+ long token = io_schedule_prepare();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
io_schedule_finish(token);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ddabf20cd9e0..7cb7ca38fdfc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7427,16 +7427,16 @@ static inline void preempt_dynamic_init(void) { }
#endif /* CONFIG_PREEMPT_DYNAMIC */
-int io_schedule_prepare(void)
+long io_schedule_prepare(void)
{
- int old_iowait = current->in_iowait;
+ long old_iowait = current->in_iowait;
current->in_iowait = 1;
blk_flush_plug(current->plug, true);
return old_iowait;
}
-void io_schedule_finish(int token)
+void io_schedule_finish(long token)
{
current->in_iowait = token;
}
--
2.43.0
Powered by blists - more mailing lists