[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20220608142457.GA2400218@ubuntu20>
Date: Wed, 8 Jun 2022 22:24:57 +0800
From: Zhipeng Shi <zhipeng.shi0@...il.com>
To: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: peterz@...radead.org, mingo@...hat.com, will@...nel.org,
longman@...hat.com, boqun.feng@...il.com, tglx@...utronix.de,
schspa@...il.com, shengjian.xu@...izon.ai,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] locking/rtmutex: Provide proper spin_is_contended
On Tue, Jun 07, 2022 at 11:24:56AM +0200, Sebastian Andrzej Siewior wrote:
> On 2022-06-07 17:15:14 [+0800], Zhipeng Shi wrote:
> > Commit d89c70356acf ("locking/core: Remove break_lock field when
> > CONFIG_GENERIC_LOCKBREAK=y") removed GENERIC_LOCKBREAK, which caused
> > spin_is_contended depend on the implementation of arch_spin_is_contended.
> > But now in rt-spinlock, spin_is_contended returns 0 directly.
> >
> > This causes cond_resched_lock to fail to correctly detect lock contention
> > in RT-linux. In some scenarios (such as __purge_vmap_area_lazy in vmalloc),
> > this will cause a large latency.
> >
> > This patch provides the implementation of spin_is_contended for
> > rt-spinlock.
>
> Do you have a test-case or an example?
Sorry for the late reply; here is my test case.
After applying this patch, the maximum vmalloc latency is reduced from 10+ msec
to 200+ usec, because the spin_lock is released partway through
__purge_vmap_area_lazy.
But today I found that if I run my test script with 'chrt -f 10', there is
still a large latency in the vmalloc path. From the trace, it seems that
update_curr_rt leads to resched_curr. I haven't figured out the reason behind
it yet and am still investigating. I would really appreciate your thoughts
on this issue.
Below is the test module
---------------------------------
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/kmem.h>
#define TEST_MALLOC_SIZE 64
#define MAX_OFF 20000
void* buf[MAX_OFF];
void* fub[MAX_OFF];
int off = 0;
int ffo = 0;
u64 total_time1 = 0;
u64 total_time2 = 0;
u64 total_cnt1 = 0;
u64 total_cnt2 = 0;
u64 alloc_time1[16];
u64 alloc_time2[16];
#define DECIMAL_BASE 10
#define MODULE_NAME "vmalloc_test"
unsigned long long duration_max = 0;
ssize_t vmalloc_test_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char set_str[4] = {0};
uint32_t set_bits = 0;
ssize_t err = 0;
ktime_t time1, time2, delta;
unsigned long long duration;
int i = 0;
err = simple_write_to_buffer((void*)set_str, sizeof(set_str) - 1u,
pos, (const void*)buffer, count);
if (err == 0)
return err;
if (kstrtouint(set_str, DECIMAL_BASE, &set_bits) != 0)
return -EINVAL;
switch (set_bits) {
case 0:
if (off >= MAX_OFF) {
printk("buf number exceed\n");
return -1;
}
time1 = ktime_get();
buf[off] = vmalloc(TEST_MALLOC_SIZE);
if (NULL == buf[off]) {
printk("vmalloc buf failed\n");
return -1;
}
memset(buf[off], 1, TEST_MALLOC_SIZE);
off++;
time2 = ktime_get();
delta = ktime_sub(time2, time1);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
if (duration > 8000) {
printk("%s:%d, elapse %llu usec\n",
__func__, __LINE__, duration);
return -1;
}
total_cnt1++;
total_time1 += duration;
alloc_time1[fls(duration) - 1]++;
if (duration > duration_max)
duration_max = duration;
break;
case 1:
for (i = 0; i < MAX_OFF; i++) {
if (buf[i] == NULL)
break;
vfree(buf[i]);
buf[i] = NULL;
}
off = 0;
break;
case 2:
if (ffo >= MAX_OFF) {
printk("buf number exceed\n");
return -1;
}
time1 = ktime_get();
fub[ffo] = vmalloc(TEST_MALLOC_SIZE);
if (NULL == fub[ffo]) {
printk("vmalloc buf failed\n");
return -1;
}
memset(fub[ffo], 1, TEST_MALLOC_SIZE);
ffo++;
time2 = ktime_get();
delta = ktime_sub(time2, time1);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
if (duration > 8000) {
printk("%s:%d, elapse %llu usec\n",
__func__, __LINE__, duration);
//return -1;
}
total_cnt2++;
total_time2 += duration;
alloc_time1[fls(duration)- 1]++;
if (duration > duration_max)
duration_max = duration;
break;
case 3:
for (i = 0; i < MAX_OFF; i++) {
if (fub[i] == NULL)
break;
vfree(fub[i]);
fub[i] = NULL;
}
ffo = 0;
break;
default:
printk("input error\n");
return -EINVAL;
}
return (ssize_t)count;
}
static int32_t vmalloc_test_proc_show(struct seq_file *m, void *v)
{
int i = 0;
seq_printf(m, "max: %llu us\n", duration_max);
seq_printf(m, "avg1: %llu us\n", total_time1/total_cnt1);
seq_printf(m, "avg2: %llu us\n", total_time2/total_cnt2);
for (i = 0; i < 16; i++) {
seq_printf(m, "[%u ~ %u): %llu\n",
1 << i, 1 << (i + 1), alloc_time1[i]);
}
return 0;
}
/*
 * open handler: wire the seq_file single-show interface to
 * vmalloc_test_proc_show, passing through the proc private data.
 */
static int32_t vmalloc_test_fault_injection_proc_open(struct inode *inode,
		struct file *file)
{
	return single_open(file, vmalloc_test_proc_show, PDE_DATA(inode));
}
/*
 * proc_ops for /proc/driver/vmalloc_test/test: reads go through the
 * seq_file single-show machinery, writes drive the test state machine.
 */
static const struct proc_ops vmalloc_test_fault_inject_proc_fops = {
	.proc_open = vmalloc_test_fault_injection_proc_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
	.proc_write = vmalloc_test_proc_write,
};
/* /proc/driver/vmalloc_test directory and its "test" entry. */
static struct proc_dir_entry *root_irq_dir = NULL;
static struct proc_dir_entry *entry;
/*
 * proc entry mode.  The original value 444 was decimal (= octal 0674),
 * which is not a sensible permission set.  Use octal 0644: world-readable
 * and owner-writable -- the test scripts write commands into this file.
 */
#define vmalloc_test_FAULT_INJECTION_MODE 0644
/*
 * Create /proc/driver/vmalloc_test/test.
 *
 * Returns 0 on success, -EINVAL if the directory cannot be created,
 * -ENOMEM if the entry cannot be created.  On entry-creation failure the
 * just-created directory is removed again (the original leaked it).
 */
static int32_t vmalloc_test_register_procfs(void)
{
	if (NULL == root_irq_dir) {
		root_irq_dir = proc_mkdir("driver/vmalloc_test", NULL);
		if (NULL == root_irq_dir) {
			return -EINVAL;
		}
	}
	entry = proc_create_data("test",
			vmalloc_test_FAULT_INJECTION_MODE,
			root_irq_dir,
			&vmalloc_test_fault_inject_proc_fops, NULL);
	if (entry == NULL) {
		/* Don't leave an empty directory behind on failure. */
		proc_remove(root_irq_dir);
		root_irq_dir = NULL;
		return -ENOMEM;
	}
	return 0;
}
/*
 * Remove the proc directory; proc_remove() recursively removes the
 * "test" entry as well, and is a no-op on NULL.  Always returns 0.
 */
static int32_t vmalloc_test_unregister_procfs(void)
{
	proc_remove(root_irq_dir);
	return 0;
}
/*
 * Module init: register the procfs interface.
 *
 * Propagate the real error code instead of collapsing it to -1 (-EPERM),
 * and log with an explicit severity rather than a bare printk.
 */
static s32 __init vmalloc_test_init(void)
{
	int ret;

	ret = vmalloc_test_register_procfs();
	if (ret) {
		pr_err(MODULE_NAME ": register proc failed (%d)\n", ret);
		return ret;
	}
	return 0;
}
module_init(vmalloc_test_init);/*PRQA S 0605*/
/* Module exit: tear down the procfs entries created at init time. */
static void __exit vmalloc_test_exit(void)
{
	vmalloc_test_unregister_procfs();
}
module_exit(vmalloc_test_exit);/*PRQA S 0605*/
MODULE_AUTHOR("Sun Kaikai<kaikai.sun@...izon.com>");
/* Description fixed: this is the vmalloc latency test module, not the GDC driver. */
MODULE_DESCRIPTION("vmalloc latency test module");
MODULE_LICENSE("GPL v2");
-------------------------------
Below is the test script no.1
-------------------------------------
#!/bin/bash
# Drive test case 0 (timed vmalloc into buf[]) in batches of 10000.
# A failing write means the module saw an over-threshold latency: stop
# tracing, free the buffers (case 1) and bail out.
x=0
while :; do
	for ((i = 1; i <= 10000; i++)); do
		if ! echo 0 > /proc/driver/vmalloc_test/test; then
			echo 0 > /sys/kernel/debug/tracing/tracing_on
			echo 1 > /proc/driver/vmalloc_test/test
			exit
		fi
	done
	# Batch survived: free everything and start the next round.
	echo 1 > /proc/driver/vmalloc_test/test
	echo "loop $x done"
	x=$((x + 1))
done
---------------------------------
Below is the test script no.2
------------------------------
#!/bin/bash
# Drive test case 2 (timed vmalloc into fub[]) in batches of 10000.
# A failing write stops tracing, frees the buffers (case 3) and exits.
x=0
while :; do
	for ((i = 1; i <= 10000; i++)); do
		if ! echo 2 > /proc/driver/vmalloc_test/test; then
			echo 0 > /sys/kernel/debug/tracing/tracing_on
			echo 3 > /proc/driver/vmalloc_test/test
			exit
		fi
	done
	# Batch survived: free everything and start the next round.
	echo 3 > /proc/driver/vmalloc_test/test
	echo "loop1 $x done"
	x=$((x + 1))
done
------------------------------
>
> The thing if _this_ task has higher priority than other tasks waiting on
> this lock then a simple unlock+lock won't do a thing. That is why I
> ripped it all out while it was half way done.
>
> > Signed-off-by: Zhipeng Shi <zhipeng.shi0@...il.com>
>
> Sebastian
Zhipeng
Powered by blists - more mailing lists