xsang@inn:~$ lkp ncompare -s commit=1f4f7f0f8845d/tbox_group=lkp-spr-2sp4/testcase=stress-ng -o commit=6ccdcb6d3a741 -a | grep -v interrupts | grep -v softirq
=========================================================================================
class/compiler/cpufreq_governor/disk/kconfig/nr_threads/rootfs/tbox_group/test/testcase/testtime:
  cpu-cache/gcc-12/performance/1SSD/x86_64-rhel-8.3/100%/debian-11.1-x86_64-20220510.cgz/lkp-spr-2sp4/judy/stress-ng/60s

commit:
  1f4f7f0f8845dbac40289cc3d50b81314c5a12b8
  6ccdcb6d3a741c4e005ca6ffd4a62ddf8b5bead3

1f4f7f0f8845dbac 6ccdcb6d3a741c4e005ca6ffd4a
---------------- ---------------------------
       fail:runs  %reproduction    fail:runs
           |             |             |
           10:10          0%          10:10  stress-ng.judy.Judy_delete_operations_per_sec.pass
           10:10          0%          10:10  stress-ng.judy.Judy_find_operations_per_sec.pass
           10:10          0%          10:10  stress-ng.judy.Judy_insert_operations_per_sec.pass
           10:10          0%          10:10  stress-ng.judy.pass
         %stddev     %change         %stddev
             \          |                \
151.27 ± 56%  +6.6%  161.22 ± 52%  dmesg.timestamp:last
124.42 ± 11%  +1.1%  125.79 ± 9%  kmsg.timestamp:last
1.107e+09 ± 29%  +67.3%  1.852e+09 ± 75%  cpuidle..time
1084691 ± 30%  +71.0%  1855097 ± 77%  cpuidle..usage
125.81 ± 10%  +3.3%  129.98 ± 11%  uptime.boot
13667 ± 21%  +6.8%  14591 ± 23%  uptime.idle
53.76 ± 5%  +0.6%  54.06 ± 4%  boot-time.boot
30.41  -0.2%  30.36  boot-time.dhcp
11000 ± 5%  +0.6%  11066 ± 4%  boot-time.idle
10.11 ± 18%  +35.2%  13.67 ± 51%  iostat.cpu.idle
4.96 ± 2%  +25.9%  6.25 ± 17%  iostat.cpu.system
84.92 ± 2%  -5.7%  80.08 ± 8%  iostat.cpu.user
7.37 ± 27%  +3.8  11.16 ± 66%  mpstat.cpu.all.idle%
1.57 ± 3%  -0.1  1.51 ± 5%  mpstat.cpu.all.irq%
0.14 ± 3%  +0.4  0.49 ± 33%  mpstat.cpu.all.soft%
3.38 ± 2%  +1.0  4.41 ± 22%  mpstat.cpu.all.sys%
87.54 ± 2%  -5.1  82.43 ± 8%  mpstat.cpu.all.usr%
17731 ± 33%  +7.7%  19098 ± 26%  perf-c2c.DRAM.local
227.40 ± 39%  +19.3%  271.30 ± 26%  perf-c2c.DRAM.remote
396.50 ± 33%  +21.0%  479.80 ± 29%  perf-c2c.HITM.local
54.60 ± 51%  +12.1%  61.20 ± 35%  perf-c2c.HITM.remote
451.10 ± 35%  +19.9%  541.00 ± 29%  perf-c2c.HITM.total
0.00  -100.0%  0.00  numa-numastat.node0.interleave_hit
42179114  -12.9%  36727757 ± 8%  numa-numastat.node0.local_node
42322224  -12.9%  36881294 ± 8%  numa-numastat.node0.numa_hit
141985 ± 51%  +8.1%  153548 ± 39%  numa-numastat.node0.other_node
0.00  -100.0%  0.00  numa-numastat.node1.interleave_hit
41820129  -12.8%  36449421 ± 9%  numa-numastat.node1.local_node
41909301  -12.8%  36528507 ± 9%  numa-numastat.node1.numa_hit
89842 ± 81%  -12.8%  78315 ± 77%  numa-numastat.node1.other_node
9.94 ± 20%  +37.6%  13.68 ± 50%  vmstat.cpu.id
4.91 ± 2%  +27.3%  6.25 ± 17%  vmstat.cpu.sy
85.15 ± 2%  -6.0%  80.06 ± 8%  vmstat.cpu.us
0.02 ± 2%  -4.2%  0.01 ± 8%  vmstat.io.bi
4.00  +0.0%  4.00  vmstat.memory.buff
3275886  +0.7%  3300023  vmstat.memory.cache
5.199e+08  -0.1%  5.196e+08  vmstat.memory.free
202.27 ± 2%  -3.2%  195.88 ± 7%  vmstat.procs.r
9290  -4.0%  8915 ± 6%  vmstat.system.cs
366738  -2.3%  358141 ± 2%  vmstat.system.in
62.20  -0.0%  62.20  time.elapsed_time
62.20  -0.0%  62.20  time.elapsed_time.max
258360  -2.6%  251676  time.involuntary_context_switches
0.30 ±152%  +166.7%  0.80 ±108%  time.major_page_faults
10893 ± 7%  +23.0%  13397 ± 29%  time.maximum_resident_set_size
82141065  -13.1%  71375734 ± 9%  time.minor_page_faults
4096  +0.0%  4096  time.page_size
21055  -1.2%  20804  time.percent_of_cpu_this_job_got
480.34  +35.5%  650.87 ± 21%  time.system_time
12618  -2.6%  12291  time.user_time
563.40 ± 2%  -0.7%  559.50 ± 3%  time.voluntary_context_switches
2608 ± 2%  -3.3%  2521 ± 7%  turbostat.Avg_MHz
92.38 ± 2%  -3.8  88.53 ± 8%  turbostat.Busy%
2839  +0.9%  2864  turbostat.Bzy_MHz
1414 ± 18%  +32.0%  1866 ± 35%  turbostat.C1
527910 ± 28%  +64.1%  866385 ± 73%  turbostat.C1E
3.33 ± 26%  +1.7  5.05 ± 64%  turbostat.C1E%
612375 ± 28%  +67.9%  1028102 ± 76%  turbostat.C6
4.26 ± 24%  +2.1  6.39 ± 62%  turbostat.C6%
7.50 ± 25%  +51.2%  11.34 ± 63%  turbostat.CPU%c1
0.12 ± 17%  +1.6%  0.13 ± 21%  turbostat.CPU%c6
59.70 ± 3%  -2.0%  58.50 ± 4%  turbostat.CoreTmp
1.25  -2.6%  1.22  turbostat.IPC
24279278  +2.3%  24843604 ± 5%  turbostat.IRQ
180.02 ± 2%  -11.0  169.01 ± 8%  turbostat.PKG_%
2789 ± 31%  +33.1%  3712 ± 47%  turbostat.POLL
60.00 ± 3%  -2.3%  58.60 ± 4%  turbostat.PkgTmp
672.54  -1.8%  660.52 ± 3%  turbostat.PkgWatt
37.66  -2.4%  36.77 ± 2%  turbostat.RAMWatt
1989  -0.0%  1988  turbostat.TSC_MHz
6925490  -0.9%  6862477  stress-ng.judy.Judy_delete_operations_per_sec
22515488  -0.4%  22420191  stress-ng.judy.Judy_find_operations_per_sec
9036524  -3.9%  8685310 ± 3%  stress-ng.judy.Judy_insert_operations_per_sec
171299  -2.0%  167905  stress-ng.judy.ops
2853  -2.0%  2796  stress-ng.judy.ops_per_sec
62.20  -0.0%  62.20  stress-ng.time.elapsed_time
62.20  -0.0%  62.20  stress-ng.time.elapsed_time.max
258360  -2.6%  251676  stress-ng.time.involuntary_context_switches
0.30 ±152%  +166.7%  0.80 ±108%  stress-ng.time.major_page_faults
10893 ± 7%  +23.0%  13397 ± 29%  stress-ng.time.maximum_resident_set_size
82141065  -13.1%  71375734 ± 9%  stress-ng.time.minor_page_faults
4096  +0.0%  4096  stress-ng.time.page_size
21055  -1.2%  20804  stress-ng.time.percent_of_cpu_this_job_got
480.34  +35.5%  650.87 ± 21%  stress-ng.time.system_time
12618  -2.6%  12291  stress-ng.time.user_time
563.40 ± 2%  -0.7%  559.50 ± 3%  stress-ng.time.voluntary_context_switches
46768 ± 18%  +73.1%  80961 ± 56%  meminfo.Active
46689 ± 18%  +73.2%  80881 ± 56%  meminfo.Active(anon)
78.40 ± 6%  +2.0%  79.98  meminfo.Active(file)
127245 ± 16%  +5.8%  134611 ± 14%  meminfo.AnonHugePages
1451846  -2.1%  1422053 ± 6%  meminfo.AnonPages
4.00  +0.0%  4.00  meminfo.Buffers
3121206  +0.7%  3144477  meminfo.Cached
2.637e+08  +0.0%  2.637e+08  meminfo.CommitLimit
4368713  -2.2%  4271782 ± 5%  meminfo.Committed_AS
5.245e+08  -0.1%  5.24e+08  meminfo.DirectMap1G
11274240 ± 12%  +4.5%  11782144 ± 12%  meminfo.DirectMap2M
198052 ± 5%  +8.3%  214436 ± 5%  meminfo.DirectMap4k
2048  +0.0%  2048  meminfo.Hugepagesize
1800847  -2.1%  1763898 ± 5%  meminfo.Inactive
1800650  -2.1%  1763721 ± 5%  meminfo.Inactive(anon)
197.33 ± 2%  -10.5%  176.70 ± 8%  meminfo.Inactive(file)
148140  +0.2%  148422  meminfo.KReclaimable
41102  -0.2%  41026  meminfo.KernelStack
517263 ± 2%  +0.3%  518808 ± 2%  meminfo.Mapped
5.178e+08  -0.1%  5.175e+08  meminfo.MemAvailable
5.199e+08  -0.1%  5.196e+08  meminfo.MemFree
5.274e+08  +0.0%  5.274e+08  meminfo.MemTotal
7517161  +4.3%  7841550 ± 4%  meminfo.Memused
10.95 ± 2%  -4.0%  10.51 ± 8%  meminfo.Mlocked
25594 ± 3%  -1.9%  25112 ± 7%  meminfo.PageTables
109277  +0.3%  109597  meminfo.Percpu
148140  +0.2%  148422  meminfo.SReclaimable
462907  +0.2%  463832  meminfo.SUnreclaim
405085 ± 2%  +5.8%  428379 ± 8%  meminfo.Shmem
611047  +0.2%  612255  meminfo.Slab
2715849  -0.0%  2715845  meminfo.Unevictable
1.374e+13  +0.0%  1.374e+13  meminfo.VmallocTotal
292661  +0.0%  292707  meminfo.VmallocUsed
7721333  +6.6%  8231493 ± 4%  meminfo.max_used_kB
6639 ± 70%  +25.7%  8344 ±160%  numa-meminfo.node0.Active
6609 ± 70%  +25.5%  8296 ±161%  numa-meminfo.node0.Active(anon)
30.40 ±123%  +57.9%  48.00 ± 81%  numa-meminfo.node0.Active(file)
65249 ± 56%  -24.1%  49545 ± 74%  numa-meminfo.node0.AnonHugePages
698546 ± 10%  -10.1%  627683 ± 11%  numa-meminfo.node0.AnonPages
783098 ± 9%  -4.6%  746765 ± 10%  numa-meminfo.node0.AnonPages.max
1849056 ± 60%  -27.0%  1350657 ± 82%  numa-meminfo.node0.FilePages
708083 ± 10%  -8.9%  645187 ± 13%  numa-meminfo.node0.Inactive
708005 ± 10%  -8.9%  645081 ± 13%  numa-meminfo.node0.Inactive(anon)
77.48 ±122%  +37.3%  106.35 ± 82%  numa-meminfo.node0.Inactive(file)
82324 ± 32%  -8.0%  75702 ± 27%  numa-meminfo.node0.KReclaimable
20512 ± 7%  +1.4%  20790 ± 7%  numa-meminfo.node0.KernelStack
175959 ± 9%  -0.1%  175730 ± 7%  numa-meminfo.node0.Mapped
2.594e+08  +0.2%  2.599e+08  numa-meminfo.node0.MemFree
2.635e+08  +0.0%  2.635e+08  numa-meminfo.node0.MemTotal
4076783 ± 28%  -10.2%  3660809 ± 31%  numa-meminfo.node0.MemUsed
6.66 ± 81%  -2.3%  6.51 ± 81%  numa-meminfo.node0.Mlocked
9901 ± 61%  +4.5%  10346 ± 49%  numa-meminfo.node0.PageTables
82324 ± 32%  -8.0%  75702 ± 27%  numa-meminfo.node0.SReclaimable
248888 ± 3%  +0.5%  250078 ± 4%  numa-meminfo.node0.SUnreclaim
20568 ± 40%  +41.9%  29194 ± 98%  numa-meminfo.node0.Shmem
331213 ± 9%  -1.6%  325780 ± 7%  numa-meminfo.node0.Slab
1828379 ± 61%  -27.7%  1321308 ± 85%  numa-meminfo.node0.Unevictable
40129 ± 22%  +83.1%  73478 ± 53%  numa-meminfo.node1.Active
40081 ± 22%  +83.2%  73447 ± 53%  numa-meminfo.node1.Active(anon)
48.00 ± 81%  -33.4%  31.98 ±122%  numa-meminfo.node1.Active(file)
62048 ± 50%  +37.2%  85152 ± 49%  numa-meminfo.node1.AnonHugePages
756214 ± 9%  +3.8%  784679 ± 12%  numa-meminfo.node1.AnonPages
924235 ± 6%  +6.7%  986345 ± 7%  numa-meminfo.node1.AnonPages.max
1271716 ± 88%  +41.1%  1793782 ± 63%  numa-meminfo.node1.FilePages
1095798 ± 5%  +1.1%  1108364 ± 10%  numa-meminfo.node1.Inactive
1095678 ± 5%  +1.2%  1108294 ± 10%  numa-meminfo.node1.Inactive(anon)
119.85 ± 77%  -41.3%  70.35 ±118%  numa-meminfo.node1.Inactive(file)
65822 ± 40%  +10.5%  72715 ± 29%  numa-meminfo.node1.KReclaimable
20590 ± 7%  -1.7%  20233 ± 8%  numa-meminfo.node1.KernelStack
341090 ± 4%  +0.0%  341144 ± 4%  numa-meminfo.node1.Mapped
2.605e+08  -0.3%  2.597e+08  numa-meminfo.node1.MemFree
2.639e+08  +0.0%  2.639e+08  numa-meminfo.node1.MemTotal
3445580 ± 33%  +21.0%  4169560 ± 27%  numa-meminfo.node1.MemUsed
4.33 ±122%  -8.0%  3.98 ±123%  numa-meminfo.node1.Mlocked
15703 ± 37%  -6.1%  14739 ± 38%  numa-meminfo.node1.PageTables
65822 ± 40%  +10.5%  72715 ± 29%  numa-meminfo.node1.SReclaimable
214196 ± 4%  -0.2%  213692 ± 5%  numa-meminfo.node1.SUnreclaim
384078 ± 3%  +3.9%  399143 ± 7%  numa-meminfo.node1.Shmem
280018 ± 11%  +2.3%  286407 ± 9%  numa-meminfo.node1.Slab
887469 ±126%  +57.1%  1394536 ± 80%  numa-meminfo.node1.Unevictable
1655 ± 70%  +28.3%  2123 ±163%  numa-vmstat.node0.nr_active_anon
7.60 ±123%  +57.9%  12.00 ± 81%  numa-vmstat.node0.nr_active_file
173571 ± 10%  -9.6%  156867 ± 11%  numa-vmstat.node0.nr_anon_pages
31.86 ± 57%  -24.0%  24.20 ± 74%  numa-vmstat.node0.nr_anon_transparent_hugepages
462268 ± 60%  -27.0%  337669 ± 82%  numa-vmstat.node0.nr_file_pages
64861249  +0.2%  64962956  numa-vmstat.node0.nr_free_pages
176031 ± 9%  -8.5%  161129 ± 12%  numa-vmstat.node0.nr_inactive_anon
19.37 ±122%  +37.3%  26.59 ± 82%  numa-vmstat.node0.nr_inactive_file
0.00 ±300%  +1.6%  0.00 ±300%  numa-vmstat.node0.nr_isolated_anon
20512 ± 7%  +1.4%  20790 ± 7%  numa-vmstat.node0.nr_kernel_stack
43945 ± 9%  -0.2%  43864 ± 7%  numa-vmstat.node0.nr_mapped
1.66 ± 81%  -2.5%  1.62 ± 81%  numa-vmstat.node0.nr_mlock
2475 ± 61%  +4.5%  2585 ± 49%  numa-vmstat.node0.nr_page_table_pages
5146 ± 40%  +41.9%  7303 ± 98%  numa-vmstat.node0.nr_shmem
20581 ± 32%  -8.0%  18925 ± 27%  numa-vmstat.node0.nr_slab_reclaimable
62224 ± 3%  +0.5%  62514 ± 4%  numa-vmstat.node0.nr_slab_unreclaimable
457094 ± 61%  -27.7%  330327 ± 85%  numa-vmstat.node0.nr_unevictable
1655 ± 70%  +28.3%  2123 ±163%  numa-vmstat.node0.nr_zone_active_anon
7.60 ±123%  +57.9%  12.00 ± 81%  numa-vmstat.node0.nr_zone_active_file
176029 ± 9%  -8.5%  161125 ± 12%  numa-vmstat.node0.nr_zone_inactive_anon
19.37 ±122%  +37.3%  26.59 ± 82%  numa-vmstat.node0.nr_zone_inactive_file
457094 ± 61%  -27.7%  330327 ± 85%  numa-vmstat.node0.nr_zone_unevictable
42322270  -12.9%  36881741 ± 8%  numa-vmstat.node0.numa_hit
0.00  -100.0%  0.00  numa-vmstat.node0.numa_interleave
42179160  -12.9%  36728210 ± 8%  numa-vmstat.node0.numa_local
141985 ± 51%  +8.1%  153548 ± 39%  numa-vmstat.node0.numa_other
10088 ± 22%  +82.5%  18410 ± 53%  numa-vmstat.node1.nr_active_anon
12.00 ± 81%  -33.4%  7.99 ±122%  numa-vmstat.node1.nr_active_file
187803 ± 9%  +4.4%  196071 ± 12%  numa-vmstat.node1.nr_anon_pages
30.30 ± 50%  +37.2%  41.58 ± 49%  numa-vmstat.node1.nr_anon_transparent_hugepages
317887 ± 88%  +41.1%  448582 ± 63%  numa-vmstat.node1.nr_file_pages
65116967  -0.3%  64935325  numa-vmstat.node1.nr_free_pages
272583 ± 6%  +1.6%  276951 ± 10%  numa-vmstat.node1.nr_inactive_anon
29.96 ± 77%  -41.3%  17.59 ±118%  numa-vmstat.node1.nr_inactive_file
0.00  +5.9e+101%  0.59 ±300%  numa-vmstat.node1.nr_isolated_anon
20596 ± 7%  -1.8%  20229 ± 8%  numa-vmstat.node1.nr_kernel_stack
85156 ± 4%  +0.3%  85436 ± 4%  numa-vmstat.node1.nr_mapped
1.08 ±122%  -8.0%  1.00 ±123%  numa-vmstat.node1.nr_mlock
3929 ± 37%  -6.4%  3678 ± 38%  numa-vmstat.node1.nr_page_table_pages
95977 ± 3%  +4.1%  99922 ± 7%  numa-vmstat.node1.nr_shmem
16455 ± 40%  +10.5%  18180 ± 29%  numa-vmstat.node1.nr_slab_reclaimable
53550 ± 4%  -0.2%  53423 ± 5%  numa-vmstat.node1.nr_slab_unreclaimable
221867 ±126%  +57.1%  348634 ± 80%  numa-vmstat.node1.nr_unevictable
10088 ± 22%  +82.5%  18410 ± 53%  numa-vmstat.node1.nr_zone_active_anon
12.00 ± 81%  -33.4%  7.99 ±122%  numa-vmstat.node1.nr_zone_active_file
272580 ± 6%  +1.6%  276963 ± 10%  numa-vmstat.node1.nr_zone_inactive_anon
29.96 ± 77%  -41.3%  17.59 ±118%  numa-vmstat.node1.nr_zone_inactive_file
221867 ±126%  +57.1%  348634 ± 80%  numa-vmstat.node1.nr_zone_unevictable
41909012  -12.8%  36528578 ± 9%  numa-vmstat.node1.numa_hit
0.00  -100.0%  0.00  numa-vmstat.node1.numa_interleave
41819840  -12.8%  36449493 ± 9%  numa-vmstat.node1.numa_local
89842 ± 81%  -12.8%  78314 ± 77%  numa-vmstat.node1.numa_other
59.40 ± 9%  +14.0%  67.70 ± 9%  proc-vmstat.direct_map_level2_splits
5.00 ± 28%  +12.0%  5.60 ± 27%  proc-vmstat.direct_map_level3_splits
11737 ± 19%  +72.4%  20235 ± 56%  proc-vmstat.nr_active_anon
19.60 ± 6%  +2.0%  19.99  proc-vmstat.nr_active_file
362955  -2.2%  354917 ± 6%  proc-vmstat.nr_anon_pages
62.13 ± 16%  +5.8%  65.73 ± 14%  proc-vmstat.nr_anon_transparent_hugepages
12924190  -0.1%  12916030  proc-vmstat.nr_dirty_background_threshold
25879981  -0.1%  25863641  proc-vmstat.nr_dirty_threshold
780355  +0.7%  786173  proc-vmstat.nr_file_pages
1.3e+08  -0.1%  1.299e+08  proc-vmstat.nr_free_pages
450188  -2.2%  440379 ± 5%  proc-vmstat.nr_inactive_anon
49.33 ± 2%  -10.5%  44.18 ± 8%  proc-vmstat.nr_inactive_file
0.57 ±300%  -2.9%  0.56 ±299%  proc-vmstat.nr_isolated_anon
41099  -0.2%  41022  proc-vmstat.nr_kernel_stack
129179 ± 2%  +0.3%  129620 ± 2%  proc-vmstat.nr_mapped
2.73 ± 2%  -4.2%  2.62 ± 8%  proc-vmstat.nr_mlock
6394 ± 3%  -1.9%  6270 ± 7%  proc-vmstat.nr_page_table_pages
101324 ± 2%  +5.7%  107147 ± 8%  proc-vmstat.nr_shmem
37036  +0.2%  37105  proc-vmstat.nr_slab_reclaimable
115732  +0.2%  115946  proc-vmstat.nr_slab_unreclaimable
678962  -0.0%  678961  proc-vmstat.nr_unevictable
11737 ± 19%  +72.4%  20235 ± 56%  proc-vmstat.nr_zone_active_anon
19.60 ± 6%  +2.0%  19.99  proc-vmstat.nr_zone_active_file
450188  -2.2%  440380 ± 5%  proc-vmstat.nr_zone_inactive_anon
49.33 ± 2%  -10.5%  44.18 ± 8%  proc-vmstat.nr_zone_inactive_file
678962  -0.0%  678961  proc-vmstat.nr_zone_unevictable
59062 ± 5%  -2.1%  57815 ± 10%  proc-vmstat.numa_hint_faults
55966 ± 7%  -3.9%  53755 ± 9%  proc-vmstat.numa_hint_faults_local
84235288  -12.8%  73412158 ± 8%  proc-vmstat.numa_hit
36.70 ± 4%  -1.9%  36.00  proc-vmstat.numa_huge_pte_updates
0.00  -100.0%  0.00  proc-vmstat.numa_interleave
84003235  -12.9%  73179535 ± 8%  proc-vmstat.numa_local
231828  +0.0%  231864  proc-vmstat.numa_other
4365 ± 37%  -0.1%  4362 ± 61%  proc-vmstat.numa_pages_migrated
191149  +1.4%  193820 ± 4%  proc-vmstat.numa_pte_updates
72087 ± 36%  -8.6%  65920 ± 67%  proc-vmstat.pgactivate
85864265  -12.5%  75117942 ± 8%  proc-vmstat.pgalloc_normal
82879163  -13.0%  72119383 ± 9%  proc-vmstat.pgfault
85427394  -12.6%  74693640 ± 8%  proc-vmstat.pgfree
4365 ± 37%  -0.1%  4362 ± 61%  proc-vmstat.pgmigrate_success
0.00  -100.0%  0.00  proc-vmstat.pgpgin
27273 ± 3%  +4.5%  28501 ± 11%  proc-vmstat.pgreuse
46.70 ± 9%  +11.3%  52.00 ± 14%  proc-vmstat.thp_collapse_alloc
0.20 ±200%  +200.0%  0.60 ± 81%  proc-vmstat.thp_deferred_split_page
48.40  +0.6%  48.70  proc-vmstat.thp_fault_alloc
4.80 ± 75%  -37.5%  3.00 ±100%  proc-vmstat.thp_migration_success
0.20 ±200%  +200.0%  0.60 ± 81%  proc-vmstat.thp_split_pmd
0.00  -100.0%  0.00  proc-vmstat.thp_zero_page_alloc
35.00  +0.0%  35.00  proc-vmstat.unevictable_pgs_culled
3.00  +0.0%  3.00  proc-vmstat.unevictable_pgs_mlocked
3.00  +0.0%  3.00  proc-vmstat.unevictable_pgs_munlocked
0.00  -100.0%  0.00  proc-vmstat.unevictable_pgs_rescued
1563750 ± 2%  +3.0%  1610100 ± 12%  proc-vmstat.unevictable_pgs_scanned
0.61 ± 3%  +0.4%  0.61  perf-stat.i.MPKI
1.501e+11 ± 2%  -6.3%  1.407e+11 ± 8%  perf-stat.i.branch-instructions
0.33 ± 3%  +0.0  0.33 ± 6%  perf-stat.i.branch-miss-rate%
3.963e+08 ± 2%  -7.9%  3.649e+08 ± 9%  perf-stat.i.branch-misses
65.13  -2.3  62.80 ± 7%  perf-stat.i.cache-miss-rate%
7.382e+08 ± 2%  -6.3%  6.919e+08 ± 8%  perf-stat.i.cache-misses
1.106e+09  -6.6%  1.033e+09 ± 8%  perf-stat.i.cache-references
9256  -4.6%  8828 ± 6%  perf-stat.i.context-switches
0.50 ± 7%  +33.3%  0.66 ± 43%  perf-stat.i.cpi
224180  -0.0%  224171  perf-stat.i.cpu-clock
5.93e+11 ± 2%  -3.8%  5.706e+11 ± 8%  perf-stat.i.cpu-cycles
359.21 ± 2%  -6.1%  337.30 ± 3%  perf-stat.i.cpu-migrations
810.86 ± 3%  +50.1%  1217 ± 61%  perf-stat.i.cycles-between-cache-misses
0.01 ±134%  +0.1  0.13 ±176%  perf-stat.i.dTLB-load-miss-rate%
12662250 ± 2%  +1.0%  12784614 ± 2%  perf-stat.i.dTLB-load-misses
2.861e+11 ± 2%  -6.5%  2.677e+11 ± 8%  perf-stat.i.dTLB-loads
0.01 ± 9%  +0.0  0.01 ± 43%  perf-stat.i.dTLB-store-miss-rate%
13024682 ± 2%  -14.4%  11149515 ± 8%  perf-stat.i.dTLB-store-misses
1.757e+11 ± 2%  -6.6%  1.641e+11 ± 8%  perf-stat.i.dTLB-stores
1.275e+12 ± 2%  -6.4%  1.194e+12 ± 8%  perf-stat.i.instructions
2.10  -6.3%  1.97 ± 7%  perf-stat.i.ipc
0.20 ± 48%  +6.9%  0.21 ± 40%  perf-stat.i.major-faults
1.26 ± 2%  -6.7%  1.18 ± 9%  perf-stat.i.metric.G/sec
2.65 ± 2%  -3.8%  2.55 ± 8%  perf-stat.i.metric.GHz
694.48  -3.0%  673.92 ± 2%  perf-stat.i.metric.K/sec
1472  -6.2%  1380 ± 8%  perf-stat.i.metric.M/sec
1305005 ± 2%  -17.5%  1076189 ± 9%  perf-stat.i.minor-faults
4.59 ± 8%  +2.1  6.65 ± 51%  perf-stat.i.node-load-miss-rate%
2077324 ± 2%  -2.2%  2032308 ± 12%  perf-stat.i.node-load-misses
1.353e+08  -5.4%  1.281e+08 ± 8%  perf-stat.i.node-loads
1305005 ± 2%  -17.5%  1076190 ± 9%  perf-stat.i.page-faults
224180  -0.0%  224171  perf-stat.i.task-clock
0.46 ± 50%  +25.1%  0.58  perf-stat.overall.MPKI
0.21 ± 50%  +0.0  0.26  perf-stat.overall.branch-miss-rate%
53.47 ± 50%  +13.6  67.06  perf-stat.overall.cache-miss-rate%
0.37 ± 50%  +28.6%  0.48  perf-stat.overall.cpi
643.14 ± 50%  +28.4%  826.00  perf-stat.overall.cycles-between-cache-misses
0.00 ± 50%  +0.0  0.00 ± 10%  perf-stat.overall.dTLB-load-miss-rate%
0.01 ± 50%  +0.0  0.01 ± 7%  perf-stat.overall.dTLB-store-miss-rate%
1.72 ± 50%  +21.6%  2.09  perf-stat.overall.ipc
1.19 ± 50%  +0.4  1.58 ± 12%  perf-stat.overall.node-load-miss-rate%
1.173e+11 ± 50%  +17.9%  1.383e+11 ± 7%  perf-stat.ps.branch-instructions
3.098e+08 ± 50%  +15.8%  3.586e+08 ± 8%  perf-stat.ps.branch-misses
5.777e+08 ± 50%  +17.9%  6.809e+08 ± 7%  perf-stat.ps.cache-misses
8.643e+08 ± 50%  +17.4%  1.015e+09 ± 7%  perf-stat.ps.cache-references
7130 ± 50%  +19.9%  8552 ± 5%  perf-stat.ps.context-switches
174005 ± 50%  +25.5%  218373  perf-stat.ps.cpu-clock
4.644e+11 ± 50%  +21.0%  5.621e+11 ± 7%  perf-stat.ps.cpu-cycles
272.51 ± 50%  +16.6%  317.85 ± 2%  perf-stat.ps.cpu-migrations
9924949 ± 50%  +25.8%  12485559 ± 2%  perf-stat.ps.dTLB-load-misses
2.237e+11 ± 50%  +17.6%  2.631e+11 ± 7%  perf-stat.ps.dTLB-loads
10159493 ± 50%  +7.8%  10948561 ± 7%  perf-stat.ps.dTLB-store-misses
1.374e+11 ± 50%  +17.4%  1.613e+11 ± 7%  perf-stat.ps.dTLB-stores
9.964e+11 ± 50%  +17.7%  1.173e+12 ± 7%  perf-stat.ps.instructions
0.16 ± 73%  +21.3%  0.20 ± 35%  perf-stat.ps.major-faults
1016189 ± 50%  +3.9%  1056067 ± 9%  perf-stat.ps.minor-faults
1612917 ± 50%  +25.8%  2029077 ± 12%  perf-stat.ps.node-load-misses
1.067e+08 ± 50%  +18.9%  1.269e+08 ± 7%  perf-stat.ps.node-loads
1016189 ± 50%  +3.9%  1056067 ± 9%  perf-stat.ps.page-faults
174005 ± 50%  +25.5%  218373  perf-stat.ps.task-clock
6.343e+13 ± 50%  +23.6%  7.843e+13  perf-stat.total.instructions
7114718  -1.2%  7027229  sched_debug.cfs_rq:/.avg_vruntime.avg
7180297  -1.0%  7105319  sched_debug.cfs_rq:/.avg_vruntime.max
4657471  -3.9%  4477025 ± 2%  sched_debug.cfs_rq:/.avg_vruntime.min
174545 ± 2%  +3.8%  181256 ± 3%  sched_debug.cfs_rq:/.avg_vruntime.stddev
0.55  +2.0%  0.56  sched_debug.cfs_rq:/.h_nr_running.avg
2.10 ± 20%  +9.5%  2.30 ± 17%  sched_debug.cfs_rq:/.h_nr_running.max
0.50  +0.0%  0.50  sched_debug.cfs_rq:/.h_nr_running.min
0.22 ± 8%  +12.7%  0.25 ± 10%  sched_debug.cfs_rq:/.h_nr_running.stddev
43106 ± 33%  +312.8%  177940 ± 52%  sched_debug.cfs_rq:/.left_vruntime.avg
6406425 ± 17%  +10.2%  7059947  sched_debug.cfs_rq:/.left_vruntime.max
0.00  +0.0%  0.00  sched_debug.cfs_rq:/.left_vruntime.min
490512 ± 23%  +111.4%  1037149 ± 28%  sched_debug.cfs_rq:/.left_vruntime.stddev
9516 ± 26%  +105.3%  19537 ± 37%  sched_debug.cfs_rq:/.load.avg
1009013 ± 48%  +15.6%  1166009 ± 43%  sched_debug.cfs_rq:/.load.max
2280  -0.4%  2271  sched_debug.cfs_rq:/.load.min
80417 ± 39%  +48.9%  119761 ± 28%  sched_debug.cfs_rq:/.load.stddev
615.03 ± 25%  +70.0%  1045 ± 46%  sched_debug.cfs_rq:/.load_avg.avg
43884 ± 3%  +0.4%  44060 ± 3%  sched_debug.cfs_rq:/.load_avg.max
2.15 ± 10%  +0.0%  2.15 ± 14%  sched_debug.cfs_rq:/.load_avg.min
4551 ± 15%  +29.7%  5903 ± 27%  sched_debug.cfs_rq:/.load_avg.stddev
7114719  -1.2%  7027233  sched_debug.cfs_rq:/.min_vruntime.avg
7180297  -1.0%  7105319  sched_debug.cfs_rq:/.min_vruntime.max
4657590  -3.9%  4477122 ± 2%  sched_debug.cfs_rq:/.min_vruntime.min
174538 ± 2%  +3.8%  181250 ± 3%  sched_debug.cfs_rq:/.min_vruntime.stddev
0.54  +1.4%  0.54  sched_debug.cfs_rq:/.nr_running.avg
1.80 ± 22%  +8.3%  1.95 ± 17%  sched_debug.cfs_rq:/.nr_running.max
0.50  +0.0%  0.50  sched_debug.cfs_rq:/.nr_running.min
0.17 ± 12%  +18.9%  0.21 ± 12%  sched_debug.cfs_rq:/.nr_running.stddev
92.62 ±144%  +105.7%  190.49 ± 96%  sched_debug.cfs_rq:/.removed.load_avg.avg
13672 ±147%  +96.3%  26833 ± 80%  sched_debug.cfs_rq:/.removed.load_avg.max
1060 ±145%  +102.2%  2144 ± 84%  sched_debug.cfs_rq:/.removed.load_avg.stddev
6.84 ± 48%  -1.3%  6.76 ± 45%  sched_debug.cfs_rq:/.removed.runnable_avg.avg
265.30 ± 2%  -1.6%  261.00  sched_debug.cfs_rq:/.removed.runnable_avg.max
39.85 ± 21%  -1.3%  39.33 ± 20%  sched_debug.cfs_rq:/.removed.runnable_avg.stddev
6.84 ± 48%  -1.3%  6.76 ± 45%  sched_debug.cfs_rq:/.removed.util_avg.avg
265.30 ± 2%  -1.6%  261.00  sched_debug.cfs_rq:/.removed.util_avg.max
39.85 ± 21%  -1.3%  39.33 ± 20%  sched_debug.cfs_rq:/.removed.util_avg.stddev
43106 ± 33%  +312.8%  177940 ± 52%  sched_debug.cfs_rq:/.right_vruntime.avg
6406449 ± 17%  +10.2%  7059947  sched_debug.cfs_rq:/.right_vruntime.max
0.00  +0.0%  0.00  sched_debug.cfs_rq:/.right_vruntime.min
490516 ± 23%  +111.4%  1037152 ± 28%  sched_debug.cfs_rq:/.right_vruntime.stddev
602.56  +1.7%  612.78  sched_debug.cfs_rq:/.runnable_avg.avg
1824 ± 15%  +8.0%  1970 ± 13%  sched_debug.cfs_rq:/.runnable_avg.max
511.80  -9.8%  461.45 ± 32%  sched_debug.cfs_rq:/.runnable_avg.min
197.27 ± 7%  +6.7%  210.57 ± 8%  sched_debug.cfs_rq:/.runnable_avg.stddev
0.50 ± 99%  +10.0%  0.55 ±124%  sched_debug.cfs_rq:/.spread.avg
111.63 ± 99%  +7.6%  120.07 ±126%  sched_debug.cfs_rq:/.spread.max
7.44 ± 99%  +7.7%  8.02 ±125%  sched_debug.cfs_rq:/.spread.stddev
588.90  +0.3%  590.76  sched_debug.cfs_rq:/.util_avg.avg
1267 ± 16%  +1.5%  1286 ± 17%  sched_debug.cfs_rq:/.util_avg.max
32.70 ±290%  +205.4%  99.85 ±172%  sched_debug.cfs_rq:/.util_avg.min
163.16 ± 4%  +0.8%  164.51 ± 9%  sched_debug.cfs_rq:/.util_avg.stddev
261.48  +0.7%  263.22  sched_debug.cfs_rq:/.util_est_enqueued.avg
924.20  +9.8%  1014 ± 13%  sched_debug.cfs_rq:/.util_est_enqueued.max
16.40 ± 90%  +74.4%  28.60 ± 42%  sched_debug.cfs_rq:/.util_est_enqueued.min
87.88 ± 8%  +5.1%  92.40 ± 10%  sched_debug.cfs_rq:/.util_est_enqueued.stddev
895647  +2.7%  919389 ± 5%  sched_debug.cpu.avg_idle.avg
1584773 ± 25%  -5.4%  1499839 ± 21%  sched_debug.cpu.avg_idle.max
180026 ± 47%  -17.3%  148883 ± 47%  sched_debug.cpu.avg_idle.min
139458 ± 12%  +17.5%  163799 ± 45%  sched_debug.cpu.avg_idle.stddev
91837 ± 13%  +0.9%  92634 ± 10%  sched_debug.cpu.clock.avg
91883 ± 13%  +0.9%  92682 ± 10%  sched_debug.cpu.clock.max
91778 ± 13%  +0.9%  92573 ± 10%  sched_debug.cpu.clock.min
28.81 ± 15%  +1.8%  29.34 ± 13%  sched_debug.cpu.clock.stddev
91190 ± 13%  +0.9%  91995 ± 10%  sched_debug.cpu.clock_task.avg
91362 ± 13%  +0.9%  92168 ± 10%  sched_debug.cpu.clock_task.max
78321 ± 15%  +1.2%  79242 ± 12%  sched_debug.cpu.clock_task.min
873.05  -1.0%  864.68  sched_debug.cpu.clock_task.stddev
4249  -1.9%  4166  sched_debug.cpu.curr->pid.avg
7337 ± 3%  +0.8%  7398 ± 2%  sched_debug.cpu.curr->pid.max
1048 ± 97%  -90.8%  96.65 ± 46%  sched_debug.cpu.curr->pid.min
836.33 ± 10%  +35.5%  1132 ± 14%  sched_debug.cpu.curr->pid.stddev
502089  +1.2%  508197 ± 3%  sched_debug.cpu.max_idle_balance_cost.avg
700999 ± 18%  -2.1%  685975 ± 15%  sched_debug.cpu.max_idle_balance_cost.max
500000  +0.0%  500000  sched_debug.cpu.max_idle_balance_cost.min
17056 ± 57%  +39.7%  23833 ±121%  sched_debug.cpu.max_idle_balance_cost.stddev
4294  +0.0%  4294  sched_debug.cpu.next_balance.avg
4294  +0.0%  4294  sched_debug.cpu.next_balance.max
4294  +0.0%  4294  sched_debug.cpu.next_balance.min
0.00 ± 57%  -20.1%  0.00 ± 36%  sched_debug.cpu.next_balance.stddev
0.55  +1.9%  0.56 ± 2%  sched_debug.cpu.nr_running.avg
2.45 ± 19%  +2.0%  2.50 ± 20%  sched_debug.cpu.nr_running.max
0.50  +0.0%  0.50  sched_debug.cpu.nr_running.min
0.24 ± 10%  +8.1%  0.26 ± 11%  sched_debug.cpu.nr_running.stddev
2437  -0.6%  2423  sched_debug.cpu.nr_switches.avg
46748 ± 21%  +2.2%  47779 ± 11%  sched_debug.cpu.nr_switches.max
1195  -3.2%  1157 ± 2%  sched_debug.cpu.nr_switches.min
3810 ± 11%  +1.6%  3869 ± 6%  sched_debug.cpu.nr_switches.stddev
0.01 ± 56%  -54.2%  0.00 ±110%  sched_debug.cpu.nr_uninterruptible.avg
42.85 ± 38%  -0.9%  42.45 ± 38%  sched_debug.cpu.nr_uninterruptible.max
-17.50  +35.1%  -23.65  sched_debug.cpu.nr_uninterruptible.min
5.78 ± 12%  +4.1%  6.02 ± 12%  sched_debug.cpu.nr_uninterruptible.stddev
91781 ± 13%  +0.9%  92577 ± 10%  sched_debug.cpu_clk
996147  +0.0%  996147  sched_debug.dl_rq:.dl_bw->bw.avg
996147  +0.0%  996147  sched_debug.dl_rq:.dl_bw->bw.max
996147  +0.0%  996147  sched_debug.dl_rq:.dl_bw->bw.min
4.295e+09  +0.0%  4.295e+09  sched_debug.jiffies
90574 ± 13%  +0.9%  91369 ± 10%  sched_debug.ktime
0.00  +0.0%  0.00  sched_debug.rt_rq:.rt_nr_migratory.avg
0.50  +0.0%  0.50  sched_debug.rt_rq:.rt_nr_migratory.max
0.03  +0.0%  0.03  sched_debug.rt_rq:.rt_nr_migratory.stddev
0.00  +0.0%  0.00  sched_debug.rt_rq:.rt_nr_running.avg
0.50  +0.0%  0.50  sched_debug.rt_rq:.rt_nr_running.max
0.03  +0.0%  0.03  sched_debug.rt_rq:.rt_nr_running.stddev
950.00  +0.0%  950.00  sched_debug.rt_rq:.rt_runtime.avg
950.00  +0.0%  950.00  sched_debug.rt_rq:.rt_runtime.max
950.00  +0.0%  950.00  sched_debug.rt_rq:.rt_runtime.min
0.00  +2.2e+98%  0.00 ±299%  sched_debug.rt_rq:.rt_throttled.avg
0.00  +5e+100%  0.05 ±300%  sched_debug.rt_rq:.rt_throttled.max
0.00  +3.3e+99%  0.00 ±299%  sched_debug.rt_rq:.rt_throttled.stddev
0.91 ± 64%  -7.2%  0.84 ± 91%  sched_debug.rt_rq:.rt_time.avg
203.22 ± 64%  -7.2%  188.67 ± 91%  sched_debug.rt_rq:.rt_time.max
13.55 ± 64%  -7.2%  12.58 ± 91%  sched_debug.rt_rq:.rt_time.stddev
92678 ± 12%  +0.9%  93471 ± 10%  sched_debug.sched_clk
1.00  +0.0%  1.00  sched_debug.sched_clock_stable()
3.00  +0.0%  3.00  sched_debug.sysctl_sched.sysctl_sched_base_slice
25056823  +0.0%  25056823  sched_debug.sysctl_sched.sysctl_sched_features
1.00  +0.0%  1.00  sched_debug.sysctl_sched.sysctl_sched_tunable_scaling
0.13 ±200%  -0.1  0.00  perf-profile.calltrace.cycles-pp.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.__sysvec_apic_timer_interrupt
0.13 ±200%  -0.1  0.00  perf-profile.calltrace.cycles-pp.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt
0.12 ±200%  -0.1  0.00  perf-profile.calltrace.cycles-pp.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues
0.10 ±200%  -0.1  0.05 ±300%  perf-profile.calltrace.cycles-pp.j__udyLAllocJBBJP
0.05 ±299%  -0.1  0.00  perf-profile.calltrace.cycles-pp.get_mem_cgroup_from_mm.__mem_cgroup_charge.do_anonymous_page.__handle_mm_fault.handle_mm_fault
0.14 ±199%  -0.0  0.10 ±200%  perf-profile.calltrace.cycles-pp.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.__sysvec_apic_timer_interrupt.sysvec_apic_timer_interrupt
0.12 ±200%  +0.0  0.16 ±152%  perf-profile.calltrace.cycles-pp.page_counter_uncharge.uncharge_batch.__mem_cgroup_uncharge_list.release_pages.tlb_batch_pages_flush
0.13 ±200%  +0.1  0.22 ±122%  perf-profile.calltrace.cycles-pp.uncharge_batch.__mem_cgroup_uncharge_list.release_pages.tlb_batch_pages_flush.tlb_finish_mmu
0.25 ±200%  +0.1  0.40 ± 65%  perf-profile.calltrace.cycles-pp.__hrtimer_run_queues.hrtimer_interrupt.__sysvec_apic_timer_interrupt.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt
0.00  +0.2  0.19 ±152%  perf-profile.calltrace.cycles-pp.rmqueue.get_page_from_freelist.__alloc_pages.pte_alloc_one.__pte_alloc
0.00  +0.2  0.19 ±152%  perf-profile.calltrace.cycles-pp.__rmqueue_pcplist.rmqueue.get_page_from_freelist.__alloc_pages.pte_alloc_one
0.00  +0.2  0.19 ±153%  perf-profile.calltrace.cycles-pp.get_page_from_freelist.__alloc_pages.pte_alloc_one.__pte_alloc.do_anonymous_page
0.00  +0.2  0.20 ±152%  perf-profile.calltrace.cycles-pp.__alloc_pages.pte_alloc_one.__pte_alloc.do_anonymous_page.__handle_mm_fault
0.00  +0.2  0.20 ±152%  perf-profile.calltrace.cycles-pp.__pte_alloc.do_anonymous_page.__handle_mm_fault.handle_mm_fault.do_user_addr_fault
0.00  +0.2  0.20 ±152%  perf-profile.calltrace.cycles-pp.pte_alloc_one.__pte_alloc.do_anonymous_page.__handle_mm_fault.handle_mm_fault
0.23 ±200%  +0.2  0.43 ±122%  perf-profile.calltrace.cycles-pp.JudyLDel@plt
0.14 ±200%  +0.3  0.40 ± 66%  perf-profile.calltrace.cycles-pp.__mem_cgroup_uncharge_list.release_pages.tlb_batch_pages_flush.tlb_finish_mmu.unmap_region
0.12 ±200%  +0.3  0.38 ± 65%  perf-profile.calltrace.cycles-pp.hrtimer_interrupt.__sysvec_apic_timer_interrupt.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt.JudyLGet
0.12 ±200%  +0.3  0.38 ± 65%  perf-profile.calltrace.cycles-pp.__sysvec_apic_timer_interrupt.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt.JudyLGet
0.16 ±200%  +0.3  0.42 ± 66%  perf-profile.calltrace.cycles-pp.__mem_cgroup_charge.do_anonymous_page.__handle_mm_fault.handle_mm_fault.do_user_addr_fault
0.13 ±200%  +0.3  0.43 ± 65%  perf-profile.calltrace.cycles-pp.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt.JudyLGet
0.14 ±200%  +0.3  0.47 ± 65%  perf-profile.calltrace.cycles-pp.asm_sysvec_apic_timer_interrupt.JudyLGet
0.17 ±200%  +0.4  0.53 ± 65%  perf-profile.calltrace.cycles-pp.hrtimer_interrupt.__sysvec_apic_timer_interrupt.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt
0.17 ±200%  +0.4  0.54 ± 65%  perf-profile.calltrace.cycles-pp.__sysvec_apic_timer_interrupt.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.calltrace.cycles-pp.ordered_events__queue.process_simple.reader__read_event.perf_session__process_events.record__finish_output
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.calltrace.cycles-pp.queue_event.ordered_events__queue.process_simple.reader__read_event.perf_session__process_events
0.17 ±200%  +0.4  0.54 ± 65%  perf-profile.calltrace.cycles-pp.cfree
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.calltrace.cycles-pp.process_simple.reader__read_event.perf_session__process_events.record__finish_output.__cmd_record
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.calltrace.cycles-pp.__cmd_record
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.calltrace.cycles-pp.record__finish_output.__cmd_record
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.calltrace.cycles-pp.perf_session__process_events.record__finish_output.__cmd_record
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.calltrace.cycles-pp.reader__read_event.perf_session__process_events.record__finish_output.__cmd_record
0.20 ±200%  +0.4  0.65 ± 65%  perf-profile.calltrace.cycles-pp.__split_vma.do_vmi_align_munmap.__do_sys_brk.do_syscall_64.entry_SYSCALL_64_after_hwframe
0.21 ±200%  +0.5  0.67 ± 65%  perf-profile.calltrace.cycles-pp.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt
0.22 ±200%  +0.5  0.72 ± 65%  perf-profile.calltrace.cycles-pp.asm_sysvec_apic_timer_interrupt
0.25 ±200%  +0.5  0.77 ± 65%  perf-profile.calltrace.cycles-pp.j__udyLCascade2
0.24 ±200%  +0.5  0.76 ± 65%  perf-profile.calltrace.cycles-pp.j__udyLCreateBranchU
0.27 ±200%  +0.6  0.87 ± 65%  perf-profile.calltrace.cycles-pp.JudyLGet@plt
0.27 ±200%  +0.6  0.88 ± 65%  perf-profile.calltrace.cycles-pp.JudyLIns@plt
0.00  +0.7  0.68 ± 92%  perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.free_pcppages_bulk.free_unref_page_commit.free_unref_page.rcu_do_batch
0.34 ±200%  +0.8  1.11 ± 65%  perf-profile.calltrace.cycles-pp.malloc
0.00  +0.8  0.82 ± 79%  perf-profile.calltrace.cycles-pp.free_pcppages_bulk.free_unref_page_commit.free_unref_page.rcu_do_batch.rcu_core
0.00  +1.1  1.13 ± 84%  perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.free_pcppages_bulk.free_unref_page_commit.free_unref_page.__unfreeze_partials
0.00  +1.2  1.24 ± 83%  perf-profile.calltrace.cycles-pp.free_pcppages_bulk.free_unref_page_commit.free_unref_page.__unfreeze_partials.rcu_do_batch
0.00  +1.2  1.24 ± 82%  perf-profile.calltrace.cycles-pp.free_unref_page_commit.free_unref_page.__unfreeze_partials.rcu_do_batch.rcu_core
0.73 ±200%  +1.6  2.29 ± 65%  perf-profile.calltrace.cycles-pp.JudyLDel
0.00  +1.6  1.57 ±101%  perf-profile.calltrace.cycles-pp.__rmqueue_pcplist.rmqueue.get_page_from_freelist.__alloc_pages.__folio_alloc
0.70 ±200%  +1.6  2.27 ± 65%  perf-profile.calltrace.cycles-pp.JudyLIns
0.00  +1.6  1.58 ±100%  perf-profile.calltrace.cycles-pp.rmqueue.get_page_from_freelist.__alloc_pages.__folio_alloc.vma_alloc_folio
0.00  +1.6  1.59 ±110%  perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.rmqueue_bulk.__rmqueue_pcplist.rmqueue.get_page_from_freelist
0.00  +1.6  1.59 ±110%  perf-profile.calltrace.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.rmqueue_bulk.__rmqueue_pcplist.rmqueue
0.00  +1.7  1.71 ±108%  perf-profile.calltrace.cycles-pp.rmqueue_bulk.__rmqueue_pcplist.rmqueue.get_page_from_freelist.__alloc_pages
0.12 ±200%  +1.7  1.86 ± 92%  perf-profile.calltrace.cycles-pp.get_page_from_freelist.__alloc_pages.__folio_alloc.vma_alloc_folio.do_anonymous_page
0.12 ±200%  +1.8  1.88 ± 92%  perf-profile.calltrace.cycles-pp.__alloc_pages.__folio_alloc.vma_alloc_folio.do_anonymous_page.__handle_mm_fault
0.12 ±200%  +1.8  1.88 ± 92%  perf-profile.calltrace.cycles-pp.__folio_alloc.vma_alloc_folio.do_anonymous_page.__handle_mm_fault.handle_mm_fault
0.13 ±200%  +1.8  1.90 ± 91%  perf-profile.calltrace.cycles-pp.vma_alloc_folio.do_anonymous_page.__handle_mm_fault.handle_mm_fault.do_user_addr_fault
0.00  +1.8  1.81 ± 87%  perf-profile.calltrace.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.free_pcppages_bulk.free_unref_page_commit.free_unref_page
0.12 ±200%  +2.2  2.30 ± 73%  perf-profile.calltrace.cycles-pp._raw_spin_lock_irqsave.folio_lruvec_lock_irqsave.release_pages.tlb_batch_pages_flush.tlb_finish_mmu
0.13 ±200%  +2.2  2.32 ± 73%  perf-profile.calltrace.cycles-pp.folio_lruvec_lock_irqsave.release_pages.tlb_batch_pages_flush.tlb_finish_mmu.unmap_region
0.00  +2.2  2.19 ± 74%  perf-profile.calltrace.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.folio_lruvec_lock_irqsave.release_pages.tlb_batch_pages_flush
0.12 ±200%  +2.3  2.42 ± 77%  perf-profile.calltrace.cycles-pp.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm
0.12 ±200%  +2.3  2.45 ± 77%  perf-profile.calltrace.cycles-pp.kthread.ret_from_fork.ret_from_fork_asm
0.12 ±200%  +2.3  2.45 ± 77%  perf-profile.calltrace.cycles-pp.ret_from_fork_asm
0.12 ±200%  +2.3  2.45 ± 77%  perf-profile.calltrace.cycles-pp.ret_from_fork.ret_from_fork_asm
1.05 ±200%  +2.4  3.42 ± 65%  perf-profile.calltrace.cycles-pp.stress_judy
0.33 ±200%  +2.6  2.90 ± 70%  perf-profile.calltrace.cycles-pp.release_pages.tlb_batch_pages_flush.tlb_finish_mmu.unmap_region.do_vmi_align_munmap
0.34 ±200%  +2.6  2.92 ± 70%  perf-profile.calltrace.cycles-pp.tlb_batch_pages_flush.tlb_finish_mmu.unmap_region.do_vmi_align_munmap.__do_sys_brk
0.46 ±200%  +2.6  3.10 ± 80%  perf-profile.calltrace.cycles-pp.do_anonymous_page.__handle_mm_fault.handle_mm_fault.do_user_addr_fault.exc_page_fault
0.38 ±200%  +2.7  3.04 ± 69%  perf-profile.calltrace.cycles-pp.tlb_finish_mmu.unmap_region.do_vmi_align_munmap.__do_sys_brk.do_syscall_64
0.48 ±200%  +2.7  3.15 ± 79%  perf-profile.calltrace.cycles-pp.__handle_mm_fault.handle_mm_fault.do_user_addr_fault.exc_page_fault.asm_exc_page_fault
0.50 ±200%  +2.7  3.20 ± 79%  perf-profile.calltrace.cycles-pp.handle_mm_fault.do_user_addr_fault.exc_page_fault.asm_exc_page_fault
0.53 ±200%  +2.8  3.30 ± 78%  perf-profile.calltrace.cycles-pp.do_user_addr_fault.exc_page_fault.asm_exc_page_fault
0.53 ±200%  +2.8  3.31 ± 78%  perf-profile.calltrace.cycles-pp.exc_page_fault.asm_exc_page_fault
0.57 ±200%  +2.9  3.43 ± 77%  perf-profile.calltrace.cycles-pp.asm_exc_page_fault
0.49 ±200%  +2.9  3.36 ± 68%  perf-profile.calltrace.cycles-pp.unmap_region.do_vmi_align_munmap.__do_sys_brk.do_syscall_64.entry_SYSCALL_64_after_hwframe
0.78 ±200%  +3.5  4.26 ± 67%  perf-profile.calltrace.cycles-pp.do_vmi_align_munmap.__do_sys_brk.do_syscall_64.entry_SYSCALL_64_after_hwframe.brk
0.81 ±200%  +3.6  4.36 ± 67%  perf-profile.calltrace.cycles-pp.__do_sys_brk.do_syscall_64.entry_SYSCALL_64_after_hwframe.brk
0.82 ±200%  +3.6  4.38 ± 67%  perf-profile.calltrace.cycles-pp.do_syscall_64.entry_SYSCALL_64_after_hwframe.brk
0.82 ±200%  +3.6  4.38 ± 67%  perf-profile.calltrace.cycles-pp.entry_SYSCALL_64_after_hwframe.brk
0.83 ±200%  +3.6  4.40 ± 67%  perf-profile.calltrace.cycles-pp.brk
13.33 ±200%  +29.8  43.13 ± 65%  perf-profile.calltrace.cycles-pp.JudyLGet
0.02 ±200%  -0.0  0.00  perf-profile.children.cycles-pp.rcu_sched_clock_irq
0.01 ±200%  -0.0  0.00  perf-profile.children.cycles-pp.rcu_pending
0.01 ±201%  -0.0  0.00  perf-profile.children.cycles-pp.update_cfs_group
0.01 ±200%  -0.0  0.00  perf-profile.children.cycles-pp.perf_rotate_context
0.01 ±200%  -0.0  0.00  perf-profile.children.cycles-pp.check_cpu_stall
0.01 ±200%  -0.0  0.00  perf-profile.children.cycles-pp.arch_scale_freq_tick
0.01 ±300%  -0.0  0.00  perf-profile.children.cycles-pp.down_write
0.01 ±200%  +0.0  0.01 ±200%  perf-profile.children.cycles-pp.__get_obj_cgroup_from_memcg
0.01 ±300%  +0.0  0.01 ±299%  perf-profile.children.cycles-pp.mas_store_b_node
0.01 ±300%  +0.0  0.01 ±299%  perf-profile.children.cycles-pp.cgroup_rstat_updated
0.01 ±200%  +0.0  0.02 ±152%  perf-profile.children.cycles-pp.calc_global_load_tick
0.02 ±200%  +0.0  0.02 ±122%  perf-profile.children.cycles-pp._compound_head
0.00  +0.0  0.01 ±299%  perf-profile.children.cycles-pp.perf_mmap__read_head
0.00  +0.0  0.01 ±300%  perf-profile.children.cycles-pp.__get_free_pages
0.00  +0.0  0.01 ±300%  perf-profile.children.cycles-pp.__tlb_remove_page_size
0.01 ±201%  +0.0  0.02 ±122%  perf-profile.children.cycles-pp.j__udyLAllocJLL3
0.01 ±200%  +0.0  0.02 ±100%  perf-profile.children.cycles-pp.rcu_nocb_try_bypass
0.00  +0.0  0.02 ±152%  perf-profile.children.cycles-pp.__list_add_valid_or_report
0.02 ±200%  +0.0  0.04 ± 65%  perf-profile.children.cycles-pp.perf_trace_sched_stat_runtime
0.00  +0.0  0.02 ±153%  perf-profile.children.cycles-pp.worker_thread
0.00  +0.0  0.02 ±153%  perf-profile.children.cycles-pp.process_one_work
0.01 ±200%  +0.0  0.03 ± 82%  perf-profile.children.cycles-pp.mas_find
0.01 ±201%  +0.0  0.03 ± 81%  perf-profile.children.cycles-pp.mtree_range_walk
0.03 ±200%  +0.0  0.05 ± 67%  perf-profile.children.cycles-pp.propagate_protected_usage
0.01 ±200%  +0.0  0.04 ± 65%  perf-profile.children.cycles-pp.update_load_avg
0.01 ±201%  +0.0  0.04 ± 65%  perf-profile.children.cycles-pp.mas_destroy
0.01 ±201%  +0.0  0.04 ±100%  perf-profile.children.cycles-pp.tick_sched_do_timer
0.01 ±200%  +0.0  0.04 ± 66%  perf-profile.children.cycles-pp.get_obj_cgroup_from_current
0.01 ±200%  +0.0  0.04 ± 66%  perf-profile.children.cycles-pp.mem_cgroup_update_lru_size
0.01 ±200%  +0.0  0.04 ± 66%  perf-profile.children.cycles-pp.__cond_resched
0.01 ±200%  +0.0  0.04 ± 66%  perf-profile.children.cycles-pp.__mod_node_page_state
0.01 ±200%  +0.0  0.04 ± 66%  perf-profile.children.cycles-pp.do_brk_flags
0.01 ±200%  +0.0  0.04 ± 66%  perf-profile.children.cycles-pp.lock_vma_under_rcu
0.02 ±200%  +0.0  0.05 ± 66%  perf-profile.children.cycles-pp.hrtimer_active
0.02 ±200%  +0.0  0.04 ± 66%  perf-profile.children.cycles-pp.free_unref_page_list
0.01 ±200%  +0.0  0.04 ± 66%  perf-profile.children.cycles-pp.unlink_anon_vmas
0.02 ±200%  +0.0  0.05 ± 65%  perf-profile.children.cycles-pp.___perf_sw_event
0.02 ±200%  +0.0  0.05 ± 66%  perf-profile.children.cycles-pp.__mod_lruvec_state
0.02 ±200%  +0.0  0.05 ± 65%  perf-profile.children.cycles-pp.try_charge_memcg
0.03 ±200%  +0.0  0.07 ± 65%  perf-profile.children.cycles-pp.update_curr
0.02 ±200%  +0.0  0.05 ± 65%  perf-profile.children.cycles-pp.mas_walk
0.01 ±200%  +0.0  0.05 ± 65%  perf-profile.children.cycles-pp.task_mm_cid_work
0.00  +0.0  0.04 ± 66%  perf-profile.children.cycles-pp.note_gp_changes
0.02 ±200%  +0.0  0.05 ± 65%  perf-profile.children.cycles-pp.free_pgtables
0.02 ±200%  +0.0  0.05 ± 65%  perf-profile.children.cycles-pp.task_work_run
0.02 ±200%  +0.0  0.06 ± 66%  perf-profile.children.cycles-pp.__perf_sw_event
0.02 ±200%  +0.0  0.06 ± 66%  perf-profile.children.cycles-pp.j__udyLCascade3
0.02 ±200%  +0.0  0.06 ± 65%  perf-profile.children.cycles-pp.j__udyLFreeJLL2
0.02 ±200%  +0.0  0.06 ± 65%  perf-profile.children.cycles-pp.exit_to_user_mode_loop
0.02 ±200%  +0.0  0.07 ± 67%  perf-profile.children.cycles-pp.lru_add_fn
0.04 ±200%  +0.0  0.08 ± 66%  perf-profile.children.cycles-pp.mem_cgroup_commit_charge
0.02 ±200%  +0.0  0.07 ± 65%  perf-profile.children.cycles-pp.j__udyLAllocJBBJP@plt
0.02 ±200%  +0.0  0.06 ± 65%  perf-profile.children.cycles-pp.irqentry_exit_to_user_mode
0.04 ±200%  +0.0  0.09 ± 66%  perf-profile.children.cycles-pp.__count_memcg_events
0.02 ±200%  +0.0  0.07 ± 65%  perf-profile.children.cycles-pp.j__udyLLeaf2ToLeaf3
0.02 ±200%  +0.0  0.07 ± 65%  perf-profile.children.cycles-pp.exit_to_user_mode_prepare
0.02 ±200%  +0.0  0.07 ± 65%  perf-profile.children.cycles-pp.mas_commit_b_node
0.02 ±200%  +0.0  0.07 ± 65%  perf-profile.children.cycles-pp.__call_rcu_common
0.01 ±200%  +0.1  0.06 ± 68%  perf-profile.children.cycles-pp.generic_perform_write
0.02 ±200%  +0.1  0.07 ± 65%  perf-profile.children.cycles-pp.j__udyLFreeJBBJP@plt
0.03 ±200%  +0.1  0.08 ± 65%  perf-profile.children.cycles-pp.anon_vma_clone
0.05 ±200%  +0.1  0.10 ± 66%  perf-profile.children.cycles-pp.__mod_memcg_lruvec_state
0.01 ±200%  +0.1  0.07 ± 69%  perf-profile.children.cycles-pp.shmem_file_write_iter
0.02 ±200%  +0.1  0.07 ± 70%  perf-profile.children.cycles-pp.vfs_write
0.03 ±200%  +0.1  0.09 ± 65%  perf-profile.children.cycles-pp.mod_objcg_state
0.03 ±200%  +0.1  0.08 ± 65%  perf-profile.children.cycles-pp.memcg_slab_post_alloc_hook
0.02 ±200%  +0.1  0.07 ± 69%  perf-profile.children.cycles-pp.ksys_write
0.00  +0.1  0.06 ± 74%  perf-profile.children.cycles-pp.allocate_slab
0.05 ±200%  +0.1  0.10 ± 66%  perf-profile.children.cycles-pp.page_remove_rmap
0.03 ±200%  +0.1  0.09 ± 65%  perf-profile.children.cycles-pp.j__udyLAllocJLL2
0.03 ±200%  +0.1  0.09 ± 65%  perf-profile.children.cycles-pp.vm_area_free_rcu_cb
0.02 ±200%  +0.1  0.08 ± 69%  perf-profile.children.cycles-pp.__libc_write
0.04 ±200%  +0.1  0.10 ± 66%  perf-profile.children.cycles-pp.native_flush_tlb_one_user
0.02 ±200%  +0.1  0.08 ± 70%  perf-profile.children.cycles-pp.record__pushfn
0.02 ±200%  +0.1  0.08 ± 68%  perf-profile.children.cycles-pp.writen
0.06 ±200%  +0.1  0.12 ± 66%  perf-profile.children.cycles-pp.folio_add_new_anon_rmap
0.04 ±200%  +0.1  0.10 ± 66%  perf-profile.children.cycles-pp.flush_tlb_func
0.02 ±200%  +0.1  0.08 ± 71%  perf-profile.children.cycles-pp.perf_mux_hrtimer_handler
0.02 ±200%  +0.1  0.09 ± 68%  perf-profile.children.cycles-pp.___slab_alloc
0.03 ±200%  +0.1  0.10 ± 67%  perf-profile.children.cycles-pp.ktime_get_update_offsets_now
0.04 ±200%  +0.1  0.12 ± 66%  perf-profile.children.cycles-pp.flush_tlb_mm_range
0.04 ±200%  +0.1  0.13 ± 65%  perf-profile.children.cycles-pp.sync_regs
0.04 ±200%  +0.1  0.13 ± 65%  perf-profile.children.cycles-pp.rcu_cblist_dequeue
0.04 ±200%  +0.1  0.13 ± 66%  perf-profile.children.cycles-pp.mas_wr_node_store
0.03 ±200%  +0.1  0.12 ± 66%  perf-profile.children.cycles-pp.__kmem_cache_alloc_bulk
0.04 ±200%  +0.1  0.13 ± 65%  perf-profile.children.cycles-pp.kmem_cache_free
0.04 ±200%  +0.1  0.13 ± 65%  perf-profile.children.cycles-pp.mas_wr_bnode
0.08 ±200%  +0.1  0.18 ± 65%  perf-profile.children.cycles-pp.task_tick_fair
0.02 ±200%  +0.1  0.12 ± 69%  perf-profile.children.cycles-pp.perf_mmap__push
0.03 ±199%  +0.1  0.13 ± 65%  perf-profile.children.cycles-pp.kmem_cache_alloc_bulk
0.02 ±200%  +0.1  0.12 ± 69%  perf-profile.children.cycles-pp.record__mmap_read_evlist
0.02 ±200%  +0.1  0.13 ± 69%  perf-profile.children.cycles-pp.cmd_record
0.10 ±200%  +0.1  0.20 ± 66%  perf-profile.children.cycles-pp.__mod_lruvec_page_state
0.05 ±200%  +0.1  0.16 ± 65%  perf-profile.children.cycles-pp.__slab_free
0.05 ±200%  +0.1  0.16 ± 65%  perf-profile.children.cycles-pp.__pte_offset_map_lock
0.05 ±200%  +0.1  0.16 ± 65%  perf-profile.children.cycles-pp.vm_area_dup
0.03 ±200%  +0.1  0.14 ± 68%  perf-profile.children.cycles-pp.__libc_start_main
0.03 ±200%  +0.1  0.14 ± 68%  perf-profile.children.cycles-pp.main
0.03 ±200%  +0.1  0.14 ± 68%  perf-profile.children.cycles-pp.run_builtin
0.00  +0.1  0.11 ± 68%  perf-profile.children.cycles-pp.__free_one_page
0.05 ±200%  +0.1  0.16 ± 65%  perf-profile.children.cycles-pp.mas_wr_store_entry
0.05 ±200%  +0.1  0.16 ± 65%  perf-profile.children.cycles-pp.free@plt
0.05 ±200%  +0.1  0.16 ± 65%  perf-profile.children.cycles-pp.JudyFree
0.05 ±200%  +0.1  0.16 ± 65%  perf-profile.children.cycles-pp.malloc@plt
0.05 ±200%  +0.1  0.17 ± 65%  perf-profile.children.cycles-pp.JudyMalloc
0.08 ±200%  +0.1  0.19 ± 67%  perf-profile.children.cycles-pp.zap_pte_range
0.06 ±200%  +0.1  0.18 ± 65%  perf-profile.children.cycles-pp.mas_store_gfp
0.06 ±200%  +0.1  0.18 ± 65%  perf-profile.children.cycles-pp.mas_store_prealloc
0.08 ±200%  +0.1  0.20 ± 66%  perf-profile.children.cycles-pp.native_irq_return_iret
0.06 ±200%  +0.1  0.18 ± 65%  perf-profile.children.cycles-pp.vma_complete
0.08 ±200%  +0.1  0.20 ± 67%  perf-profile.children.cycles-pp.zap_pmd_range
0.05 ±200%  +0.1  0.18 ± 68%  perf-profile.children.cycles-pp.folio_add_lru_vma
0.06 ±200%  +0.1  0.19 ± 65%  perf-profile.children.cycles-pp.__intel_pmu_enable_all
0.05 ±200%  +0.1  0.18 ± 68%  perf-profile.children.cycles-pp.folio_batch_move_lru
0.08 ±200%  +0.1  0.22 ± 67%  perf-profile.children.cycles-pp.unmap_page_range
0.05 ±200%  +0.1  0.18 ± 65%  perf-profile.children.cycles-pp.mas_alloc_nodes
0.05 ±200%  +0.1  0.19 ± 65%  perf-profile.children.cycles-pp.mas_preallocate
0.09 ±200%  +0.1  0.23 ± 67%  perf-profile.children.cycles-pp.unmap_vmas
0.05 ±200%  +0.1  0.20 ± 65%  perf-profile.children.cycles-pp.__irq_exit_rcu
0.07 ±200%  +0.2  0.22 ± 65%  perf-profile.children.cycles-pp.kmem_cache_alloc
0.00  +0.2  0.16 ± 70%  perf-profile.children.cycles-pp.clockevents_program_event
0.05 ±200%  +0.2  0.21 ± 66%  perf-profile.children.cycles-pp.__list_del_entry_valid_or_report
0.08 ±200%  +0.2  0.24 ± 65%  perf-profile.children.cycles-pp.JudyFree@plt
0.06 ±200%  +0.2  0.23 ± 66%  perf-profile.children.cycles-pp.clear_page_erms
0.08 ±200%  +0.2  0.25 ± 65%  perf-profile.children.cycles-pp.JudyMalloc@plt
0.10 ±200%  +0.2  0.29 ± 66%  perf-profile.children.cycles-pp.get_mem_cgroup_from_mm
0.10 ±200%  +0.2  0.31 ± 65%  perf-profile.children.cycles-pp.perf_event_task_tick
0.10 ±200%  +0.2  0.31 ± 65%  perf-profile.children.cycles-pp.perf_adjust_freq_unthr_context
0.00  +0.2  0.20 ± 68%  perf-profile.children.cycles-pp.ktime_get
0.12 ±200%  +0.2  0.33 ± 66%  perf-profile.children.cycles-pp.page_counter_uncharge
0.13 ±200%  +0.2  0.37 ± 66%  perf-profile.children.cycles-pp.uncharge_batch
0.14 ±200%  +0.3  0.40 ± 66%  perf-profile.children.cycles-pp.__mem_cgroup_uncharge_list
0.16 ±200%  +0.3  0.43 ± 66%  perf-profile.children.cycles-pp.__mem_cgroup_charge
0.00  +0.3  0.28 ± 94%  perf-profile.children.cycles-pp.__pte_alloc
0.00  +0.3  0.28 ± 94%  perf-profile.children.cycles-pp.pte_alloc_one
0.12 ±200%  +0.3  0.40 ± 65%  perf-profile.children.cycles-pp.j__udyLFreeJBBJP
0.23 ±200%  +0.4  0.58 ± 65%  perf-profile.children.cycles-pp.scheduler_tick
0.16 ±200%  +0.4  0.53 ± 65%  perf-profile.children.cycles-pp.j__udyLAllocJBBJP
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.children.cycles-pp.ordered_events__queue
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.children.cycles-pp.queue_event
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.children.cycles-pp.process_simple
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.children.cycles-pp.record__finish_output
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.children.cycles-pp.perf_session__process_events
0.16 ±200%  +0.4  0.53 ± 68%  perf-profile.children.cycles-pp.reader__read_event
0.17 ±200%  +0.4  0.55 ± 65%  perf-profile.children.cycles-pp.JudyLDel@plt
0.26 ±200%  +0.4  0.64 ± 65%  perf-profile.children.cycles-pp.update_process_times
0.26 ±200%  +0.4  0.64 ± 65%  perf-profile.children.cycles-pp.tick_sched_handle
0.27 ±200%  +0.4  0.70 ± 65%  perf-profile.children.cycles-pp.tick_sched_timer
0.20 ±200%  +0.4  0.63 ± 65%  perf-profile.children.cycles-pp.cfree
0.20 ±200%  +0.4  0.65 ± 65%  perf-profile.children.cycles-pp.__split_vma
0.20 ±200%  +0.5  0.67 ± 65%  perf-profile.children.cycles-pp.JudyLIns@plt
0.18 ±200%  +0.5  0.66 ± 67%  perf-profile.children.cycles-pp.__cmd_record
0.29 ±200%  +0.5  0.80 ± 65%  perf-profile.children.cycles-pp.__hrtimer_run_queues
0.25 ±200%  +0.5  0.77 ± 65%  perf-profile.children.cycles-pp.j__udyLCascade2
0.24 ±199%  +0.5  0.77 ± 65%  perf-profile.children.cycles-pp.j__udyLCreateBranchU
0.32 ±200%  +0.7  1.02 ± 65%  perf-profile.children.cycles-pp.JudyLGet@plt
0.33 ±200%  +0.7  1.07 ± 65%  perf-profile.children.cycles-pp.hrtimer_interrupt
0.33 ±200%  +0.7  1.08 ± 65%  perf-profile.children.cycles-pp.__sysvec_apic_timer_interrupt
0.38 ±200%  +0.9  1.25 ± 65%  perf-profile.children.cycles-pp.malloc
0.39 ±200%  +0.9  1.28 ± 65%  perf-profile.children.cycles-pp.sysvec_apic_timer_interrupt
0.42 ±200%  +1.0  1.39 ± 65%  perf-profile.children.cycles-pp.asm_sysvec_apic_timer_interrupt
0.00  +1.3  1.27 ± 82%  perf-profile.children.cycles-pp.__unfreeze_partials
0.13 ±200%  +1.8  1.89 ± 91%  perf-profile.children.cycles-pp.__folio_alloc
0.13 ±200%  +1.8  1.91 ± 91%  perf-profile.children.cycles-pp.vma_alloc_folio
0.00  +1.8  1.84 ±100%  perf-profile.children.cycles-pp.rmqueue_bulk
0.05 ±200%  +1.9  1.91 ± 98%  perf-profile.children.cycles-pp.rmqueue
0.84 ±200%  +1.9  2.72 ± 65%  perf-profile.children.cycles-pp.JudyLIns
0.00  +1.9  1.89 ± 99%  perf-profile.children.cycles-pp.__rmqueue_pcplist
0.92 ±200%  +2.0  2.88 ± 65%  perf-profile.children.cycles-pp.JudyLDel
0.12 ±200%  +2.1  2.18 ± 92%  perf-profile.children.cycles-pp.get_page_from_freelist
0.13 ±200%  +2.1  2.21 ± 91%  perf-profile.children.cycles-pp.__alloc_pages
0.01 ±300%  +2.1  2.09 ± 81%  perf-profile.children.cycles-pp.free_unref_page
0.01 ±300%  +2.1  2.09 ± 81%  perf-profile.children.cycles-pp.free_pcppages_bulk
0.00  +2.1  2.10 ± 81%  perf-profile.children.cycles-pp.free_unref_page_commit
0.15 ±200%  +2.3  2.40 ± 73%  perf-profile.children.cycles-pp.folio_lruvec_lock_irqsave
0.12 ±200%  +2.3  2.42 ± 77%  perf-profile.children.cycles-pp.smpboot_thread_fn
0.12 ±200%  +2.3  2.45 ± 77%  perf-profile.children.cycles-pp.kthread
0.12 ±200%  +2.3  2.45 ± 77%  perf-profile.children.cycles-pp.ret_from_fork_asm
0.12 ±200%  +2.3  2.45 ± 77%  perf-profile.children.cycles-pp.ret_from_fork
0.14 ±199%  +2.4  2.50 ± 76%  perf-profile.children.cycles-pp.rcu_do_batch
0.16 ±200%  +2.4  2.56 ± 75%  perf-profile.children.cycles-pp.rcu_core
0.34 ±200%  +2.6  2.92 ± 70%  perf-profile.children.cycles-pp.tlb_batch_pages_flush
0.34 ±200%  +2.6  2.92 ± 70%  perf-profile.children.cycles-pp.release_pages
0.46 ±200%  +2.6  3.11 ± 80%  perf-profile.children.cycles-pp.do_anonymous_page
0.38 ±200%  +2.7  3.04 ± 69%  perf-profile.children.cycles-pp.tlb_finish_mmu
0.48 ±200%  +2.7  3.16 ± 79%  perf-profile.children.cycles-pp.__handle_mm_fault
0.50 ±200%  +2.7  3.21 ± 79%  perf-profile.children.cycles-pp.handle_mm_fault
0.53 ±200%  +2.8  3.31 ± 78%  perf-profile.children.cycles-pp.do_user_addr_fault
0.54 ±200%  +2.8  3.32 ± 78%  perf-profile.children.cycles-pp.exc_page_fault
0.58 ±200%  +2.9  3.44 ± 77%  perf-profile.children.cycles-pp.asm_exc_page_fault
0.49 ±200%  +2.9  3.36 ± 68%  perf-profile.children.cycles-pp.unmap_region
1.44 ±200%  +3.2  4.66 ± 65%  perf-profile.children.cycles-pp.stress_judy
0.78 ±200%  +3.5  4.27 ± 67%  perf-profile.children.cycles-pp.do_vmi_align_munmap
0.81 ±200%  +3.6  4.37 ± 67%  perf-profile.children.cycles-pp.__do_sys_brk
0.83 ±199%  +3.6  4.42 ± 67%  perf-profile.children.cycles-pp.brk
0.84 ±200%  +3.6  4.48 ± 67%  perf-profile.children.cycles-pp.do_syscall_64
0.84 ±200%  +3.6  4.49 ± 67%  perf-profile.children.cycles-pp.entry_SYSCALL_64_after_hwframe
0.12 ±200%  +5.7  5.86 ± 83%  perf-profile.children.cycles-pp.native_queued_spin_lock_slowpath
0.16 ±200%  +5.8  6.00 ± 83%  perf-profile.children.cycles-pp._raw_spin_lock_irqsave
13.47 ±200%  +30.1  43.58 ± 65%  perf-profile.children.cycles-pp.JudyLGet
0.02 ±200%  -0.0  0.01 ±300%  perf-profile.self.cycles-pp.perf_trace_sched_stat_runtime
0.01 ±201%  -0.0  0.00  perf-profile.self.cycles-pp.update_cfs_group
0.01 ±200%  -0.0  0.00  perf-profile.self.cycles-pp.update_curr
0.01 ±200%  -0.0  0.00  perf-profile.self.cycles-pp.memcg_slab_post_alloc_hook
0.01 ±200%  -0.0  0.00  perf-profile.self.cycles-pp.check_cpu_stall
0.01 ±200%  -0.0  0.00  perf-profile.self.cycles-pp.arch_scale_freq_tick
0.01 ±300%  -0.0  0.00  perf-profile.self.cycles-pp.cgroup_rstat_updated
0.01 ±200%  +0.0  0.01 ±200%  perf-profile.self.cycles-pp.__get_obj_cgroup_from_memcg
0.01 ±200%  +0.0  0.02 ±152%  perf-profile.self.cycles-pp.calc_global_load_tick
0.00  +0.0  0.01 ±299%  perf-profile.self.cycles-pp.rcu_nocb_try_bypass
0.00  +0.0  0.01 ±300%  perf-profile.self.cycles-pp.zap_pte_range
0.01 ±200%  +0.0  0.02 ±152%  perf-profile.self.cycles-pp.__handle_mm_fault
0.01 ±200%  +0.0  0.02 ±122%  perf-profile.self.cycles-pp._compound_head
0.01 ±201%  +0.0  0.02 ±122%  perf-profile.self.cycles-pp.mtree_range_walk
0.01 ±201%  +0.0  0.02 ±123%  perf-profile.self.cycles-pp.try_charge_memcg
0.01 ±200%  +0.0  0.03 ±100%  perf-profile.self.cycles-pp.__mod_node_page_state
0.02 ±200%  +0.0  0.05 ± 68%  perf-profile.self.cycles-pp.propagate_protected_usage
0.01 ±200%  +0.0  0.04 ± 66%  perf-profile.self.cycles-pp.mem_cgroup_update_lru_size
0.01 ±300%  +0.0  0.03 ±101%  perf-profile.self.cycles-pp.tick_sched_do_timer
0.02 ±200%  +0.0  0.04 ± 66%  perf-profile.self.cycles-pp.hrtimer_active
0.01 ±200%  +0.0  0.04 ± 66%  perf-profile.self.cycles-pp.___perf_sw_event
0.02 ±200%  +0.0  0.05 ± 66%  perf-profile.self.cycles-pp.__kmem_cache_alloc_bulk
0.01 ±200%  +0.0  0.05 ± 66%  perf-profile.self.cycles-pp.task_mm_cid_work
0.01 ±300%  +0.0  0.04 ± 66%  perf-profile.self.cycles-pp.get_page_from_freelist
0.04 ±201%  +0.0  0.08 ± 66%  perf-profile.self.cycles-pp.__count_memcg_events
0.02 ±200%  +0.0  0.05 ± 65%  perf-profile.self.cycles-pp.j__udyLFreeJLL2
0.02 ±200%  +0.0  0.06 ± 66%  perf-profile.self.cycles-pp.release_pages
0.02 ±200%  +0.0  0.06 ± 65%  perf-profile.self.cycles-pp.kmem_cache_alloc
0.02 ±200%  +0.0  0.06 ± 65%  perf-profile.self.cycles-pp.j__udyLCascade3
0.02 ±200%  +0.0  0.06 ± 66%  perf-profile.self.cycles-pp.j__udyLAllocJLL2
0.05 ±200%  +0.0  0.10 ± 66%  perf-profile.self.cycles-pp.__mod_memcg_lruvec_state
0.02 ±200%  +0.0  0.06 ± 66%  perf-profile.self.cycles-pp.mas_wr_node_store
0.02 ±200%  +0.0  0.07 ± 65%  perf-profile.self.cycles-pp.j__udyLFreeJBBJP@plt
0.02 ±200%  +0.0  0.07 ± 65%  perf-profile.self.cycles-pp.j__udyLAllocJBBJP@plt
0.02 ±200%  +0.0  0.07 ± 65%  perf-profile.self.cycles-pp.mod_objcg_state
0.02 ±200%  +0.0  0.07 ± 65%  perf-profile.self.cycles-pp.j__udyLLeaf2ToLeaf3
0.02 ±200%  +0.1  0.08 ± 65%  perf-profile.self.cycles-pp.malloc@plt
0.03 ±200%  +0.1  0.08 ± 65%  perf-profile.self.cycles-pp.free@plt
0.03 ±200%  +0.1  0.08 ± 65%  perf-profile.self.cycles-pp.JudyFree@plt
0.02 ±200%  +0.1  0.08 ± 65%  perf-profile.self.cycles-pp.JudyFree
0.03 ±200%  +0.1  0.08 ± 65%  perf-profile.self.cycles-pp.kmem_cache_free
0.02 ±200%  +0.1  0.08 ± 65%  perf-profile.self.cycles-pp.JudyMalloc
0.05 ±200%  +0.1  0.11 ± 66%  perf-profile.self.cycles-pp.__mod_lruvec_page_state
0.03 ±200%  +0.1  0.08 ± 65%  perf-profile.self.cycles-pp.JudyMalloc@plt
0.04 ±200%  +0.1  0.10 ± 66%  perf-profile.self.cycles-pp.native_flush_tlb_one_user
0.03 ±200%  +0.1  0.10 ± 67%  perf-profile.self.cycles-pp.ktime_get_update_offsets_now
0.00  +0.1  0.07 ± 76%  perf-profile.self.cycles-pp.rmqueue_bulk
0.05 ±200%  +0.1  0.13 ± 65%  perf-profile.self.cycles-pp.perf_adjust_freq_unthr_context
0.04 ±200%  +0.1  0.13 ± 65%  perf-profile.self.cycles-pp.sync_regs
0.04 ±200%  +0.1  0.13 ± 65%  perf-profile.self.cycles-pp.rcu_cblist_dequeue
0.04 ±200%  +0.1  0.14 ± 65%  perf-profile.self.cycles-pp.__pte_offset_map_lock
0.00  +0.1  0.09 ± 69%  perf-profile.self.cycles-pp.__free_one_page
0.04 ±200%  +0.1  0.14 ± 65%  perf-profile.self.cycles-pp._raw_spin_lock_irqsave
0.05 ±200%  +0.1  0.15 ± 65%  perf-profile.self.cycles-pp.__slab_free
0.08 ±200%  +0.1  0.20 ± 66%  perf-profile.self.cycles-pp.native_irq_return_iret
0.06 ±200%  +0.1  0.19 ± 65%  perf-profile.self.cycles-pp.JudyLDel@plt
0.06 ±200%  +0.1  0.19 ± 65%  perf-profile.self.cycles-pp.__intel_pmu_enable_all
0.07 ±200%  +0.2  0.23 ± 65%  perf-profile.self.cycles-pp.JudyLIns@plt
0.05 ±200%  +0.2  0.21 ± 66%  perf-profile.self.cycles-pp.__list_del_entry_valid_or_report
0.06 ±200%  +0.2  0.23 ± 66%  perf-profile.self.cycles-pp.clear_page_erms
0.10 ±200%  +0.2  0.28 ± 66%  perf-profile.self.cycles-pp.get_mem_cgroup_from_mm
0.09 ±200%  +0.2  0.29 ± 66%  perf-profile.self.cycles-pp.page_counter_uncharge
0.00  +0.2  0.20 ± 68%  perf-profile.self.cycles-pp.ktime_get
0.10 ±200%  +0.2  0.33 ± 65%  perf-profile.self.cycles-pp.j__udyLFreeJBBJP
0.12 ±200%  +0.3  0.39 ± 65%  perf-profile.self.cycles-pp.JudyLGet@plt
0.12 ±200%  +0.3  0.40 ± 65%  perf-profile.self.cycles-pp.j__udyLAllocJBBJP
0.15 ±200%  +0.4  0.52 ± 68%  perf-profile.self.cycles-pp.queue_event
0.17 ±200%  +0.4  0.54 ± 65%  perf-profile.self.cycles-pp.cfree
0.25 ±200%  +0.5  0.76 ± 65%  perf-profile.self.cycles-pp.j__udyLCascade2
0.23 ±200%  +0.5  0.75 ± 65%  perf-profile.self.cycles-pp.j__udyLCreateBranchU
0.36 ±200%  +0.8  1.15 ± 65%  perf-profile.self.cycles-pp.malloc
0.78 ±200%  +1.7  2.44 ± 65%  perf-profile.self.cycles-pp.JudyLDel
0.76 ±200%  +1.7  2.48 ± 65%  perf-profile.self.cycles-pp.JudyLIns
1.25 ±200%  +2.8  4.04 ± 65%  perf-profile.self.cycles-pp.stress_judy
0.12 ±200%  +5.7  5.86 ± 83%  perf-profile.self.cycles-pp.native_queued_spin_lock_slowpath
13.20 ±200%  +29.5  42.71 ± 65%  perf-profile.self.cycles-pp.JudyLGet
2886 ± 8%  +3.7%  2993 ± 12%  slabinfo.Acpi-State.active_objs
56.60 ± 8%  +3.7%  58.70 ± 12%  slabinfo.Acpi-State.active_slabs
2886 ± 8%  +3.7%  2993 ± 12%  slabinfo.Acpi-State.num_objs
56.60 ± 8%  +3.7%  58.70 ± 12%  slabinfo.Acpi-State.num_slabs
36.00  +0.0%  36.00  slabinfo.DCCP.active_objs
2.00  +0.0%  2.00  slabinfo.DCCP.active_slabs
36.00  +0.0%  36.00  slabinfo.DCCP.num_objs
2.00  +0.0%  2.00  slabinfo.DCCP.num_slabs
34.00  +0.0%  34.00  slabinfo.DCCPv6.active_objs
2.00  +0.0%  2.00  slabinfo.DCCPv6.active_slabs
34.00  +0.0%  34.00  slabinfo.DCCPv6.num_objs
2.00  +0.0%  2.00  slabinfo.DCCPv6.num_slabs
451.20 ± 2%  -0.7%  448.00  slabinfo.RAW.active_objs
14.10 ± 2%  -0.7%  14.00  slabinfo.RAW.active_slabs
451.20 ± 2%  -0.7%  448.00  slabinfo.RAW.num_objs
14.10 ± 2%  -0.7%  14.00  slabinfo.RAW.num_slabs
278.20 ± 4%  +0.0%  278.20 ± 4%  slabinfo.RAWv6.active_objs
10.70 ± 4%  +0.0%  10.70 ± 4%  slabinfo.RAWv6.active_slabs
278.20 ± 4%  +0.0%  278.20 ± 4%  slabinfo.RAWv6.num_objs
10.70 ± 4%  +0.0%  10.70 ± 4%  slabinfo.RAWv6.num_slabs
55.02 ± 7%  +3.5%  56.97  slabinfo.TCP.active_objs
3.93 ± 7%  +3.5%  4.07  slabinfo.TCP.active_slabs
55.02 ± 7%  +3.5%  56.97  slabinfo.TCP.num_objs
3.93 ± 7%  +3.5%  4.07  slabinfo.TCP.num_slabs
37.70 ± 10%  +3.4%  39.00  slabinfo.TCPv6.active_objs
2.90 ± 10%  +3.4%  3.00  slabinfo.TCPv6.active_slabs
37.70 ± 10%  +3.4%  39.00  slabinfo.TCPv6.num_objs
2.90 ± 10%  +3.4%  3.00  slabinfo.TCPv6.num_slabs
110.40 ± 10%  +6.5%  117.60 ± 6%  slabinfo.UDPv6.active_objs
4.60 ± 10%  +6.5%  4.90 ± 6%  slabinfo.UDPv6.active_slabs
110.40 ± 10%  +6.5%  117.60 ± 6%  slabinfo.UDPv6.num_objs
4.60 ± 10%  +6.5%  4.90 ± 6%  slabinfo.UDPv6.num_slabs
2115 ± 8%  -1.2%  2089 ± 10%  slabinfo.UNIX.active_objs
70.51 ± 8%  -1.2%  69.65 ± 10%  slabinfo.UNIX.active_slabs
2115 ± 8%  -1.2%  2089 ± 10%  slabinfo.UNIX.num_objs
70.51 ± 8%  -1.2%  69.65 ± 10%  slabinfo.UNIX.num_slabs
40729 ± 4%  +2.9%  41929 ± 3%  slabinfo.anon_vma.active_objs
1047 ± 4%  +2.9%  1077 ± 3%  slabinfo.anon_vma.active_slabs
40838 ± 4%  +2.9%  42028 ± 3%  slabinfo.anon_vma.num_objs
1047 ± 4%  +2.9%  1077 ± 3%  slabinfo.anon_vma.num_slabs
61114 ± 2%  +1.7%  62156 ± 2%  slabinfo.anon_vma_chain.active_objs
959.75 ± 2%  +1.8%  976.69 ± 2%  slabinfo.anon_vma_chain.active_slabs
61424 ± 2%  +1.8%  62508 ± 2%  slabinfo.anon_vma_chain.num_objs
959.75 ± 2%  +1.8%  976.69 ± 2%  slabinfo.anon_vma_chain.num_slabs
76.00 ± 15%  +10.5%  84.00 ± 17%  slabinfo.bdev_cache.active_objs
3.80 ± 15%  +10.5%  4.20 ± 17%  slabinfo.bdev_cache.active_slabs
76.00 ± 15%  +10.5%  84.00 ± 17%  slabinfo.bdev_cache.num_objs
3.80 ± 15%  +10.5%  4.20 ± 17%  slabinfo.bdev_cache.num_slabs
454.40 ± 4%  +1.4%  460.80 ± 5%  slabinfo.bio-120.active_objs
7.10 ± 4%  +1.4%  7.20 ± 5%  slabinfo.bio-120.active_slabs
454.40 ± 4%  +1.4%  460.80 ± 5%  slabinfo.bio-120.num_objs
7.10 ± 4%  +1.4%  7.20 ± 5%  slabinfo.bio-120.num_slabs
537.60 ± 22% +3.1% 554.40 ± 8% slabinfo.bio-184.active_objs
12.80 ± 22% +3.1% 13.20 ± 8% slabinfo.bio-184.active_slabs
537.60 ± 22% +3.1% 554.40 ± 8% slabinfo.bio-184.num_objs
12.80 ± 22% +3.1% 13.20 ± 8% slabinfo.bio-184.num_slabs
128.00 +0.0% 128.00 slabinfo.bio-248.active_objs
2.00 +0.0% 2.00 slabinfo.bio-248.active_slabs
128.00 +0.0% 128.00 slabinfo.bio-248.num_objs
2.00 +0.0% 2.00 slabinfo.bio-248.num_slabs
51.00 +0.0% 51.00 slabinfo.bio-296.active_objs
1.00 +0.0% 1.00 slabinfo.bio-296.active_slabs
51.00 +0.0% 51.00 slabinfo.bio-296.num_objs
1.00 +0.0% 1.00 slabinfo.bio-296.num_slabs
121.80 ± 10% +3.4% 126.00 slabinfo.bio-360.active_objs
2.90 ± 10% +3.4% 3.00 slabinfo.bio-360.active_slabs
121.80 ± 10% +3.4% 126.00 slabinfo.bio-360.num_objs
2.90 ± 10% +3.4% 3.00 slabinfo.bio-360.num_slabs
42.00 +0.0% 42.00 slabinfo.bio-376.active_objs
1.00 +0.0% 1.00 slabinfo.bio-376.active_slabs
42.00 +0.0% 42.00 slabinfo.bio-376.num_objs
1.00 +0.0% 1.00 slabinfo.bio-376.num_slabs
36.00 +0.0% 36.00 slabinfo.bio-432.active_objs
1.00 +0.0% 1.00 slabinfo.bio-432.active_slabs
36.00 +0.0% 36.00 slabinfo.bio-432.num_objs
1.00 +0.0% 1.00 slabinfo.bio-432.num_slabs
170.00 +0.0% 170.00 slabinfo.bio_post_read_ctx.active_objs
2.00 +0.0% 2.00 slabinfo.bio_post_read_ctx.active_slabs
170.00 +0.0% 170.00 slabinfo.bio_post_read_ctx.num_objs
2.00 +0.0% 2.00 slabinfo.bio_post_read_ctx.num_slabs
33.60 ± 14% +9.5% 36.80 ± 19% slabinfo.biovec-128.active_objs
2.10 ± 14% +9.5% 2.30 ± 19% slabinfo.biovec-128.active_slabs
33.60 ± 14% +9.5% 36.80 ± 19% slabinfo.biovec-128.num_objs
2.10 ± 14% +9.5% 2.30 ± 19% slabinfo.biovec-128.num_slabs
300.80 ± 9% +7.4% 323.20 ± 12% slabinfo.biovec-64.active_objs
9.40 ± 9% +7.4% 10.10 ± 12% slabinfo.biovec-64.active_slabs
300.80 ± 9% +7.4% 323.20 ± 12% slabinfo.biovec-64.num_objs
9.40 ± 9% +7.4% 10.10 ± 12% slabinfo.biovec-64.num_slabs
56.00 +0.0% 56.00 slabinfo.biovec-max.active_objs
7.00 +0.0% 7.00 slabinfo.biovec-max.active_slabs
56.00 +0.0% 56.00 slabinfo.biovec-max.num_objs
7.00 +0.0% 7.00 slabinfo.biovec-max.num_slabs
136.00 +0.0% 136.00 slabinfo.btrfs_extent_buffer.active_objs
2.00 +0.0% 2.00 slabinfo.btrfs_extent_buffer.active_slabs
136.00 +0.0% 136.00 slabinfo.btrfs_extent_buffer.num_objs
2.00 +0.0% 2.00 slabinfo.btrfs_extent_buffer.num_slabs
0.00 +7.8e+102% 7.80 ±200% slabinfo.btrfs_free_space.active_objs
0.00 +2e+101% 0.20 ±200% slabinfo.btrfs_free_space.active_slabs
0.00 +7.8e+102% 7.80 ±200% slabinfo.btrfs_free_space.num_objs
0.00 +2e+101% 0.20 ±200% slabinfo.btrfs_free_space.num_slabs
104.40 ± 13% -11.1% 92.80 ± 12% slabinfo.btrfs_inode.active_objs
3.60 ± 13% -11.1% 3.20 ± 12% slabinfo.btrfs_inode.active_slabs
104.40 ± 13% -11.1% 92.80 ± 12% slabinfo.btrfs_inode.num_objs
3.60 ± 13% -11.1% 3.20 ± 12% slabinfo.btrfs_inode.num_slabs
211.85 ± 5% +1.7% 215.47 slabinfo.btrfs_path.active_objs
5.88 ± 5% +1.7% 5.99 slabinfo.btrfs_path.active_slabs
211.85 ± 5% +1.7% 215.47 slabinfo.btrfs_path.num_objs
5.88 ± 5% +1.7% 5.99 slabinfo.btrfs_path.num_slabs
156.00 +15.0% 179.40 ± 10% slabinfo.buffer_head.active_objs
4.00 +15.0% 4.60 ± 10% slabinfo.buffer_head.active_slabs
156.00 +15.0% 179.40 ± 10% slabinfo.buffer_head.num_objs
4.00 +15.0% 4.60 ± 10% slabinfo.buffer_head.num_slabs
14481 +0.1% 14494 slabinfo.cred_jar.active_objs
344.80 +0.1% 345.10 slabinfo.cred_jar.active_slabs
14481 +0.1% 14494 slabinfo.cred_jar.num_objs
344.80 +0.1% 345.10 slabinfo.cred_jar.num_slabs
39.00 +0.0% 39.00 slabinfo.dax_cache.active_objs
1.00 +0.0% 1.00 slabinfo.dax_cache.active_slabs
39.00 +0.0% 39.00 slabinfo.dax_cache.num_objs
1.00 +0.0% 1.00 slabinfo.dax_cache.num_slabs
165719 +0.4% 166422 slabinfo.dentry.active_objs
3967 +0.6% 3990 slabinfo.dentry.active_slabs
166647 +0.6% 167586 slabinfo.dentry.num_objs
3967 +0.6% 3990 slabinfo.dentry.num_slabs
30.00 +0.0% 30.00 slabinfo.dmaengine-unmap-128.active_objs
1.00 +0.0% 1.00 slabinfo.dmaengine-unmap-128.active_slabs
30.00 +0.0% 30.00 slabinfo.dmaengine-unmap-128.num_objs
1.00 +0.0% 1.00 slabinfo.dmaengine-unmap-128.num_slabs
64.00 +0.0% 64.00 slabinfo.dmaengine-unmap-2.active_objs
1.00 +0.0% 1.00 slabinfo.dmaengine-unmap-2.active_slabs
64.00 +0.0% 64.00 slabinfo.dmaengine-unmap-2.num_objs
1.00 +0.0% 1.00 slabinfo.dmaengine-unmap-2.num_slabs
15.00 +0.0% 15.00 slabinfo.dmaengine-unmap-256.active_objs
1.00 +0.0% 1.00 slabinfo.dmaengine-unmap-256.active_slabs
15.00 +0.0% 15.00 slabinfo.dmaengine-unmap-256.num_objs
1.00 +0.0% 1.00 slabinfo.dmaengine-unmap-256.num_slabs
24640 ± 10% -1.5% 24268 ± 6% slabinfo.ep_head.active_objs
96.25 ± 10% -1.5% 94.80 ± 6% slabinfo.ep_head.active_slabs
24640 ± 10% -1.5% 24268 ± 6% slabinfo.ep_head.num_objs
96.25 ± 10% -1.5% 94.80 ± 6% slabinfo.ep_head.num_slabs
706.56 ± 7% +1.7% 718.75 ± 9% slabinfo.file_lock_cache.active_objs
19.10 ± 7% +1.7% 19.43 ± 9% slabinfo.file_lock_cache.active_slabs
706.56 ± 7% +1.7% 718.75 ± 9% slabinfo.file_lock_cache.num_objs
19.10 ± 7% +1.7% 19.43 ± 9% slabinfo.file_lock_cache.num_slabs
9531 +1.5% 9673 ± 2% slabinfo.files_cache.active_objs
207.21 +1.5% 210.29 ± 2% slabinfo.files_cache.active_slabs
9531 +1.5% 9673 ± 2% slabinfo.files_cache.num_objs
207.21 +1.5% 210.29 ± 2% slabinfo.files_cache.num_slabs
40884 +0.5% 41085 slabinfo.filp.active_objs
672.40 +0.2% 674.07 ± 3% slabinfo.filp.active_slabs
43033 +0.2% 43140 ± 3% slabinfo.filp.num_objs
672.40 +0.2% 674.07 ± 3% slabinfo.filp.num_slabs
2316 ± 7% +2.8% 2380 ± 8% slabinfo.fsnotify_mark_connector.active_objs
18.10 ± 7% +2.8% 18.60 ± 8% slabinfo.fsnotify_mark_connector.active_slabs
2316 ± 7% +2.8% 2380 ± 8% slabinfo.fsnotify_mark_connector.num_objs
18.10 ± 7% +2.8% 18.60 ± 8% slabinfo.fsnotify_mark_connector.num_slabs
9300 ± 3% +1.2% 9409 ± 5% slabinfo.ftrace_event_field.active_objs
127.40 ± 3% +1.2% 128.90 ± 5% slabinfo.ftrace_event_field.active_slabs
9300 ± 3% +1.2% 9409 ± 5% slabinfo.ftrace_event_field.num_objs
127.40 ± 3% +1.2% 128.90 ± 5% slabinfo.ftrace_event_field.num_slabs
56.00 +0.0% 56.00 slabinfo.fuse_request.active_objs
1.00 +0.0% 1.00 slabinfo.fuse_request.active_slabs
56.00 +0.0% 56.00 slabinfo.fuse_request.num_objs
1.00 +0.0% 1.00 slabinfo.fuse_request.num_slabs
98.00 +0.0% 98.00 slabinfo.hugetlbfs_inode_cache.active_objs
2.00 +0.0% 2.00 slabinfo.hugetlbfs_inode_cache.active_slabs
98.00 +0.0% 98.00 slabinfo.hugetlbfs_inode_cache.num_objs
2.00 +0.0% 2.00 slabinfo.hugetlbfs_inode_cache.num_slabs
105824 +0.1% 105909 slabinfo.inode_cache.active_objs
2077 +0.1% 2079 slabinfo.inode_cache.active_slabs
105950 +0.1% 106056 slabinfo.inode_cache.num_objs
2077 +0.1% 2079 slabinfo.inode_cache.num_slabs
182.50 ± 20% +16.0% 211.70 ± 10% slabinfo.ip_fib_alias.active_objs
2.50 ± 20% +16.0% 2.90 ± 10% slabinfo.ip_fib_alias.active_slabs
182.50 ± 20% +16.0% 211.70 ± 10% slabinfo.ip_fib_alias.num_objs
2.50 ± 20% +16.0% 2.90 ± 10% slabinfo.ip_fib_alias.num_slabs
212.50 ± 20% +16.0% 246.50 ± 10% slabinfo.ip_fib_trie.active_objs
2.50 ± 20% +16.0% 2.90 ± 10% slabinfo.ip_fib_trie.active_slabs
212.50 ± 20% +16.0% 246.50 ± 10% slabinfo.ip_fib_trie.num_objs
2.50 ± 20% +16.0% 2.90 ± 10% slabinfo.ip_fib_trie.num_slabs
168800 +0.0% 168832 slabinfo.kernfs_node_cache.active_objs
2637 +0.0% 2638 slabinfo.kernfs_node_cache.active_slabs
168800 +0.0% 168832 slabinfo.kernfs_node_cache.num_objs
2637 +0.0% 2638 slabinfo.kernfs_node_cache.num_slabs
21600 -0.1% 21570 slabinfo.khugepaged_mm_slot.active_objs
211.77 -0.1% 211.48 slabinfo.khugepaged_mm_slot.active_slabs
21600 -0.1% 21570 slabinfo.khugepaged_mm_slot.num_objs
211.77 -0.1% 211.48 slabinfo.khugepaged_mm_slot.num_slabs
20061 +0.5% 20170 slabinfo.kmalloc-128.active_objs
320.03 +0.0% 320.16 slabinfo.kmalloc-128.active_slabs
20482 +0.0% 20490 slabinfo.kmalloc-128.num_objs
320.03 +0.0% 320.16 slabinfo.kmalloc-128.num_slabs
83404 -0.0% 83399 slabinfo.kmalloc-16.active_objs
325.80 +0.1% 326.10 slabinfo.kmalloc-16.active_slabs
83404 +0.1% 83481 slabinfo.kmalloc-16.num_objs
325.80 +0.1% 326.10 slabinfo.kmalloc-16.num_slabs
21409 +0.1% 21421 slabinfo.kmalloc-192.active_objs
509.82 +0.1% 510.14 slabinfo.kmalloc-192.active_slabs
21412 +0.1% 21425 slabinfo.kmalloc-192.num_objs
509.82 +0.1% 510.14 slabinfo.kmalloc-192.num_slabs
10382 -0.2% 10360 slabinfo.kmalloc-1k.active_objs
325.71 -0.1% 325.33 slabinfo.kmalloc-1k.active_slabs
10422 -0.1% 10410 slabinfo.kmalloc-1k.num_objs
325.71 -0.1% 325.33 slabinfo.kmalloc-1k.num_slabs
20195 +0.2% 20233 slabinfo.kmalloc-256.active_objs
323.34 ± 2% +0.2% 323.86 slabinfo.kmalloc-256.active_slabs
20693 ± 2% +0.2% 20727 slabinfo.kmalloc-256.num_objs
323.34 ± 2% +0.2% 323.86 slabinfo.kmalloc-256.num_slabs
9041 -0.3% 9015 slabinfo.kmalloc-2k.active_objs
566.68 -0.2% 565.41 slabinfo.kmalloc-2k.active_slabs
9066 -0.2% 9046 slabinfo.kmalloc-2k.num_objs
566.68 -0.2% 565.41 slabinfo.kmalloc-2k.num_slabs
175334 -0.1% 175183 slabinfo.kmalloc-32.active_objs
1370 -0.1% 1368 slabinfo.kmalloc-32.active_slabs
175365 -0.1% 175221 slabinfo.kmalloc-32.num_objs
1370 -0.1% 1368 slabinfo.kmalloc-32.num_slabs
3202 -0.6% 3184 slabinfo.kmalloc-4k.active_objs
400.65 -0.5% 398.60 slabinfo.kmalloc-4k.active_slabs
3205 -0.5% 3188 slabinfo.kmalloc-4k.num_objs
400.65 -0.5% 398.60 slabinfo.kmalloc-4k.num_slabs
33467 +0.5% 33643 slabinfo.kmalloc-512.active_objs
524.91 +0.4% 527.19 slabinfo.kmalloc-512.active_slabs
33594 +0.4% 33740 slabinfo.kmalloc-512.num_objs
524.91 +0.4% 527.19 slabinfo.kmalloc-512.num_slabs
172450 -0.1% 172322 slabinfo.kmalloc-64.active_objs
2694 -0.1% 2692 slabinfo.kmalloc-64.active_slabs
172473 -0.1% 172339 slabinfo.kmalloc-64.num_objs
2694 -0.1% 2692 slabinfo.kmalloc-64.num_slabs
138463 +0.5% 139132 slabinfo.kmalloc-8.active_objs
280.42 ± 2% -0.1% 280.24 slabinfo.kmalloc-8.active_slabs
143573 ± 2% -0.1% 143480 slabinfo.kmalloc-8.num_objs
280.42 ± 2% -0.1% 280.24 slabinfo.kmalloc-8.num_slabs
1429 -0.1% 1427 slabinfo.kmalloc-8k.active_objs
357.53 -0.1% 357.17 slabinfo.kmalloc-8k.active_slabs
1430 -0.1% 1428 slabinfo.kmalloc-8k.num_objs
357.53 -0.1% 357.17 slabinfo.kmalloc-8k.num_slabs
38336 +1.3% 38843 ± 2% slabinfo.kmalloc-96.active_objs
932.92 +1.5% 946.90 ± 2% slabinfo.kmalloc-96.active_slabs
39182 +1.5% 39769 ± 2% slabinfo.kmalloc-96.num_objs
932.92 +1.5% 946.90 ± 2% slabinfo.kmalloc-96.num_slabs
1542 ± 2% -0.4% 1536 ± 5% slabinfo.kmalloc-cg-128.active_objs
24.10 ± 2% -0.4% 24.00 ± 5% slabinfo.kmalloc-cg-128.active_slabs
1542 ± 2% -0.4% 1536 ± 5% slabinfo.kmalloc-cg-128.num_objs
24.10 ± 2% -0.4% 24.00 ± 5% slabinfo.kmalloc-cg-128.num_slabs
4325 ± 2% -1.7% 4252 ± 4% slabinfo.kmalloc-cg-16.active_objs
16.90 ± 2% -1.7% 16.61 ± 4% slabinfo.kmalloc-cg-16.active_slabs
4325 ± 2% -1.7% 4252 ± 4% slabinfo.kmalloc-cg-16.num_objs
16.90 ± 2% -1.7% 16.61 ± 4% slabinfo.kmalloc-cg-16.num_slabs
8798 ± 2% +2.3% 9004 slabinfo.kmalloc-cg-192.active_objs
209.49 ± 2% +2.3% 214.40 slabinfo.kmalloc-cg-192.active_slabs
8798 ± 2% +2.3% 9004 slabinfo.kmalloc-cg-192.num_objs
209.49 ± 2% +2.3% 214.40 slabinfo.kmalloc-cg-192.num_slabs
6641 ± 3% +2.4% 6802 ± 3% slabinfo.kmalloc-cg-1k.active_objs
207.56 ± 3% +2.4% 212.58 ± 3% slabinfo.kmalloc-cg-1k.active_slabs
6641 ± 3% +2.4% 6802 ± 3% slabinfo.kmalloc-cg-1k.num_objs
207.56 ± 3% +2.4% 212.58 ± 3% slabinfo.kmalloc-cg-1k.num_slabs
1472 ± 4% -6.1% 1382 ± 5% slabinfo.kmalloc-cg-256.active_objs
23.00 ± 4% -6.1% 21.60 ± 5% slabinfo.kmalloc-cg-256.active_slabs
1472 ± 4% -6.1% 1382 ± 5% slabinfo.kmalloc-cg-256.num_objs
23.00 ± 4% -6.1% 21.60 ± 5% slabinfo.kmalloc-cg-256.num_slabs
1018 ± 3% +2.2% 1040 ± 4% slabinfo.kmalloc-cg-2k.active_objs
63.66 ± 3% +2.2% 65.06 ± 4% slabinfo.kmalloc-cg-2k.active_slabs
1018 ± 3% +2.2% 1040 ± 4% slabinfo.kmalloc-cg-2k.num_objs
63.66 ± 3% +2.2% 65.06 ± 4% slabinfo.kmalloc-cg-2k.num_slabs
29379 +0.1% 29422 slabinfo.kmalloc-cg-32.active_objs
229.52 +0.1% 229.87 slabinfo.kmalloc-cg-32.active_slabs
29379 +0.1% 29422 slabinfo.kmalloc-cg-32.num_objs
229.52 +0.1% 229.87 slabinfo.kmalloc-cg-32.num_slabs
2697 -0.5% 2683 slabinfo.kmalloc-cg-4k.active_objs
340.10 -0.6% 338.10 slabinfo.kmalloc-cg-4k.active_slabs
2720 -0.6% 2704 slabinfo.kmalloc-cg-4k.num_objs
340.10 -0.6% 338.10 slabinfo.kmalloc-cg-4k.num_slabs
14431 -0.1% 14419 slabinfo.kmalloc-cg-512.active_objs
225.50 -0.1% 225.30 slabinfo.kmalloc-cg-512.active_slabs
14431 -0.1% 14419 slabinfo.kmalloc-cg-512.num_objs
225.50 -0.1% 225.30 slabinfo.kmalloc-cg-512.num_slabs
3134 ± 4% -0.2% 3127 ± 7% slabinfo.kmalloc-cg-64.active_objs
48.97 ± 4% -0.2% 48.87 ± 7% slabinfo.kmalloc-cg-64.active_slabs
3134 ± 4% -0.2% 3127 ± 7% slabinfo.kmalloc-cg-64.num_objs
48.97 ± 4% -0.2% 48.87 ± 7% slabinfo.kmalloc-cg-64.num_slabs
115595 +0.1% 115732 slabinfo.kmalloc-cg-8.active_objs
225.77 +0.1% 226.04 slabinfo.kmalloc-cg-8.active_slabs
115595 +0.1% 115732 slabinfo.kmalloc-cg-8.num_objs
225.77 +0.1% 226.04 slabinfo.kmalloc-cg-8.num_slabs
50.61 ± 3% +0.3% 50.74 ± 3% slabinfo.kmalloc-cg-8k.active_objs
12.65 ± 3% +0.3% 12.68 ± 3% slabinfo.kmalloc-cg-8k.active_slabs
50.61 ± 3% +0.3% 50.74 ± 3% slabinfo.kmalloc-cg-8k.num_objs
12.65 ± 3% +0.3% 12.68 ± 3% slabinfo.kmalloc-cg-8k.num_slabs
1931 ± 4% +2.8% 1986 ± 7% slabinfo.kmalloc-cg-96.active_objs
46.00 ± 4% +2.8% 47.30 ± 7% slabinfo.kmalloc-cg-96.active_slabs
1931 ± 4% +2.8% 1986 ± 7% slabinfo.kmalloc-cg-96.num_objs
46.00 ± 4% +2.8% 47.30 ± 7% slabinfo.kmalloc-cg-96.num_slabs
300.80 ± 13% +2.1% 307.20 ± 8% slabinfo.kmalloc-rcl-128.active_objs
4.70 ± 13% +2.1% 4.80 ± 8% slabinfo.kmalloc-rcl-128.active_slabs
300.80 ± 13% +2.1% 307.20 ± 8% slabinfo.kmalloc-rcl-128.num_objs
4.70 ± 13% +2.1% 4.80 ± 8% slabinfo.kmalloc-rcl-128.num_slabs
42.00 +0.0% 42.00 slabinfo.kmalloc-rcl-192.active_objs
1.00 +0.0% 1.00 slabinfo.kmalloc-rcl-192.active_slabs
42.00 +0.0% 42.00 slabinfo.kmalloc-rcl-192.num_objs
1.00 +0.0% 1.00 slabinfo.kmalloc-rcl-192.num_slabs
8078 ± 3% -0.7% 8020 ± 4% slabinfo.kmalloc-rcl-64.active_objs
126.36 ± 3% -0.7% 125.46 ± 3% slabinfo.kmalloc-rcl-64.active_slabs
8087 ± 3% -0.7% 8029 ± 3% slabinfo.kmalloc-rcl-64.num_objs
126.36 ± 3% -0.7% 125.46 ± 3% slabinfo.kmalloc-rcl-64.num_slabs
1247 ± 7% +0.0% 1247 ± 7% slabinfo.kmalloc-rcl-96.active_objs
29.70 ± 7% +0.0% 29.70 ± 7% slabinfo.kmalloc-rcl-96.active_slabs
1247 ± 7% +0.0% 1247 ± 7% slabinfo.kmalloc-rcl-96.num_objs
29.70 ± 7% +0.0% 29.70 ± 7% slabinfo.kmalloc-rcl-96.num_slabs
1113 ± 6% -9.2% 1011 ± 10% slabinfo.kmem_cache.active_objs
17.40 ± 6% -9.2% 15.80 ± 10% slabinfo.kmem_cache.active_slabs
1113 ± 6% -9.2% 1011 ± 10% slabinfo.kmem_cache.num_objs
17.40 ± 6% -9.2% 15.80 ± 10% slabinfo.kmem_cache.num_slabs
1297 ± 5% -7.4% 1201 ± 8% slabinfo.kmem_cache_node.active_objs
20.30 ± 5% -7.4% 18.80 ± 8% slabinfo.kmem_cache_node.active_slabs
1299 ± 5% -7.4% 1203 ± 8% slabinfo.kmem_cache_node.num_objs
20.30 ± 5% -7.4% 18.80 ± 8% slabinfo.kmem_cache_node.num_slabs
44655 -0.2% 44587 slabinfo.lsm_file_cache.active_objs
263.38 -0.1% 263.16 slabinfo.lsm_file_cache.active_slabs
44775 -0.1% 44737 slabinfo.lsm_file_cache.num_objs
263.38 -0.1% 263.16 slabinfo.lsm_file_cache.num_slabs
48621 ± 2% +7.3% 52173 ± 4% slabinfo.maple_node.active_objs
772.36 ± 2% +8.3% 836.19 ± 3% slabinfo.maple_node.active_slabs
49431 ± 2% +8.3% 53516 ± 3% slabinfo.maple_node.num_objs
772.36 ± 2% +8.3% 836.19 ± 3% slabinfo.maple_node.num_slabs
5734 +1.2% 5801 slabinfo.mm_struct.active_objs
238.95 +1.2% 241.73 slabinfo.mm_struct.active_slabs
5734 +1.2% 5801 slabinfo.mm_struct.num_objs
238.95 +1.2% 241.73 slabinfo.mm_struct.num_slabs
1116 ± 6% -0.5% 1111 ± 5% slabinfo.mnt_cache.active_objs
21.90 ± 6% -0.5% 21.80 ± 5% slabinfo.mnt_cache.active_slabs
1116 ± 6% -0.5% 1111 ± 5% slabinfo.mnt_cache.num_objs
21.90 ± 6% -0.5% 21.80 ± 5% slabinfo.mnt_cache.num_slabs
34.00 +0.0% 34.00 slabinfo.mqueue_inode_cache.active_objs
1.00 +0.0% 1.00 slabinfo.mqueue_inode_cache.active_slabs
34.00 +0.0% 34.00 slabinfo.mqueue_inode_cache.num_objs
1.00 +0.0% 1.00 slabinfo.mqueue_inode_cache.num_slabs
1792 +0.2% 1795 slabinfo.names_cache.active_objs
224.00 +0.2% 224.40 slabinfo.names_cache.active_slabs
1792 +0.2% 1795 slabinfo.names_cache.num_objs
224.00 +0.2% 224.40 slabinfo.names_cache.num_slabs
7.00 +0.0% 7.00 slabinfo.net_namespace.active_objs
1.00 +0.0% 1.00 slabinfo.net_namespace.active_slabs
7.00 +0.0% 7.00 slabinfo.net_namespace.num_objs
1.00 +0.0% 1.00 slabinfo.net_namespace.num_slabs
46.00 +0.0% 46.00 slabinfo.nfs_commit_data.active_objs
1.00 +0.0% 1.00 slabinfo.nfs_commit_data.active_slabs
46.00 +0.0% 46.00 slabinfo.nfs_commit_data.num_objs
1.00 +0.0% 1.00 slabinfo.nfs_commit_data.num_slabs
36.00 +0.0% 36.00 slabinfo.nfs_read_data.active_objs
1.00 +0.0% 1.00 slabinfo.nfs_read_data.active_slabs
36.00 +0.0% 36.00 slabinfo.nfs_read_data.num_objs
1.00 +0.0% 1.00 slabinfo.nfs_read_data.num_slabs
292.96 ± 8% -0.9% 290.20 ± 8% slabinfo.nsproxy.active_objs
5.23 ± 8% -0.9% 5.18 ± 8% slabinfo.nsproxy.active_slabs
292.96 ± 8% -0.9% 290.20 ± 8% slabinfo.nsproxy.num_objs
5.23 ± 8% -0.9% 5.18 ± 8% slabinfo.nsproxy.num_slabs
240.00 +0.0% 240.00 slabinfo.numa_policy.active_objs
4.00 +0.0% 4.00 slabinfo.numa_policy.active_slabs
240.00 +0.0% 240.00 slabinfo.numa_policy.num_objs
4.00 +0.0% 4.00 slabinfo.numa_policy.num_slabs
10075 -0.5% 10024 slabinfo.perf_event.active_objs
404.18 -0.4% 402.52 slabinfo.perf_event.active_slabs
10104 -0.4% 10062 slabinfo.perf_event.num_objs
404.18 -0.4% 402.52 slabinfo.perf_event.num_slabs
16201 -0.7% 16089 slabinfo.pid.active_objs
253.20 -0.7% 251.40 slabinfo.pid.active_slabs
16204 -0.7% 16089 slabinfo.pid.num_objs
253.20 -0.7% 251.40 slabinfo.pid.num_slabs
13300 -1.0% 13163 slabinfo.pool_workqueue.active_objs
208.12 -1.1% 205.93 slabinfo.pool_workqueue.active_slabs
13319 -1.1% 13179 slabinfo.pool_workqueue.num_objs
208.12 -1.1% 205.93 slabinfo.pool_workqueue.num_slabs
4792 -1.1% 4741 slabinfo.proc_dir_entry.active_objs
114.10 -1.1% 112.90 slabinfo.proc_dir_entry.active_slabs
4792 -1.1% 4741 slabinfo.proc_dir_entry.num_objs
114.10 -1.1% 112.90 slabinfo.proc_dir_entry.num_slabs
29409 +0.4% 29525 slabinfo.proc_inode_cache.active_objs
639.60 +0.4% 642.26 slabinfo.proc_inode_cache.active_slabs
29421 +0.4% 29544 slabinfo.proc_inode_cache.num_objs
639.60 +0.4% 642.26 slabinfo.proc_inode_cache.num_slabs
40983 +0.1% 41043 slabinfo.radix_tree_node.active_objs
732.66 +0.2% 733.78 slabinfo.radix_tree_node.active_slabs
41029 +0.2% 41091 slabinfo.radix_tree_node.num_objs
732.66 +0.2% 733.78 slabinfo.radix_tree_node.num_slabs
294.00 ± 18% +8.3% 318.50 ± 18% slabinfo.request_queue.active_objs
10.20 ± 9% +4.9% 10.70 ± 16% slabinfo.request_queue.active_slabs
357.00 ± 9% +4.9% 374.50 ± 16% slabinfo.request_queue.num_objs
10.20 ± 9% +4.9% 10.70 ± 16% slabinfo.request_queue.num_slabs
46.00 +0.0% 46.00 slabinfo.rpc_inode_cache.active_objs
1.00 +0.0% 1.00 slabinfo.rpc_inode_cache.active_slabs
46.00 +0.0% 46.00 slabinfo.rpc_inode_cache.num_objs
1.00 +0.0% 1.00 slabinfo.rpc_inode_cache.num_slabs
4812 +0.3% 4825 slabinfo.scsi_sense_cache.active_objs
76.20 +0.3% 76.40 slabinfo.scsi_sense_cache.active_slabs
4876 +0.3% 4889 slabinfo.scsi_sense_cache.num_objs
76.20 +0.3% 76.40 slabinfo.scsi_sense_cache.num_slabs
15979 -0.1% 15967 slabinfo.seq_file.active_objs
234.99 -0.1% 234.81 slabinfo.seq_file.active_slabs
15979 -0.1% 15967 slabinfo.seq_file.num_objs
234.99 -0.1% 234.81 slabinfo.seq_file.num_slabs
101821 +0.2% 101974 slabinfo.shared_policy_node.active_objs
1197 +0.2% 1199 slabinfo.shared_policy_node.active_slabs
101821 +0.2% 101974 slabinfo.shared_policy_node.num_objs
1197 +0.2% 1199 slabinfo.shared_policy_node.num_slabs
8779 +0.4% 8816 ± 3% slabinfo.shmem_inode_cache.active_objs
204.19 +0.4% 205.04 ± 3% slabinfo.shmem_inode_cache.active_slabs
8779 +0.4% 8816 ± 3% slabinfo.shmem_inode_cache.num_objs
204.19 +0.4% 205.04 ± 3% slabinfo.shmem_inode_cache.num_slabs
5473 +0.8% 5519 slabinfo.sighand_cache.active_objs
365.02 +0.9% 368.23 slabinfo.sighand_cache.active_slabs
5475 +0.9% 5523 slabinfo.sighand_cache.num_objs
365.02 +0.9% 368.23 slabinfo.sighand_cache.num_slabs
8990 +0.1% 8995 ± 2% slabinfo.signal_cache.active_objs
321.28 +0.1% 321.61 ± 2% slabinfo.signal_cache.active_slabs
8995 +0.1% 9005 ± 2% slabinfo.signal_cache.num_objs
321.28 +0.1% 321.61 ± 2% slabinfo.signal_cache.num_slabs
11923 +0.2% 11946 slabinfo.sigqueue.active_objs
233.79 +0.2% 234.24 slabinfo.sigqueue.active_slabs
11923 +0.2% 11946 slabinfo.sigqueue.num_objs
233.79 +0.2% 234.24 slabinfo.sigqueue.num_slabs
510.09 ± 8% +3.2% 526.37 ± 6% slabinfo.skbuff_ext_cache.active_objs
12.15 ± 8% +3.2% 12.53 ± 6% slabinfo.skbuff_ext_cache.active_slabs
510.09 ± 8% +3.2% 526.37 ± 6% slabinfo.skbuff_ext_cache.num_objs
12.15 ± 8% +3.2% 12.53 ± 6% slabinfo.skbuff_ext_cache.num_slabs
15648 -0.4% 15590 slabinfo.skbuff_head_cache.active_objs
245.30 -0.4% 244.20 slabinfo.skbuff_head_cache.active_slabs
15699 -0.4% 15628 slabinfo.skbuff_head_cache.num_objs
245.30 -0.4% 244.20 slabinfo.skbuff_head_cache.num_slabs
8653 ± 5% -1.9% 8491 ± 5% slabinfo.skbuff_small_head.active_objs
169.68 ± 5% -1.9% 166.50 ± 5% slabinfo.skbuff_small_head.active_slabs
8653 ± 5% -1.9% 8491 ± 5% slabinfo.skbuff_small_head.num_objs
169.68 ± 5% -1.9% 166.50 ± 5% slabinfo.skbuff_small_head.num_slabs
4352 ± 5% -2.8% 4228 ± 5% slabinfo.sock_inode_cache.active_objs
111.59 ± 5% -2.8% 108.42 ± 5% slabinfo.sock_inode_cache.active_slabs
4352 ± 5% -2.8% 4228 ± 5% slabinfo.sock_inode_cache.num_objs
111.59 ± 5% -2.8% 108.42 ± 5% slabinfo.sock_inode_cache.num_slabs
1634 ± 6% +4.9% 1714 ± 5% slabinfo.task_group.active_objs
32.05 ± 6% +4.9% 33.62 ± 5% slabinfo.task_group.active_slabs
1634 ± 6% +4.9% 1714 ± 5% slabinfo.task_group.num_objs
32.05 ± 6% +4.9% 33.62 ± 5% slabinfo.task_group.num_slabs
3787 -0.3% 3777 slabinfo.task_struct.active_objs
3790 -0.3% 3779 slabinfo.task_struct.active_slabs
3790 -0.3% 3779 slabinfo.task_struct.num_objs
3790 -0.3% 3779 slabinfo.task_struct.num_slabs
262.17 ± 8% -1.9% 257.14 ± 9% slabinfo.taskstats.active_objs
7.09 ± 8% -1.9% 6.95 ± 9% slabinfo.taskstats.active_slabs
262.17 ± 8% -1.9% 257.14 ± 9% slabinfo.taskstats.num_objs
7.09 ± 8% -1.9% 6.95 ± 9% slabinfo.taskstats.num_slabs
2350 -0.4% 2341 slabinfo.trace_event_file.active_objs
51.10 -0.4% 50.90 slabinfo.trace_event_file.active_slabs
2350 -0.4% 2341 slabinfo.trace_event_file.num_objs
51.10 -0.4% 50.90 slabinfo.trace_event_file.num_slabs
2394 +0.3% 2400 slabinfo.tracefs_inode_cache.active_objs
47.89 +0.3% 48.02 slabinfo.tracefs_inode_cache.active_slabs
2394 +0.3% 2400 slabinfo.tracefs_inode_cache.num_objs
47.89 +0.3% 48.02 slabinfo.tracefs_inode_cache.num_slabs
60.00 +0.0% 60.00 slabinfo.tw_sock_TCP.active_objs
1.00 +0.0% 1.00 slabinfo.tw_sock_TCP.active_slabs
60.00 +0.0% 60.00 slabinfo.tw_sock_TCP.num_objs
1.00 +0.0% 1.00 slabinfo.tw_sock_TCP.num_slabs
74.00 +0.0% 74.00 slabinfo.uts_namespace.active_objs
2.00 +0.0% 2.00 slabinfo.uts_namespace.active_slabs
74.00 +0.0% 74.00 slabinfo.uts_namespace.num_objs
2.00 +0.0% 2.00 slabinfo.uts_namespace.num_slabs
85556 -2.1% 83786 ± 4% slabinfo.vm_area_struct.active_objs
1952 -1.8% 1916 ± 4% slabinfo.vm_area_struct.active_slabs
85920 -1.8% 84334 ± 4% slabinfo.vm_area_struct.num_objs
1952 -1.8% 1916 ± 4% slabinfo.vm_area_struct.num_slabs
98458 -0.5% 97931 ± 2% slabinfo.vma_lock.active_objs
969.74 -0.4% 965.79 ± 2% slabinfo.vma_lock.active_slabs
98913 -0.4% 98510 ± 2% slabinfo.vma_lock.num_objs
969.74 -0.4% 965.79 ± 2% slabinfo.vma_lock.num_slabs
811570 +0.0% 811625 slabinfo.vmap_area.active_objs
14497 +0.0% 14499 slabinfo.vmap_area.active_slabs
811855 +0.0% 811960 slabinfo.vmap_area.num_objs
14497 +0.0% 14499 slabinfo.vmap_area.num_slabs
276.34 +0.1% 276.48 slabinfo.x86_lbr.active_objs
13.95 +0.0% 13.96 slabinfo.x86_lbr.active_slabs
293.03 +0.0% 293.07 slabinfo.x86_lbr.num_objs
13.95 +0.0% 13.96 slabinfo.x86_lbr.num_slabs

0.00 +1.4e+100% 0.01 ±146% perf-sched.sch_delay.avg.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.do_anonymous_page
0.00 +4.1e+101% 0.41 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.__alloc_pages.__pud_alloc.__handle_mm_fault.handle_mm_fault
0.00 +7e+98% 0.00 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.__alloc_pages.pte_alloc_one.__pte_alloc.move_page_tables
0.01 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.__flush_work.isra.0.__lru_add_drain_all
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.acpi_ex_allocate_name_string.acpi_ex_get_name_string
0.00 +7e+98% 0.00 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.load_elf_phdrs.load_elf_binary
0.00 +8e+99% 0.01 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.allocate_slab
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_node_trace.alloc_fair_sched_group.sched_create_group
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.perf_event_mmap_event.perf_event_mmap
0.00 +1.4e+99% 0.00 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.vmstat_start.seq_read_iter
0.00 +1.1e+99% 0.00 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.__put_anon_vma.unlink_anon_vmas.free_pgtables.exit_mmap
0.00 +1.4e+99% 0.00 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.__vmalloc_area_node.__vmalloc_node_range.alloc_thread_stack_node.dup_task_struct
0.00 ± 96% +50.0% 0.00 ± 80% perf-sched.sch_delay.avg.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity
0.18 ±292% -97.9% 0.00 ±156% perf-sched.sch_delay.avg.ms.__cond_resched.__wait_for_common.wait_for_completion_state.kernel_clone.__x64_sys_vfork
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.cgroup_css_set_fork.cgroup_can_fork.copy_process.kernel_clone
0.00 +7.8e+100% 0.08 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.change_pmd_range.change_p4d_range.change_protection_range.mprotect_fixup
0.00 +9e+98% 0.00 ±299% perf-sched.sch_delay.avg.ms.__cond_resched.copy_strings.isra.0.do_execveat_common
0.00 +3.7e+101% 0.37 ±299% perf-sched.sch_delay.avg.ms.__cond_resched.dentry_kill.dput.step_into.link_path_walk
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.down_read.walk_component.path_lookupat.filename_lookup
0.00 +1.1e+99% 0.00 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.down_write.anon_vma_clone.__split_vma.vma_modify
0.05 ±298% +730.6% 0.39 ±106% perf-sched.sch_delay.avg.ms.__cond_resched.down_write_killable.exec_mmap.begin_new_exec.load_elf_binary
0.00 +7e+98% 0.00 ±299% perf-sched.sch_delay.avg.ms.__cond_resched.down_write_killable.setup_arg_pages.load_elf_binary.search_binary_handler
0.00 +5.1e+100% 0.05 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.dput.__fput.__x64_sys_close.do_syscall_64
0.00 +2.5e+101% 0.25 ±298% perf-sched.sch_delay.avg.ms.__cond_resched.dput.step_into.link_path_walk.part
0.00 +2.7e+101% 0.27 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.dput.step_into.path_lookupat.filename_lookup
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.dput.terminate_walk.path_openat.do_filp_open
0.00 +1e+99% 0.00 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.exit_mmap.__mmput.exec_mmap.begin_new_exec
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.filemap_read.__kernel_read.search_binary_handler.exec_binprm
0.00 +8e+98% 0.00 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.khugepaged.kthread.ret_from_fork.ret_from_fork_asm
0.00 +1e+99% 0.00 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.__anon_vma_prepare.do_anonymous_page.__handle_mm_fault
0.00 +2.7e+99% 0.00 ±299% perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.acpi_ut_create_generic_state.acpi_ds_result_push.acpi_ds_exec_end_op
0.00 ±152% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.alloc_pid.copy_process.kernel_clone
0.00 +2.5e+101% 0.25 ±299% perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.getname_flags.part.0
0.05 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.__split_vma
0.00 +4.2e+100% 0.04 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.vma_link
0.00 ±200% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.alloc_bprm.do_execveat_common
0.00 +1.3e+99% 0.00 ±299% perf-sched.sch_delay.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.dup_mmap.dup_mm
0.00 +1.6e+99% 0.00 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.mutex_lock.drm_property_free_blob.drm_gem_destroy_shadow_plane_state.drm_atomic_state_default_clear
0.00 +5.4e+101% 0.54 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.mutex_lock_interruptible.devkmsg_read.vfs_read.ksys_read
0.00 +9.1e+101% 0.91 ±130% perf-sched.sch_delay.avg.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.alloc_bprm
0.00 +7.9e+99% 0.01 ±133% perf-sched.sch_delay.avg.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.dup_mm
0.01 ±299% -65.0% 0.00 ±223% perf-sched.sch_delay.avg.ms.__cond_resched.process_one_work.worker_thread.kthread.ret_from_fork
0.00 +2.1e+99% 0.00 ±213% perf-sched.sch_delay.avg.ms.__cond_resched.refresh_cpu_vm_stats.vmstat_update.process_one_work.worker_thread
0.30 ±299% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.remove_vma.do_vmi_align_munmap.do_vmi_munmap.__vm_munmap
0.00 +8.1e+101% 0.81 ±155% perf-sched.sch_delay.avg.ms.__cond_resched.shmem_get_folio_gfp.shmem_read_folio_gfp.drm_gem_get_pages.drm_gem_shmem_get_pages
0.01 ± 3% -5.8% 0.01 ± 4% perf-sched.sch_delay.avg.ms.__cond_resched.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm
1.77 ± 5% +4.7% 1.85 ± 10% perf-sched.sch_delay.avg.ms.__cond_resched.stop_one_cpu.sched_exec.bprm_execve.part
0.33 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode
0.00 +2.2e+99% 0.00 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.task_work_run.get_signal.arch_do_signal_or_restart.exit_to_user_mode_loop
0.00 +5.9e+100% 0.06 ±300% perf-sched.sch_delay.avg.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.exit_mmap.__mmput
0.00 +3.1e+100% 0.03 ±201% perf-sched.sch_delay.avg.ms.__cond_resched.unmap_vmas.unmap_region.constprop.0
0.31 ±201% +246.6% 1.06 ±228% perf-sched.sch_delay.avg.ms.__cond_resched.wp_page_copy.__handle_mm_fault.handle_mm_fault.do_user_addr_fault
1.67 ±152% +24.2% 2.08 ±122% perf-sched.sch_delay.avg.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_fbdev_generic_helper_fb_dirty.drm_fb_helper_damage_work
1.06 ±200% +40.8% 1.50 ±153% perf-sched.sch_delay.avg.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_gem_fb_vunmap.drm_atomic_helper_cleanup_planes
0.00 +4.7e+99% 0.00 ±192% perf-sched.sch_delay.avg.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range
0.69 ± 83% -69.5% 0.21 ±172% perf-sched.sch_delay.avg.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown]
0.20 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.cleaner_kthread.kthread.ret_from_fork.ret_from_fork_asm
0.84 ± 69% +1807.1% 15.92 ±108% perf-sched.sch_delay.avg.ms.devkmsg_read.vfs_read.ksys_read.do_syscall_64
0.42 ±119% -48.3% 0.22 ±121% perf-sched.sch_delay.avg.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep
0.14 ± 54% +235.6% 0.46 ± 31% perf-sched.sch_delay.avg.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64
0.16 ± 32% +37.1% 0.22 ± 20% perf-sched.sch_delay.avg.ms.do_wait.kernel_wait4.__do_sys_wait4.do_syscall_64
0.01 ±208% +6.1% 0.01 ±123% perf-sched.sch_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_exc_page_fault
0.02 ± 41% +249.5% 0.07 ± 68% perf-sched.sch_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.futex_wait_queue.futex_wait.do_futex.__x64_sys_futex
0.20 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.io_schedule.folio_wait_bit_common.filemap_fault.__do_fault
0.28 ±297% -99.2% 0.00 ±300% perf-sched.sch_delay.avg.ms.kthreadd.ret_from_fork.ret_from_fork_asm
0.04 ± 40% -28.4% 0.03 ± 28% perf-sched.sch_delay.avg.ms.pipe_read.vfs_read.ksys_read.do_syscall_64
0.40 ± 23% -27.9% 0.29 ± 40% perf-sched.sch_delay.avg.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll
2.21 ± 49% +9.3% 2.42 ± 42% perf-sched.sch_delay.avg.ms.schedule_hrtimeout_range_clock.do_select.core_sys_select.kern_select
0.25 ± 62% +79.2% 0.46 ±168% perf-sched.sch_delay.avg.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait
0.00 ±202% -28.6% 0.00 ±300% perf-sched.sch_delay.avg.ms.schedule_preempt_disabled.kthread.ret_from_fork.ret_from_fork_asm
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.avg.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read_killable.__access_remote_vm
2.67 ±203% -30.4% 1.86 ±300% perf-sched.sch_delay.avg.ms.schedule_timeout.__wait_for_common.wait_for_completion_killable.__kthread_create_on_node
0.45 ± 23% +14.3% 0.52 ± 16% perf-sched.sch_delay.avg.ms.schedule_timeout.__wait_for_common.wait_for_completion_state.kernel_clone
0.02 ± 25% -13.2% 0.02 ± 23% perf-sched.sch_delay.avg.ms.schedule_timeout.kcompactd.kthread.ret_from_fork
0.02 ± 45% +11.2% 0.03 ± 39% perf-sched.sch_delay.avg.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread
0.00 ±299% -100.0% 0.00 perf-sched.sch_delay.avg.ms.schedule_timeout.transaction_kthread.kthread.ret_from_fork
0.01 +77.7% 0.02 ± 32% perf-sched.sch_delay.avg.ms.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm
0.96 ± 77% +1111.2% 11.68 ± 99% perf-sched.sch_delay.avg.ms.syslog_print.do_syslog.kmsg_read.vfs_read
0.12 ±102% +410.5% 0.62 ± 19% perf-sched.sch_delay.avg.ms.wait_for_partner.fifo_open.do_dentry_open.do_open
0.02 ± 10% +57.8% 0.04 ± 48% perf-sched.sch_delay.avg.ms.worker_thread.kthread.ret_from_fork.ret_from_fork_asm
0.00 +6.6e+101% 0.66 ±159% perf-sched.sch_delay.max.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.do_anonymous_page
0.00 +4.1e+101% 0.41 ±300% perf-sched.sch_delay.max.ms.__cond_resched.__alloc_pages.__pud_alloc.__handle_mm_fault.handle_mm_fault
0.00 +7e+98% 0.00 ±300% perf-sched.sch_delay.max.ms.__cond_resched.__alloc_pages.pte_alloc_one.__pte_alloc.move_page_tables
0.01 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.__flush_work.isra.0.__lru_add_drain_all
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.acpi_ex_allocate_name_string.acpi_ex_get_name_string
0.00 +7e+98% 0.00 ±300% perf-sched.sch_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.load_elf_phdrs.load_elf_binary
0.00 +1.6e+100% 0.02 ±300% perf-sched.sch_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.allocate_slab
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_node_trace.alloc_fair_sched_group.sched_create_group
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.perf_event_mmap_event.perf_event_mmap
0.00 +1.4e+99% 0.00 ±300% perf-sched.sch_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.vmstat_start.seq_read_iter
0.00 +1.1e+99% 0.00 ±300% perf-sched.sch_delay.max.ms.__cond_resched.__put_anon_vma.unlink_anon_vmas.free_pgtables.exit_mmap
0.00 +1.4e+99% 0.00 ±300% perf-sched.sch_delay.max.ms.__cond_resched.__vmalloc_area_node.__vmalloc_node_range.alloc_thread_stack_node.dup_task_struct
1.62 ± 97% +65.5% 2.69 ± 82% perf-sched.sch_delay.max.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity
0.18 ±292% -97.9% 0.00 ±156% perf-sched.sch_delay.max.ms.__cond_resched.__wait_for_common.wait_for_completion_state.kernel_clone.__x64_sys_vfork
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.cgroup_css_set_fork.cgroup_can_fork.copy_process.kernel_clone
0.00 +2.3e+101% 0.23 ±300% perf-sched.sch_delay.max.ms.__cond_resched.change_pmd_range.change_p4d_range.change_protection_range.mprotect_fixup
0.00 +9e+98% 0.00 ±299% perf-sched.sch_delay.max.ms.__cond_resched.copy_strings.isra.0.do_execveat_common
0.00 +3.7e+101% 0.37 ±299% perf-sched.sch_delay.max.ms.__cond_resched.dentry_kill.dput.step_into.link_path_walk
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.down_read.walk_component.path_lookupat.filename_lookup
0.00 +1.1e+99% 0.00 ±300% perf-sched.sch_delay.max.ms.__cond_resched.down_write.anon_vma_clone.__split_vma.vma_modify
0.19 ±298% +433.1% 1.00 ± 87% perf-sched.sch_delay.max.ms.__cond_resched.down_write_killable.exec_mmap.begin_new_exec.load_elf_binary
0.00 +7e+98% 0.00 ±299% perf-sched.sch_delay.max.ms.__cond_resched.down_write_killable.setup_arg_pages.load_elf_binary.search_binary_handler
0.00 +3.6e+101% 0.36 ±299% perf-sched.sch_delay.max.ms.__cond_resched.dput.__fput.__x64_sys_close.do_syscall_64
0.00 +2.5e+101% 0.25 ±298% perf-sched.sch_delay.max.ms.__cond_resched.dput.step_into.link_path_walk.part
0.00 +2.7e+101% 0.27 ±300% perf-sched.sch_delay.max.ms.__cond_resched.dput.step_into.path_lookupat.filename_lookup
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.dput.terminate_walk.path_openat.do_filp_open
0.00 +1e+99% 0.00 ±300% perf-sched.sch_delay.max.ms.__cond_resched.exit_mmap.__mmput.exec_mmap.begin_new_exec
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.filemap_read.__kernel_read.search_binary_handler.exec_binprm
0.00 +1.6e+99% 0.00 ±300% perf-sched.sch_delay.max.ms.__cond_resched.khugepaged.kthread.ret_from_fork.ret_from_fork_asm
0.00 +1e+99% 0.00 ±300% perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.__anon_vma_prepare.do_anonymous_page.__handle_mm_fault
0.00 +2.7e+99% 0.00 ±299% perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.acpi_ut_create_generic_state.acpi_ds_result_push.acpi_ds_exec_end_op
0.00 ±152% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.alloc_pid.copy_process.kernel_clone
0.00 +2.5e+101% 0.25 ±299% perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.getname_flags.part.0
0.26 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.__split_vma
0.00 +4.2e+100% 0.04 ±300% perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.vma_link
0.00 ±200% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.alloc_bprm.do_execveat_common
0.00 +1.3e+99% 0.00 ±299% perf-sched.sch_delay.max.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.dup_mmap.dup_mm
0.00 +1.6e+99% 0.00 ±300% perf-sched.sch_delay.max.ms.__cond_resched.mutex_lock.drm_property_free_blob.drm_gem_destroy_shadow_plane_state.drm_atomic_state_default_clear
0.00 +5.4e+101% 0.54 ±300% perf-sched.sch_delay.max.ms.__cond_resched.mutex_lock_interruptible.devkmsg_read.vfs_read.ksys_read
0.00 +1.2e+102% 1.24 ±129% perf-sched.sch_delay.max.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.alloc_bprm
0.00 +7.9e+99% 0.01 ±133% perf-sched.sch_delay.max.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.dup_mm
0.01 ±299% -65.0% 0.00 ±223% perf-sched.sch_delay.max.ms.__cond_resched.process_one_work.worker_thread.kthread.ret_from_fork
0.00 +2.1e+99% 0.00 ±213% perf-sched.sch_delay.max.ms.__cond_resched.refresh_cpu_vm_stats.vmstat_update.process_one_work.worker_thread
0.30 ±299% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.remove_vma.do_vmi_align_munmap.do_vmi_munmap.__vm_munmap
0.00 +1.3e+102% 1.30 ±158% perf-sched.sch_delay.max.ms.__cond_resched.shmem_get_folio_gfp.shmem_read_folio_gfp.drm_gem_get_pages.drm_gem_shmem_get_pages
0.02 ± 22% +4.5% 0.02 ± 23% perf-sched.sch_delay.max.ms.__cond_resched.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm
4.01 ± 3% -0.8% 3.97 ± 2% perf-sched.sch_delay.max.ms.__cond_resched.stop_one_cpu.sched_exec.bprm_execve.part
0.33 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode
0.00 +2.2e+99% 0.00 ±300% perf-sched.sch_delay.max.ms.__cond_resched.task_work_run.get_signal.arch_do_signal_or_restart.exit_to_user_mode_loop
0.00 +1.2e+101% 0.12 ±300% perf-sched.sch_delay.max.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.exit_mmap.__mmput
0.00 +5.7e+101% 0.57 ±200% perf-sched.sch_delay.max.ms.__cond_resched.unmap_vmas.unmap_region.constprop.0
0.44 ±205% +141.1% 1.06 ±228% perf-sched.sch_delay.max.ms.__cond_resched.wp_page_copy.__handle_mm_fault.handle_mm_fault.do_user_addr_fault
1.67 ±152% +24.2% 2.08 ±122% perf-sched.sch_delay.max.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_fbdev_generic_helper_fb_dirty.drm_fb_helper_damage_work
1.06 ±200% +40.8% 1.50 ±153% perf-sched.sch_delay.max.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_gem_fb_vunmap.drm_atomic_helper_cleanup_planes
0.00 +3.7e+100% 0.04 ±191% perf-sched.sch_delay.max.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range
3.10 ± 59% -60.7% 1.22 ±161% perf-sched.sch_delay.max.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown]
0.20 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.cleaner_kthread.kthread.ret_from_fork.ret_from_fork_asm
1.64 ± 70% +909.5% 16.51 ±101% perf-sched.sch_delay.max.ms.devkmsg_read.vfs_read.ksys_read.do_syscall_64
2.08 ±108% -37.6% 1.30 ±122% perf-sched.sch_delay.max.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep
3.28 ± 34% +29.2% 4.24 ± 13% perf-sched.sch_delay.max.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64
3.32 ± 27% -2.9% 3.22 ± 23% perf-sched.sch_delay.max.ms.do_wait.kernel_wait4.__do_sys_wait4.do_syscall_64
0.53 ±209% +168.8% 1.43 ± 93% perf-sched.sch_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_exc_page_fault
0.55 ±194% +60.5% 0.88 ±201% perf-sched.sch_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi
2.77 ± 30% +150.3% 6.93 ±142% perf-sched.sch_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.futex_wait_queue.futex_wait.do_futex.__x64_sys_futex
0.40 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.io_schedule.folio_wait_bit_common.filemap_fault.__do_fault
0.28 ±297% -99.2% 0.00 ±300% perf-sched.sch_delay.max.ms.kthreadd.ret_from_fork.ret_from_fork_asm
3.24 ± 30% -10.1% 2.91 ± 47% perf-sched.sch_delay.max.ms.pipe_read.vfs_read.ksys_read.do_syscall_64
3.63 ± 18% -14.5% 3.10 ± 33% perf-sched.sch_delay.max.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll
3.25 ± 30% +14.0% 3.70 ± 27% perf-sched.sch_delay.max.ms.schedule_hrtimeout_range_clock.do_select.core_sys_select.kern_select
2.83 ± 57% +81.2% 5.12 ±167% perf-sched.sch_delay.max.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait
0.00 ±202% -28.6% 0.00 ±300% perf-sched.sch_delay.max.ms.schedule_preempt_disabled.kthread.ret_from_fork.ret_from_fork_asm
0.00 ±300% -100.0% 0.00 perf-sched.sch_delay.max.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read_killable.__access_remote_vm
2.67 ±203% -30.4% 1.86 ±300% perf-sched.sch_delay.max.ms.schedule_timeout.__wait_for_common.wait_for_completion_killable.__kthread_create_on_node
4.15 ± 10% +17.7% 4.89 ± 23% perf-sched.sch_delay.max.ms.schedule_timeout.__wait_for_common.wait_for_completion_state.kernel_clone
0.08 ±106% -26.8% 0.06 ±115% perf-sched.sch_delay.max.ms.schedule_timeout.kcompactd.kthread.ret_from_fork
8.41 ±161% +26.1% 10.61 ± 97% perf-sched.sch_delay.max.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread
0.00 ±299% -100.0% 0.00 perf-sched.sch_delay.max.ms.schedule_timeout.transaction_kthread.kthread.ret_from_fork
4.38 ± 45% +154.3% 11.15 ± 59% perf-sched.sch_delay.max.ms.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm
1.90 ± 78% +518.9% 11.77 ± 98% perf-sched.sch_delay.max.ms.syslog_print.do_syslog.kmsg_read.vfs_read
2.30 ± 46% +56.5% 3.60 ± 7% perf-sched.sch_delay.max.ms.wait_for_partner.fifo_open.do_dentry_open.do_open
3.74 ± 39% +32.3% 4.95 ± 55% perf-sched.sch_delay.max.ms.worker_thread.kthread.ret_from_fork.ret_from_fork_asm
0.01 ± 4% +41.1% 0.02 ± 19% perf-sched.total_sch_delay.average.ms
13.07 ± 84% +113.7% 27.94 ± 42% perf-sched.total_sch_delay.max.ms
73.81 +1.5% 74.92 perf-sched.total_wait_and_delay.average.ms
42853 -1.5% 42224 perf-sched.total_wait_and_delay.count.ms
4899 -0.2% 4892 perf-sched.total_wait_and_delay.max.ms
73.80 +1.5% 74.90 perf-sched.total_wait_time.average.ms
4898 -0.2% 4889 perf-sched.total_wait_time.max.ms
40.85 ±299% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.acpi_ex_allocate_name_string.acpi_ex_get_name_string
0.00 +1e+104% 100.01 ±300% perf-sched.wait_and_delay.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.vmstat_start.seq_read_iter
6.73 -5.2% 6.37 ± 17% perf-sched.wait_and_delay.avg.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity
0.00 +1.4e+103% 14.35 ±299% perf-sched.wait_and_delay.avg.ms.__cond_resched.dput.__fput.__x64_sys_close.do_syscall_64
0.00 +5e+103% 50.14 ±300% perf-sched.wait_and_delay.avg.ms.__cond_resched.kmem_cache_alloc.acpi_ut_create_generic_state.acpi_ds_result_push.acpi_ds_exec_end_op
0.00 +1e+104% 100.02 ±300% perf-sched.wait_and_delay.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.dup_mmap.dup_mm
0.00 +2.1e+103% 20.74 ±299% perf-sched.wait_and_delay.avg.ms.__cond_resched.mutex_lock.drm_property_free_blob.drm_gem_destroy_shadow_plane_state.drm_atomic_state_default_clear
0.00 +3e+104% 300.08 ±152% perf-sched.wait_and_delay.avg.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.dup_mm
204.67 ±300% +0.1% 204.80 ±200% perf-sched.wait_and_delay.avg.ms.__cond_resched.process_one_work.worker_thread.kthread.ret_from_fork
0.00 +2e+104% 204.67 ±200% perf-sched.wait_and_delay.avg.ms.__cond_resched.refresh_cpu_vm_stats.vmstat_update.process_one_work.worker_thread
0.00 +5.3e+103% 52.66 ±161% perf-sched.wait_and_delay.avg.ms.__cond_resched.shmem_get_folio_gfp.shmem_read_folio_gfp.drm_gem_get_pages.drm_gem_shmem_get_pages
0.00 +7.8e+102% 7.84 ±299% perf-sched.wait_and_delay.avg.ms.__cond_resched.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm
0.00 +1e+104% 100.06 ±300% perf-sched.wait_and_delay.avg.ms.__cond_resched.task_work_run.get_signal.arch_do_signal_or_restart.exit_to_user_mode_loop
0.00 +2.6e+103% 25.66 ±202% perf-sched.wait_and_delay.avg.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range
271.66 ± 68% -70.3% 80.71 ±155% perf-sched.wait_and_delay.avg.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown]
214.27 ± 86% -29.6% 150.78 ±104% perf-sched.wait_and_delay.avg.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep
235.93 ± 5% +1.4% 239.24 ± 5% perf-sched.wait_and_delay.avg.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64
0.00 +7.1e+101% 0.71 ±175% perf-sched.wait_and_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_exc_page_fault
0.24 ± 6% +311.6% 1.00 ± 46% perf-sched.wait_and_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt
0.02 ±299% +545.1% 0.11 ± 45% perf-sched.wait_and_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single
0.24 ± 6% +338.2% 1.07 ± 53% perf-sched.wait_and_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi
23.21 ± 50% -6.7% 21.65 ± 34% perf-sched.wait_and_delay.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64
169.92 ± 16% -30.1% 118.73 ± 28% perf-sched.wait_and_delay.avg.ms.pipe_read.vfs_read.ksys_read.do_syscall_64
552.06 ± 5% +1.1% 558.36 ± 2% perf-sched.wait_and_delay.avg.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll
370.29 ± 4% +4.6% 387.38 ± 3% perf-sched.wait_and_delay.avg.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait
319.32 ±299% -100.0% 0.00 perf-sched.wait_and_delay.avg.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read_killable.__access_remote_vm
452.79 +0.1% 453.34 perf-sched.wait_and_delay.avg.ms.schedule_timeout.kcompactd.kthread.ret_from_fork
3.65 ± 2% -5.4% 3.45 ± 5% perf-sched.wait_and_delay.avg.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread
121.38 +2.6% 124.58 perf-sched.wait_and_delay.avg.ms.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm
724.27 ± 2% +1.6% 735.88 perf-sched.wait_and_delay.avg.ms.worker_thread.kthread.ret_from_fork.ret_from_fork_asm
0.10 ±300% -100.0% 0.00 perf-sched.wait_and_delay.count.__cond_resched.__kmem_cache_alloc_node.__kmalloc.acpi_ex_allocate_name_string.acpi_ex_get_name_string
0.00 +1e+101% 0.10 ±300% perf-sched.wait_and_delay.count.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.vmstat_start.seq_read_iter
1344 +0.0% 1344 perf-sched.wait_and_delay.count.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity
0.00 +7e+101% 0.70 ±300% perf-sched.wait_and_delay.count.__cond_resched.dput.__fput.__x64_sys_close.do_syscall_64
0.00 +1e+101% 0.10 ±300% perf-sched.wait_and_delay.count.__cond_resched.kmem_cache_alloc.acpi_ut_create_generic_state.acpi_ds_result_push.acpi_ds_exec_end_op
0.00 +1e+101% 0.10 ±300% perf-sched.wait_and_delay.count.__cond_resched.kmem_cache_alloc.vm_area_dup.dup_mmap.dup_mm
0.00 +1e+101% 0.10 ±300% perf-sched.wait_and_delay.count.__cond_resched.mutex_lock.drm_property_free_blob.drm_gem_destroy_shadow_plane_state.drm_atomic_state_default_clear
0.00 +3e+101% 0.30 ±152% perf-sched.wait_and_delay.count.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.dup_mm
0.10 ±300% +100.0% 0.20 ±200% perf-sched.wait_and_delay.count.__cond_resched.process_one_work.worker_thread.kthread.ret_from_fork
0.00 +2e+101% 0.20 ±200% perf-sched.wait_and_delay.count.__cond_resched.refresh_cpu_vm_stats.vmstat_update.process_one_work.worker_thread
0.00 +4e+101% 0.40 ±165% perf-sched.wait_and_delay.count.__cond_resched.shmem_get_folio_gfp.shmem_read_folio_gfp.drm_gem_get_pages.drm_gem_shmem_get_pages
0.00 +1.3e+103% 12.90 ±299% perf-sched.wait_and_delay.count.__cond_resched.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm
0.00 +1e+101% 0.10 ±300% perf-sched.wait_and_delay.count.__cond_resched.task_work_run.get_signal.arch_do_signal_or_restart.exit_to_user_mode_loop
0.00 +1.6e+102% 1.60 ±201% perf-sched.wait_and_delay.count.__cond_resched.zap_pmd_range.isra.0.unmap_page_range
8.00 ± 50% -50.0% 4.00 ±122% perf-sched.wait_and_delay.count.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown]
4.40 ± 66% -18.2% 3.60 ± 81% perf-sched.wait_and_delay.count.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep
124.30 -0.2% 124.00 perf-sched.wait_and_delay.count.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64
0.00 +1.1e+104% 112.40 ±152% perf-sched.wait_and_delay.count.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_exc_page_fault
18268 -4.4% 17455 ± 3% perf-sched.wait_and_delay.count.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt
28.50 ±300% +834.4% 266.30 ± 34% perf-sched.wait_and_delay.count.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single
1595 +1.4% 1617 ± 3% perf-sched.wait_and_delay.count.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi
284.40 ± 37% +95.5% 556.00 ± 36% perf-sched.wait_and_delay.count.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64
439.20 ± 15% +50.7% 662.00 ± 30% perf-sched.wait_and_delay.count.pipe_read.vfs_read.ksys_read.do_syscall_64
21.30 ± 5% -1.4% 21.00 ± 3% perf-sched.wait_and_delay.count.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll
13.20 ± 7% -3.8% 12.70 ± 3% perf-sched.wait_and_delay.count.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait
0.10 ±300% -100.0% 0.00 perf-sched.wait_and_delay.count.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read_killable.__access_remote_vm
19.70 ± 3% +1.0% 19.90 perf-sched.wait_and_delay.count.schedule_timeout.kcompactd.kthread.ret_from_fork
1365 +6.3% 1451 ± 5% perf-sched.wait_and_delay.count.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread
17282 -3.5% 16674 ± 2% perf-sched.wait_and_delay.count.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm
1249 ± 2% -1.4% 1231 perf-sched.wait_and_delay.count.worker_thread.kthread.ret_from_fork.ret_from_fork_asm
40.85 ±299% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.acpi_ex_allocate_name_string.acpi_ex_get_name_string
0.00 +1e+104% 100.01 ±300% perf-sched.wait_and_delay.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.vmstat_start.seq_read_iter
4899 -8.0% 4506 ± 25% perf-sched.wait_and_delay.max.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity
0.00 +1e+104% 100.35 ±300% perf-sched.wait_and_delay.max.ms.__cond_resched.dput.__fput.__x64_sys_close.do_syscall_64
0.00 +5e+103% 50.14 ±300% perf-sched.wait_and_delay.max.ms.__cond_resched.kmem_cache_alloc.acpi_ut_create_generic_state.acpi_ds_result_push.acpi_ds_exec_end_op
0.00 +1e+104% 100.02 ±300% perf-sched.wait_and_delay.max.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.dup_mmap.dup_mm
0.00 +2.1e+103% 20.74 ±299% perf-sched.wait_and_delay.max.ms.__cond_resched.mutex_lock.drm_property_free_blob.drm_gem_destroy_shadow_plane_state.drm_atomic_state_default_clear
0.00 +3e+104% 300.08 ±152% perf-sched.wait_and_delay.max.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.dup_mm
204.67 ±300% +0.1% 204.80 ±200% perf-sched.wait_and_delay.max.ms.__cond_resched.process_one_work.worker_thread.kthread.ret_from_fork
0.00 +2e+104% 204.67 ±200% perf-sched.wait_and_delay.max.ms.__cond_resched.refresh_cpu_vm_stats.vmstat_update.process_one_work.worker_thread
0.00 +6.3e+103% 62.80 ±152% perf-sched.wait_and_delay.max.ms.__cond_resched.shmem_get_folio_gfp.shmem_read_folio_gfp.drm_gem_get_pages.drm_gem_shmem_get_pages
0.00 +9.3e+102% 9.33 ±300% perf-sched.wait_and_delay.max.ms.__cond_resched.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm
0.00 +1e+104% 100.06 ±300% perf-sched.wait_and_delay.max.ms.__cond_resched.task_work_run.get_signal.arch_do_signal_or_restart.exit_to_user_mode_loop
0.00 +2e+104% 200.44 ±200% perf-sched.wait_and_delay.max.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range
804.48 ± 50% -50.2% 400.68 ±122% perf-sched.wait_and_delay.max.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown]
702.01 ± 65% -14.4% 600.63 ± 81% perf-sched.wait_and_delay.max.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep
1323 ± 48% +37.2% 1815 ± 54% perf-sched.wait_and_delay.max.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64
0.00 +1e+104% 102.23 ±292% perf-sched.wait_and_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_exc_page_fault
128.86 ±226% +2.2% 131.68 ±219% perf-sched.wait_and_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt
0.25 ±299% +1670.4% 4.39 ± 60% perf-sched.wait_and_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single
5.66 ±156% +54.5% 8.74 ± 54% perf-sched.wait_and_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi
901.41 ± 33% +53.9% 1387 ± 83% perf-sched.wait_and_delay.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64
1051 +0.8% 1059 perf-sched.wait_and_delay.max.ms.pipe_read.vfs_read.ksys_read.do_syscall_64
1007 -0.1% 1006 perf-sched.wait_and_delay.max.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll
499.87 +0.0% 499.88 perf-sched.wait_and_delay.max.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait
319.32 ±299% -100.0% 0.00 perf-sched.wait_and_delay.max.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read_killable.__access_remote_vm
504.61 +0.1% 505.27 perf-sched.wait_and_delay.max.ms.schedule_timeout.kcompactd.kthread.ret_from_fork
20.67 ±122% +10.5% 22.84 ± 81% perf-sched.wait_and_delay.max.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread
1053 +10.8% 1167 ± 26% perf-sched.wait_and_delay.max.ms.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm
2046 -0.0% 2046 perf-sched.wait_and_delay.max.ms.worker_thread.kthread.ret_from_fork.ret_from_fork_asm
0.20 ± 25% +330.8% 0.87 ± 40% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.do_anonymous_page
0.30 ±300% +166.9% 0.80 ±228% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.shmem_alloc_folio
0.00 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.wp_page_copy
0.00 +1.2e+102% 1.19 ±225% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.__pud_alloc.alloc_new_pud.constprop
0.40 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.pte_alloc_one.__do_fault.do_read_fault
0.00 +4.7e+101% 0.47 ±300% perf-sched.wait_time.avg.ms.__cond_resched.__alloc_pages.pte_alloc_one.__pte_alloc.do_anonymous_page
2.19 ±124% -88.6% 0.25 ±299% perf-sched.wait_time.avg.ms.__cond_resched.__do_fault.do_read_fault.do_fault.__handle_mm_fault
40.85 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.acpi_ex_allocate_name_string.acpi_ex_get_name_string
0.00 +2e+99% 0.00 ±300% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.alloc_cpumask_var_node.__sched_setaffinity
0.00 +3.1e+101% 0.31 ±300% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.allocate_slab
0.00 ±300% +308.3% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.perf_event_mmap_event.perf_event_mmap
0.00 +3.9e+99% 0.00 ±200% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.single_open.single_open_size
0.00 +1e+104% 100.01 ±299% perf-sched.wait_time.avg.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.vmstat_start.seq_read_iter
6.72 -5.2% 6.37 ± 17% perf-sched.wait_time.avg.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity
0.84 ±132% -63.0% 0.31 ±194% perf-sched.wait_time.avg.ms.__cond_resched.__wait_for_common.wait_for_completion_state.kernel_clone.__x64_sys_vfork
0.15 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.cgroup_css_set_fork.cgroup_can_fork.copy_process.kernel_clone
0.00 ±300% +327.3% 0.01 ±299% perf-sched.wait_time.avg.ms.__cond_resched.change_pmd_range.change_p4d_range.change_protection_range.mprotect_fixup
0.00 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.dentry_kill.dput.step_into.link_path_walk
0.15 ±154% +506.7% 0.92 ±112% perf-sched.wait_time.avg.ms.__cond_resched.down_write.__split_vma.do_vmi_align_munmap.__do_sys_brk
0.20 ± 41% +161.6% 0.53 ± 98% perf-sched.wait_time.avg.ms.__cond_resched.down_write.anon_vma_clone.__split_vma.do_vmi_align_munmap
0.00 +2.2e+99% 0.00 ±300% perf-sched.wait_time.avg.ms.__cond_resched.down_write.anon_vma_clone.__split_vma.vma_modify
0.00 +2.6e+101% 0.26 ±201% perf-sched.wait_time.avg.ms.__cond_resched.down_write.do_brk_flags.__do_sys_brk.do_syscall_64
0.01 ±204% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write.free_pgtables.exit_mmap.__mmput
0.00 ±300% +1245.2% 0.04 ±299% perf-sched.wait_time.avg.ms.__cond_resched.down_write.mmap_region.do_mmap.vm_mmap_pgoff
0.05 ±194% +126.6% 0.12 ±223% perf-sched.wait_time.avg.ms.__cond_resched.down_write.unlink_anon_vmas.free_pgtables.unmap_region
0.00 ±299% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write.unlink_file_vma.free_pgtables.exit_mmap
0.07 ±125% -33.7% 0.05 ±300% perf-sched.wait_time.avg.ms.__cond_resched.down_write.vma_prepare.__split_vma.do_vmi_align_munmap
0.02 ±300% +723.8% 0.19 ±259% perf-sched.wait_time.avg.ms.__cond_resched.down_write.vma_prepare.do_brk_flags.__do_sys_brk
0.25 ± 76% +104.2% 0.51 ±114% perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.__do_sys_brk.do_syscall_64.entry_SYSCALL_64_after_hwframe
0.32 ±194% -79.5% 0.07 ±248% perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.exec_mmap.begin_new_exec.load_elf_binary
0.00 ±299% +12272.7% 0.41 ±300% perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.setup_arg_pages.load_elf_binary.search_binary_handler
0.21 ±299% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.elf_map.load_elf_binary
0.00 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.elf_map.load_elf_interp
0.38 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.ksys_mmap_pgoff.do_syscall_64
0.08 ±198% +17398.4% 14.54 ±294% perf-sched.wait_time.avg.ms.__cond_resched.dput.__fput.__x64_sys_close.do_syscall_64
0.01 ±122% +290.4% 0.04 ±282% perf-sched.wait_time.avg.ms.__cond_resched.dput.step_into.link_path_walk.part
0.58 ±206% -99.7% 0.00 ±299% perf-sched.wait_time.avg.ms.__cond_resched.dput.terminate_walk.path_openat.do_filp_open
0.00 ±201% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.exit_mmap.__mmput.exit_mm.do_exit
0.00 ±299% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.filemap_read.__kernel_read.search_binary_handler.exec_binprm
0.00 ±200% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.filemap_read.vfs_read.ksys_read.do_syscall_64
0.20 ±300% +1341.7% 2.87 ±188% perf-sched.wait_time.avg.ms.__cond_resched.generic_perform_write.shmem_file_write_iter.vfs_write.ksys_write
0.00 ±300% +2110.0% 0.02 ±300% perf-sched.wait_time.avg.ms.__cond_resched.khugepaged.kthread.ret_from_fork.ret_from_fork_asm
0.00 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.__anon_vma_prepare.do_anonymous_page.__handle_mm_fault
0.00 +3.9e+101% 0.39 ±300% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.__anon_vma_prepare.wp_page_copy.__handle_mm_fault
0.00 +5e+103% 50.14 ±300% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.acpi_ut_create_generic_state.acpi_ds_result_push.acpi_ds_exec_end_op
0.42 ±154% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.alloc_pid.copy_process.kernel_clone
0.20 ± 30% +241.4% 0.68 ±123% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.__split_vma
0.21 ±128% +117.3% 0.46 ±137% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.do_brk_flags
0.16 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.mmap_region
0.00 +2.4e+99% 0.00 ±300%
perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.vma_expand 0.00 ±299% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.security_file_alloc.init_file.alloc_empty_file 0.00 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.__install_special_mapping.map_vdso 0.32 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.do_brk_flags.__do_sys_brk 0.10 ±299% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.mmap_region.do_mmap 0.32 ± 65% +94.9% 0.62 ±100% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.do_vmi_align_munmap 0.00 +1.7e+101% 0.17 ±300% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.vma_modify 0.00 +1e+104% 100.01 ±300% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.dup_mmap.dup_mm 0.12 ± 94% -11.6% 0.10 ±152% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc_bulk.mas_alloc_nodes.mas_preallocate.__split_vma 0.00 ±300% +24600.0% 0.42 ±250% perf-sched.wait_time.avg.ms.__cond_resched.kmem_cache_alloc_bulk.mas_alloc_nodes.mas_preallocate.do_brk_flags 0.00 +2.1e+103% 20.73 ±300% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.drm_property_free_blob.drm_gem_destroy_shadow_plane_state.drm_atomic_state_default_clear 0.00 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.futex_exit_release.exit_mm_release.exit_mm 0.00 ±200% -47.7% 0.00 ±300% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.perf_event_ctx_lock_nested.constprop.0 2.39 ± 81% -18.7% 1.94 ±100% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock.perf_poll.do_poll.constprop 0.00 +7.7e+100% 0.08 ±299% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.alloc_bprm 0.00 +3e+104% 300.07 ±152% perf-sched.wait_time.avg.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.dup_mm 204.66 ±300% +0.1% 204.80 ±200% perf-sched.wait_time.avg.ms.__cond_resched.process_one_work.worker_thread.kthread.ret_from_fork 0.00 +2e+104% 204.66 ±200% perf-sched.wait_time.avg.ms.__cond_resched.refresh_cpu_vm_stats.vmstat_update.process_one_work.worker_thread 0.20 ±122% +125.5% 0.44 ±244% perf-sched.wait_time.avg.ms.__cond_resched.remove_vma.do_vmi_align_munmap.__do_sys_brk.do_syscall_64 0.00 ±300% +1.3e+06% 57.48 ±143% perf-sched.wait_time.avg.ms.__cond_resched.shmem_get_folio_gfp.shmem_read_folio_gfp.drm_gem_get_pages.drm_gem_shmem_get_pages 0.15 ±300% +362.8% 0.69 ±202% perf-sched.wait_time.avg.ms.__cond_resched.shmem_inode_acct_blocks.shmem_alloc_and_add_folio.shmem_get_folio_gfp.shmem_write_begin 0.00 +2.8e+99% 0.00 ±300% perf-sched.wait_time.avg.ms.__cond_resched.slab_pre_alloc_hook.constprop.0.kmem_cache_alloc_lru 39.66 ± 14% +56.8% 62.18 ± 16% perf-sched.wait_time.avg.ms.__cond_resched.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm 0.02 ±106% +448.4% 0.08 ±146% perf-sched.wait_time.avg.ms.__cond_resched.stop_one_cpu.sched_exec.bprm_execve.part 0.00 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.switch_task_namespaces.do_exit.do_group_exit.__x64_sys_exit_group 0.04 ± 89% +47.0% 0.07 ±174% perf-sched.wait_time.avg.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode 0.00 +4.6e+100% 0.05 ±271% 
perf-sched.wait_time.avg.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode 0.00 +1e+104% 100.06 ±300% perf-sched.wait_time.avg.ms.__cond_resched.task_work_run.get_signal.arch_do_signal_or_restart.exit_to_user_mode_loop 0.40 ±295% -99.0% 0.00 ±209% perf-sched.wait_time.avg.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.exit_mmap.__mmput 0.28 ± 15% +483.6% 1.63 ± 48% perf-sched.wait_time.avg.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.unmap_region.constprop 0.02 ±300% +1253.6% 0.30 ±300% perf-sched.wait_time.avg.ms.__cond_resched.unmap_page_range.unmap_vmas.unmap_region.constprop 0.28 ± 28% +193.3% 0.81 ± 66% perf-sched.wait_time.avg.ms.__cond_resched.unmap_vmas.unmap_region.constprop.0 0.00 ±214% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.wp_page_copy.__handle_mm_fault.handle_mm_fault.do_user_addr_fault 5.09 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_fbdev_generic_helper_fb_dirty.drm_fb_helper_damage_work 0.00 +3.7e+100% 0.04 ±253% perf-sched.wait_time.avg.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_gem_fb_vunmap.drm_atomic_helper_cleanup_planes 0.27 ± 29% +9684.8% 26.29 ±195% perf-sched.wait_time.avg.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range 271.34 ± 67% -69.9% 81.76 ±152% perf-sched.wait_time.avg.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown] 2.37 ± 40% -67.9% 0.76 ±154% perf-sched.wait_time.avg.ms.devkmsg_read.vfs_read.ksys_read.do_syscall_64 214.20 ± 86% -29.4% 151.17 ±104% perf-sched.wait_time.avg.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep 235.79 ± 5% +1.3% 238.78 ± 5% perf-sched.wait_time.avg.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64 2.94 ± 4% +30.9% 3.84 ± 85% perf-sched.wait_time.avg.ms.do_wait.kernel_wait4.__do_sys_wait4.do_syscall_64 0.00 +2.6e+101% 0.26 ±300% perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_common_interrupt 0.26 ± 22% +375.6% 1.22 ± 80% perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_exc_page_fault 0.24 ± 6% +311.6% 1.00 ± 46% perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt 0.13 ± 18% -9.5% 0.11 ± 29% perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single 0.24 ± 6% +338.2% 1.07 ± 53% perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi 27.55 ± 36% -21.7% 21.58 ± 34% perf-sched.wait_time.avg.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64 0.73 ±300% -100.0% 0.00 perf-sched.wait_time.avg.ms.futex_wait_queue.futex_wait.do_futex.__x64_sys_futex 0.00 ±300% +32366.7% 0.39 ±300% perf-sched.wait_time.avg.ms.io_schedule.folio_wait_bit_common.filemap_fault.__do_fault 169.88 ± 16% -30.1% 118.71 ± 28% perf-sched.wait_time.avg.ms.pipe_read.vfs_read.ksys_read.do_syscall_64 551.67 ± 5% +1.2% 558.07 ± 2% perf-sched.wait_time.avg.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll 370.03 ± 4% +4.6% 386.92 ± 3% perf-sched.wait_time.avg.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait 319.32 ±299% -100.0% 0.00 
perf-sched.wait_time.avg.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read_killable.__access_remote_vm 0.01 ±122% +6.5% 0.01 ±123% perf-sched.wait_time.avg.ms.schedule_timeout.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr 2.65 ± 10% +17.7% 3.12 ± 11% perf-sched.wait_time.avg.ms.schedule_timeout.__wait_for_common.wait_for_completion_state.kernel_clone 452.77 +0.1% 453.32 perf-sched.wait_time.avg.ms.schedule_timeout.kcompactd.kthread.ret_from_fork 0.18 ±300% +102.3% 0.36 ±299% perf-sched.wait_time.avg.ms.schedule_timeout.khugepaged_wait_work.khugepaged.kthread 3.63 -5.5% 3.43 ± 5% perf-sched.wait_time.avg.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread 121.37 +2.6% 124.56 perf-sched.wait_time.avg.ms.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm 3.83 ± 19% -71.1% 1.11 ±123% perf-sched.wait_time.avg.ms.syslog_print.do_syslog.kmsg_read.vfs_read 0.44 ± 43% -21.7% 0.34 ± 40% perf-sched.wait_time.avg.ms.wait_for_partner.fifo_open.do_dentry_open.do_open 724.24 ± 2% +1.6% 735.84 perf-sched.wait_time.avg.ms.worker_thread.kthread.ret_from_fork.ret_from_fork_asm 0.83 ± 62% +573.9% 5.62 ± 59% perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.do_anonymous_page 0.30 ±300% +166.9% 0.80 ±228% perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.shmem_alloc_folio 0.00 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.__folio_alloc.vma_alloc_folio.wp_page_copy 0.00 +1.2e+102% 1.19 ±225% perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.__pud_alloc.alloc_new_pud.constprop 0.40 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.pte_alloc_one.__do_fault.do_read_fault 0.00 +8.8e+101% 0.88 ±300% perf-sched.wait_time.max.ms.__cond_resched.__alloc_pages.pte_alloc_one.__pte_alloc.do_anonymous_page 2.19 ±124% -77.3% 0.50 ±300% perf-sched.wait_time.max.ms.__cond_resched.__do_fault.do_read_fault.do_fault.__handle_mm_fault 40.85 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc.acpi_ex_allocate_name_string.acpi_ex_get_name_string 0.00 +2e+99% 0.00 ±300% perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.alloc_cpumask_var_node.__sched_setaffinity 0.00 +5.5e+101% 0.55 ±300% perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.__kmalloc_node.memcg_alloc_slab_cgroups.allocate_slab 0.00 ±300% +104.2% 0.00 ±200% perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.perf_event_mmap_event.perf_event_mmap 0.00 +3.9e+99% 0.00 ±200% perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.single_open.single_open_size 0.00 +1e+104% 100.01 ±299% perf-sched.wait_time.max.ms.__cond_resched.__kmem_cache_alloc_node.kmalloc_trace.vmstat_start.seq_read_iter 4898 -8.1% 4504 ± 25% perf-sched.wait_time.max.ms.__cond_resched.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr.__sched_setaffinity 0.84 ±132% -63.0% 0.31 ±194% perf-sched.wait_time.max.ms.__cond_resched.__wait_for_common.wait_for_completion_state.kernel_clone.__x64_sys_vfork 0.15 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.cgroup_css_set_fork.cgroup_can_fork.copy_process.kernel_clone 0.00 ±300% +1100.0% 0.03 ±300% perf-sched.wait_time.max.ms.__cond_resched.change_pmd_range.change_p4d_range.change_protection_range.mprotect_fixup 0.00 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.dentry_kill.dput.step_into.link_path_walk 0.16 
±148% +731.9% 1.33 ±146% perf-sched.wait_time.max.ms.__cond_resched.down_write.__split_vma.do_vmi_align_munmap.__do_sys_brk 0.52 ± 53% +238.0% 1.75 ±108% perf-sched.wait_time.max.ms.__cond_resched.down_write.anon_vma_clone.__split_vma.do_vmi_align_munmap 0.00 +2.2e+99% 0.00 ±300% perf-sched.wait_time.max.ms.__cond_resched.down_write.anon_vma_clone.__split_vma.vma_modify 0.00 +2.9e+101% 0.29 ±178% perf-sched.wait_time.max.ms.__cond_resched.down_write.do_brk_flags.__do_sys_brk.do_syscall_64 0.01 ±212% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.down_write.free_pgtables.exit_mmap.__mmput 0.00 ±300% +1245.2% 0.04 ±299% perf-sched.wait_time.max.ms.__cond_resched.down_write.mmap_region.do_mmap.vm_mmap_pgoff 0.06 ±199% +321.8% 0.24 ±258% perf-sched.wait_time.max.ms.__cond_resched.down_write.unlink_anon_vmas.free_pgtables.unmap_region 0.00 ±299% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.down_write.unlink_file_vma.free_pgtables.exit_mmap 0.08 ±119% -37.4% 0.05 ±300% perf-sched.wait_time.max.ms.__cond_resched.down_write.vma_prepare.__split_vma.do_vmi_align_munmap 0.02 ±300% +763.4% 0.20 ±247% perf-sched.wait_time.max.ms.__cond_resched.down_write.vma_prepare.do_brk_flags.__do_sys_brk 0.48 ±115% +96.0% 0.94 ± 98% perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.__do_sys_brk.do_syscall_64.entry_SYSCALL_64_after_hwframe 0.32 ±192% -12.3% 0.28 ±276% perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.exec_mmap.begin_new_exec.load_elf_binary 0.00 ±299% +12272.7% 0.41 ±300% perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.setup_arg_pages.load_elf_binary.search_binary_handler 0.21 ±299% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.elf_map.load_elf_binary 0.00 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.elf_map.load_elf_interp 0.38 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.down_write_killable.vm_mmap_pgoff.ksys_mmap_pgoff.do_syscall_64 0.14 ±230% +71629.0% 101.64 ±294% perf-sched.wait_time.max.ms.__cond_resched.dput.__fput.__x64_sys_close.do_syscall_64 0.01 ±122% +290.4% 0.04 ±282% perf-sched.wait_time.max.ms.__cond_resched.dput.step_into.link_path_walk.part 0.58 ±206% -99.7% 0.00 ±299% perf-sched.wait_time.max.ms.__cond_resched.dput.terminate_walk.path_openat.do_filp_open 0.00 ±201% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.exit_mmap.__mmput.exit_mm.do_exit 0.00 ±299% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.filemap_read.__kernel_read.search_binary_handler.exec_binprm 0.00 ±200% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.filemap_read.vfs_read.ksys_read.do_syscall_64 0.40 ±300% +961.6% 4.23 ±219% perf-sched.wait_time.max.ms.__cond_resched.generic_perform_write.shmem_file_write_iter.vfs_write.ksys_write 0.00 ±300% +4310.0% 0.04 ±300% perf-sched.wait_time.max.ms.__cond_resched.khugepaged.kthread.ret_from_fork.ret_from_fork_asm 0.00 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.__anon_vma_prepare.do_anonymous_page.__handle_mm_fault 0.00 +3.9e+101% 0.39 ±300% perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.__anon_vma_prepare.wp_page_copy.__handle_mm_fault 0.00 +5e+103% 50.14 ±300% perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.acpi_ut_create_generic_state.acpi_ds_result_push.acpi_ds_exec_end_op 0.42 ±154% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.alloc_pid.copy_process.kernel_clone 0.53 ±101% +124.1% 1.19 
± 90% perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.__split_vma 0.28 ±154% +291.2% 1.11 ±157% perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.do_brk_flags 0.16 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.mmap_region 0.00 +2.4e+99% 0.00 ±300% perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.mas_alloc_nodes.mas_preallocate.vma_expand 0.00 ±299% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.security_file_alloc.init_file.alloc_empty_file 0.00 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.__install_special_mapping.map_vdso 0.32 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.do_brk_flags.__do_sys_brk 0.10 ±299% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_alloc.mmap_region.do_mmap 1.20 ± 83% +66.0% 2.00 ±101% perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.do_vmi_align_munmap 0.00 +1.7e+101% 0.17 ±300% perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.__split_vma.vma_modify 0.00 +1e+104% 100.01 ±300% perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc.vm_area_dup.dup_mmap.dup_mm 0.12 ± 92% -16.6% 0.10 ±152% perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc_bulk.mas_alloc_nodes.mas_preallocate.__split_vma 0.00 ±300% +38405.9% 0.65 ±244% perf-sched.wait_time.max.ms.__cond_resched.kmem_cache_alloc_bulk.mas_alloc_nodes.mas_preallocate.do_brk_flags 0.00 +2.1e+103% 20.73 ±300% perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.drm_property_free_blob.drm_gem_destroy_shadow_plane_state.drm_atomic_state_default_clear 0.00 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.futex_exit_release.exit_mm_release.exit_mm 0.00 ±200% -47.7% 0.00 ±300% perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.perf_event_ctx_lock_nested.constprop.0 2.39 ± 81% -18.7% 1.94 ±100% perf-sched.wait_time.max.ms.__cond_resched.mutex_lock.perf_poll.do_poll.constprop 0.00 +4.6e+101% 0.46 ±300% perf-sched.wait_time.max.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.alloc_bprm 0.00 +3e+104% 300.07 ±152% perf-sched.wait_time.max.ms.__cond_resched.mutex_lock_killable.pcpu_alloc.mm_init.dup_mm 204.66 ±300% +0.1% 204.80 ±200% perf-sched.wait_time.max.ms.__cond_resched.process_one_work.worker_thread.kthread.ret_from_fork 0.00 +2e+104% 204.66 ±200% perf-sched.wait_time.max.ms.__cond_resched.refresh_cpu_vm_stats.vmstat_update.process_one_work.worker_thread 0.32 ±126% +45.0% 0.46 ±237% perf-sched.wait_time.max.ms.__cond_resched.remove_vma.do_vmi_align_munmap.__do_sys_brk.do_syscall_64 0.00 ±300% +1.6e+06% 73.17 ±128% perf-sched.wait_time.max.ms.__cond_resched.shmem_get_folio_gfp.shmem_read_folio_gfp.drm_gem_get_pages.drm_gem_shmem_get_pages 0.30 ±300% +244.1% 1.02 ±220% perf-sched.wait_time.max.ms.__cond_resched.shmem_inode_acct_blocks.shmem_alloc_and_add_folio.shmem_get_folio_gfp.shmem_write_begin 0.00 +2.8e+99% 0.00 ±300% perf-sched.wait_time.max.ms.__cond_resched.slab_pre_alloc_hook.constprop.0.kmem_cache_alloc_lru 86.01 ± 6% +5.5% 90.72 ± 4% perf-sched.wait_time.max.ms.__cond_resched.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm 1.75 ±105% +80.5% 3.16 ±114% perf-sched.wait_time.max.ms.__cond_resched.stop_one_cpu.sched_exec.bprm_execve.part 0.00 ±300% -100.0% 0.00 
perf-sched.wait_time.max.ms.__cond_resched.switch_task_namespaces.do_exit.do_group_exit.__x64_sys_exit_group 0.10 ±100% +248.6% 0.35 ±258% perf-sched.wait_time.max.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode 0.00 +4.6e+100% 0.05 ±271% perf-sched.wait_time.max.ms.__cond_resched.task_work_run.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode 0.00 +1e+104% 100.06 ±300% perf-sched.wait_time.max.ms.__cond_resched.task_work_run.get_signal.arch_do_signal_or_restart.exit_to_user_mode_loop 0.40 ±295% -98.7% 0.01 ±200% perf-sched.wait_time.max.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.exit_mmap.__mmput 2.03 ± 23% +227.5% 6.66 ± 48% perf-sched.wait_time.max.ms.__cond_resched.tlb_batch_pages_flush.tlb_finish_mmu.unmap_region.constprop 0.02 ±300% +1253.6% 0.30 ±300% perf-sched.wait_time.max.ms.__cond_resched.unmap_page_range.unmap_vmas.unmap_region.constprop 1.59 ± 53% +153.9% 4.04 ± 57% perf-sched.wait_time.max.ms.__cond_resched.unmap_vmas.unmap_region.constprop.0 0.00 ±200% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.wp_page_copy.__handle_mm_fault.handle_mm_fault.do_user_addr_fault 5.09 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_fbdev_generic_helper_fb_dirty.drm_fb_helper_damage_work 0.00 +3.7e+100% 0.04 ±253% perf-sched.wait_time.max.ms.__cond_resched.ww_mutex_lock.drm_gem_vunmap_unlocked.drm_gem_fb_vunmap.drm_atomic_helper_cleanup_planes 1.18 ± 46% +17108.0% 202.73 ±197% perf-sched.wait_time.max.ms.__cond_resched.zap_pmd_range.isra.0.unmap_page_range 802.26 ± 49% -49.8% 402.51 ±120% perf-sched.wait_time.max.ms.__x64_sys_pause.do_syscall_64.entry_SYSCALL_64_after_hwframe.[unknown] 4.73 ± 40% -67.8% 1.52 ±154% perf-sched.wait_time.max.ms.devkmsg_read.vfs_read.ksys_read.do_syscall_64 702.00 ± 65% -14.3% 601.71 ± 81% perf-sched.wait_time.max.ms.do_nanosleep.hrtimer_nanosleep.common_nsleep.__x64_sys_clock_nanosleep 1321 ± 48% +37.2% 1813 ± 54% perf-sched.wait_time.max.ms.do_task_dead.do_exit.do_group_exit.__x64_sys_exit_group.do_syscall_64 6.90 ± 9% +1483.8% 109.24 ±272% perf-sched.wait_time.max.ms.do_wait.kernel_wait4.__do_sys_wait4.do_syscall_64 0.00 +2.6e+101% 0.26 ±300% perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_common_interrupt 2.33 ± 39% +4436.5% 105.58 ±282% perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_exc_page_fault 128.51 ±225% +2.5% 131.68 ±219% perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_apic_timer_interrupt 2.44 ± 12% +89.7% 4.63 ± 50% perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_call_function_single 5.66 ±156% +54.5% 8.74 ± 54% perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.irqentry_exit_to_user_mode.asm_sysvec_reschedule_ipi 1000 +38.5% 1386 ± 83% perf-sched.wait_time.max.ms.exit_to_user_mode_loop.exit_to_user_mode_prepare.syscall_exit_to_user_mode.do_syscall_64 1.41 ±300% -100.0% 0.00 perf-sched.wait_time.max.ms.futex_wait_queue.futex_wait.do_futex.__x64_sys_futex 0.00 ±300% +16133.3% 0.39 ±300% perf-sched.wait_time.max.ms.io_schedule.folio_wait_bit_common.filemap_fault.__do_fault 1051 +0.8% 1059 perf-sched.wait_time.max.ms.pipe_read.vfs_read.ksys_read.do_syscall_64 1004 -0.0% 1003 
perf-sched.wait_time.max.ms.schedule_hrtimeout_range_clock.do_poll.constprop.0.do_sys_poll 499.84 +0.0% 499.86 perf-sched.wait_time.max.ms.schedule_hrtimeout_range_clock.ep_poll.do_epoll_wait.__x64_sys_epoll_wait 319.32 ±299% -100.0% 0.00 perf-sched.wait_time.max.ms.schedule_preempt_disabled.rwsem_down_read_slowpath.down_read_killable.__access_remote_vm 0.01 ±122% +4.8% 0.01 ±123% perf-sched.wait_time.max.ms.schedule_timeout.__wait_for_common.affine_move_task.__set_cpus_allowed_ptr 8.87 ± 6% +50.9% 13.38 ± 50% perf-sched.wait_time.max.ms.schedule_timeout.__wait_for_common.wait_for_completion_state.kernel_clone 504.57 +0.1% 505.25 perf-sched.wait_time.max.ms.schedule_timeout.kcompactd.kthread.ret_from_fork 0.18 ±300% +102.3% 0.36 ±299% perf-sched.wait_time.max.ms.schedule_timeout.khugepaged_wait_work.khugepaged.kthread 14.61 ± 99% +0.9% 14.74 ± 73% perf-sched.wait_time.max.ms.schedule_timeout.rcu_gp_fqs_loop.rcu_gp_kthread.kthread 1053 +10.8% 1167 ± 26% perf-sched.wait_time.max.ms.smpboot_thread_fn.kthread.ret_from_fork.ret_from_fork_asm 7.67 ± 19% -71.1% 2.22 ±123% perf-sched.wait_time.max.ms.syslog_print.do_syslog.kmsg_read.vfs_read 3.55 ± 12% +13.4% 4.03 ± 29% perf-sched.wait_time.max.ms.wait_for_partner.fifo_open.do_dentry_open.do_open 2046 +0.0% 2046 perf-sched.wait_time.max.ms.worker_thread.kthread.ret_from_fork.ret_from_fork_asm xsang@inn:~$