Message-ID: <20250710121033.42db5ef3@foz.lan>
Date: Thu, 10 Jul 2025 12:10:33 +0200
From: Mauro Carvalho Chehab <mchehab+huawei@...nel.org>
To: Jonathan Corbet <corbet@....net>
Cc: linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org, Akira Yokosawa
<akiyks@...il.com>
Subject: Re: [PATCH 12/12] docs: kdoc: Improve the output text accumulation
On Thu, 10 Jul 2025 10:19:31 +0200,
Mauro Carvalho Chehab <mchehab+huawei@...nel.org> wrote:
> On Thu, 10 Jul 2025 09:13:52 +0200,
> Mauro Carvalho Chehab <mchehab+huawei@...nel.org> wrote:
>
> Heh, in these times when an LLM can quickly code trivial things for
> us, I actually decided to test 4 different variants:
>
> - using string +=
> - using list append
> - using __add__
> - using __iadd__
I manually reorganized the LLM-generated code in order to get more
precise results. The script is enclosed at the end.
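
In a nutshell, the four variants reduce to these patterns (a
condensed sketch; "buf" and "Out" are illustrative names, the real
classes are in the script at the end):

    # 1) str +=: plain string concatenation
    result = ""
    result += "text"

    # 2) list append, joined once at the end
    buf = []
    buf.append("text")
    result = "".join(buf)

    # 3/4) a wrapper where "obj += text" appends to an internal list,
    #      via __add__ or __iadd__, joining only when str(obj) is taken
    class Out:
        def __init__(self):
            self._chunks = []

        def __iadd__(self, text):
            self._chunks.append(text)
            return self

        def __str__(self):
            return "".join(self._chunks)

    obj = Out()
    obj += "text"
    result = str(obj)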
$ for i in python3.9 python3.13 python3.13t; do echo " $i:"; $i /tmp/bench.py 100000 10 1; $i /tmp/bench.py 1000 1000 1; done
python3.9:
10 strings in a loop with 100000 iterations, repeating 24 times
str += : time: 25.21
list join : time: 72.65: 188.18% slower than str +=
__add__ : time: 71.82: 184.88% slower than str +=
__iadd__ : time: 67.84: 169.09% slower than str +=
1000 strings in a loop with 1000 iterations, repeating 24 times
str += : time: 24.29
list join : time: 58.76: 141.88% slower than str +=
__add__ : time: 58.68: 141.54% slower than str +=
__iadd__ : time: 55.48: 128.37% slower than str +=
python3.13:
10 strings in a loop with 100000 iterations, repeating 24 times
str += : time: 28.01
list join : time: 32.46: 15.91% slower than str +=
__add__ : time: 52.56: 87.66% slower than str +=
__iadd__ : time: 58.69: 109.55% slower than str +=
1000 strings in a loop with 1000 iterations, repeating 24 times
str += : time: 22.03
list join : time: 23.38: 6.12% slower than str +=
__add__ : time: 44.25: 100.86% slower than str +=
__iadd__ : time: 40.70: 84.74% slower than str +=
python3.13t:
10 strings in a loop with 100000 iterations, repeating 24 times
str += : time: 25.65
list join : time: 74.95: 192.18% slower than str +=
__add__ : time: 83.04: 223.71% slower than str +=
__iadd__ : time: 79.07: 208.23% slower than str +=
1000 strings in a loop with 1000 iterations, repeating 24 times
str += : time: 57.39
list join : time: 62.31: 8.58% slower than str +=
__add__ : time: 70.65: 23.10% slower than str +=
__iadd__ : time: 68.67: 19.65% slower than str +=
From the above:

- It is not worth applying patch 12/12, as it makes the code slower;
- Python 3.13t (the no-GIL build) had very bad results; it seems it
  still requires optimization;
- Python 3.9 is a lot slower (140% to 190%) when using list append;
- when there are not many concats, Python 3.13 is about 15% slower
  with lists than with string concatenation; it only approaches
  str += when the number of concats is high.
With the above, str += is clearly faster than list append.

So, unless I did something wrong in this benchmark script, please
don't apply patch 12/12.
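
For what it's worth, the str += results are likely helped by a
CPython implementation detail (not a language guarantee): when the
left-hand string holds the only reference, "s += t" can resize the
string in place instead of copying it. A minimal check, assuming
CPython:

    # The object id of s often stays stable across iterations, which
    # hints at the in-place resize; other interpreters (or strings
    # with extra references) will behave differently.
    s = ""
    ids = set()
    for _ in range(1000):
        s += "x"
        ids.add(id(s))
    print(len(ids))   # typically far smaller than 1000 on CPython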
Regards,
Mauro
---
Benchmark code:
#!/usr/bin/env python3
import argparse
import time
import sys

def benchmark_str_concat(test_strings, n_ops):
    start = time.time()
    for _ in range(n_ops):
        result = ""
        for s in test_strings:
            result += s
    return (time.time() - start) * 1000

def benchmark_explicit_list(test_strings, n_ops):
    class ExplicitList:
        def __init__(self):
            self._output = []

        def emit(self, text):
            self._output.append(text)

        def output(self):
            return ''.join(self._output)

    # Note: output() is never called inside the timed loop, so only
    # the append cost is measured here, not the final ''.join().
    start = time.time()
    for _ in range(n_ops):
        obj = ExplicitList()
        for s in test_strings:
            obj.emit(s)
    return (time.time() - start) * 1000

def benchmark_add_overload(test_strings, n_ops):
    class OutputStringAdd:
        def __init__(self):
            self._output = []

        # With no __iadd__ defined, "obj += text" falls back to __add__()
        def __add__(self, text):
            self._output.append(text)
            return self

        def __str__(self):
            return ''.join(self._output)

    start = time.time()
    for _ in range(n_ops):
        obj = OutputStringAdd()
        for s in test_strings:
            obj += s
    return (time.time() - start) * 1000

def benchmark_iadd_overload(test_strings, n_ops):
    class OutputStringIAdd:
        def __init__(self):
            self._output = []

        def __iadd__(self, text):
            self._output.append(text)
            return self

        def __str__(self):
            return ''.join(self._output)

    start = time.time()
    for _ in range(n_ops):
        obj = OutputStringIAdd()
        for s in test_strings:
            obj += s
    return (time.time() - start) * 1000

def calculate_comparison(base_time, compare_time):
    if compare_time < base_time:
        return (True, (1 - compare_time / base_time) * 100)
    return (False, (compare_time / base_time - 1) * 100)

def benchmark(num_reps, strings_per_run, repeats, detail):
    test_strings = [f"string_{i:03d}" for i in range(strings_per_run)]

    # Create benchmark execution order list
    benchmarks = [
        ("str +=", benchmark_str_concat),
        ("list join", benchmark_explicit_list),
        ("__add__", benchmark_add_overload),
        ("__iadd__", benchmark_iadd_overload)
    ]

    # Use all possible permutations of benchmark order to reduce any
    # noise due to CPU caches (same as list(itertools.permutations(range(4))))
    all_orders = [
        (0, 1, 2, 3), (0, 1, 3, 2), (0, 2, 1, 3), (0, 2, 3, 1),
        (0, 3, 1, 2), (0, 3, 2, 1), (1, 0, 2, 3), (1, 0, 3, 2),
        (1, 2, 0, 3), (1, 2, 3, 0), (1, 3, 0, 2), (1, 3, 2, 0),
        (2, 0, 1, 3), (2, 0, 3, 1), (2, 1, 0, 3), (2, 1, 3, 0),
        (2, 3, 0, 1), (2, 3, 1, 0), (3, 0, 1, 2), (3, 0, 2, 1),
        (3, 1, 0, 2), (3, 1, 2, 0), (3, 2, 0, 1), (3, 2, 1, 0)
    ]
    results = {}
    for name, _ in benchmarks:
        results[name] = 0

    # Warm-up phase to reduce caching issues
    for name, fn in benchmarks:
        fn(test_strings, 1)

    n_repeats = len(all_orders) * repeats
    print(f" {strings_per_run} strings in a loop with {num_reps} iterations, repeating {n_repeats} times")
    # Actual benchmark starts here
    i = 0
    if detail:
        headers = ['Run'] + [name for name, _ in benchmarks]
        print()
        print(f"\t{headers[0]:<6} {headers[1]:<12} {headers[2]:<12} {headers[3]:<12} {headers[4]:<12}")
        print("\t" + "-" * 60)
    for _ in range(repeats):
        # Cycle through every execution order on each run
        for order in all_orders:
            run_results = {}
            for idx in order:
                name, func = benchmarks[idx]
                run_results[name] = func(test_strings, num_reps)
                results[name] += run_results[name]
            if detail:
                # Output results in consistent order
                print(f"\t{i+1:<6}", end=" ")
                for name, _ in benchmarks:
                    print(f"{run_results[name]:<12.2f}", end=" ")
                print()
            i += 1
    avg_results = {}
    for name, _ in benchmarks:
        avg_results[name] = results[name] / repeats / len(all_orders)

    if detail:
        print("\t" + "-" * 60)
        print("\t ", end=" ")
        for name, _ in benchmarks:
            print(f"{avg_results[name]:<12.2f}", end=" ")
        print()
        print()

    ref = benchmarks.pop(0)
    print(f"\t{ref[0]:<12} : time: {avg_results[ref[0]]:3.2f}")
    for name, _ in benchmarks:
        is_faster, percentage = calculate_comparison(avg_results[ref[0]], avg_results[name])
        direction = "faster" if is_faster else "slower"
        print(f"\t{name:<12} : time: {avg_results[name]:3.2f}: {percentage:3.2f}% {direction} than {ref[0]}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--detail', action='store_true',
                        help='Enable detailed output')
    args, remaining = parser.parse_known_args()

    # Handle the positional arguments manually
    if len(remaining) != 3:
        print(f"Usage: {sys.argv[0]} [-d] <num_repetitions> <strings_per_op> <repeats>")
        sys.exit(1)

    num_reps = int(remaining[0])
    strings_per_op = int(remaining[1])
    repeats = int(remaining[2])

    benchmark(num_reps, strings_per_op, repeats, args.detail)
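
For per-run timings, the script also accepts -d:

    $ python3 /tmp/bench.py -d 1000 1000 1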