Message-ID: <20251006230205.521341-8-sjg@chromium.org>
Date: Mon, 6 Oct 2025 17:01:58 -0600
From: Simon Glass <sjg@...omium.org>
To: linux-arm-kernel@...ts.infradead.org
Cc: Chen-Yu Tsai <wenst@...omium.org>,
Ahmad Fatoum <a.fatoum@...gutronix.de>,
Masahiro Yamada <masahiroy@...nel.org>,
J . Neuschäfer <j.ne@...teo.net>,
Nicolas Schier <nicolas@...sle.eu>,
Tom Rini <trini@...sulko.com>,
Simon Glass <sjg@...omium.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH v4 7/7] scripts/make_fit: Compress dtbs in parallel
When there are 1500 devicetree files, it takes quite a while to compress
them. Do it in parallel.
Signed-off-by: Simon Glass <sjg@...omium.org>
---
(no changes since v1)
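
As a reference for reviewers, here is a rough standalone sketch of the
approach: compress_dtb() workers are fanned out with
multiprocessing.Pool.starmap() over (filename, algorithm) tuples and the
results are collected into a dict keyed by filename. The gzip.compress()
call below is only a stand-in for the script's compress_data(), which runs
the selected external compressor.

#!/usr/bin/env python3
# Sketch only: gzip.compress() stands in for make_fit.py's compress_data()
import glob
import gzip
import multiprocessing


def compress_dtb(fname, compress):
    """Compress one DTB, returning (fname, compressed_bytes)"""
    with open(fname, 'rb') as inf:
        data = inf.read()
    if compress == 'gzip':
        data = gzip.compress(data)
    return fname, data


if __name__ == '__main__':
    todo = sorted(glob.glob('*.dtb'))
    with multiprocessing.Pool() as pool:
        # starmap() unpacks each (fname, 'gzip') tuple into compress_dtb()
        results = pool.starmap(compress_dtb, [(fn, 'gzip') for fn in todo])
    cache = dict(results)  # fname -> compressed data, written to the FIT later
    print(f'compressed {len(cache)} files')

Pool() defaults to one worker per available CPU, so the speed-up scales with
the build machine.
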
scripts/make_fit.py | 56 ++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 50 insertions(+), 6 deletions(-)
diff --git a/scripts/make_fit.py b/scripts/make_fit.py
index 10a040f4eb83..d1af496f3a57 100755
--- a/scripts/make_fit.py
+++ b/scripts/make_fit.py
@@ -43,6 +43,7 @@ as U-Boot, Linuxboot, Tianocore, etc.
 import argparse
 import collections
+import multiprocessing
 import os
 import shutil
 import subprocess
@@ -237,15 +238,31 @@ def compress_data(inf, compress):
     return comp_data
 
 
-def output_dtb(fsw, seq, fname, arch, compress):
+def compress_dtb(fname, compress):
+    """Compress a single DTB file
+
+    Args:
+        fname (str): Filename containing the DTB
+        compress (str): Compression algorithm, e.g. 'gzip'
+
+    Returns:
+        tuple: (str: fname, bytes: compressed_data)
+    """
+    with open(fname, 'rb') as inf:
+        compressed = compress_data(inf, compress)
+    return fname, compressed
+
+
+def output_dtb(fsw, seq, fname, arch, compress, data=None):
     """Write out a single devicetree to the FIT
 
     Args:
         fsw (libfdt.FdtSw): Object to use for writing
         seq (int): Sequence number (1 for first)
         fname (str): Filename containing the DTB
-        arch: FIT architecture, e.g. 'arm64'
+        arch (str): FIT architecture, e.g. 'arm64'
         compress (str): Compressed algorithm, e.g. 'gzip'
+        data (bytes): Pre-compressed data (optional)
     """
     with fsw.add_node(f'fdt-{seq}'):
         fsw.property_string('description', os.path.basename(fname))
@@ -253,9 +270,10 @@ def output_dtb(fsw, seq, fname, arch, compress):
         fsw.property_string('arch', arch)
         fsw.property_string('compression', compress)
 
-        with open(fname, 'rb') as inf:
-            compressed = compress_data(inf, compress)
-        fsw.property('data', compressed)
+        if data is None:
+            with open(fname, 'rb') as inf:
+                data = compress_data(inf, compress)
+        fsw.property('data', data)
 
 
 def build_ramdisk(args, tmpdir):
@@ -366,6 +384,11 @@ def _process_dtbs(args, fsw, entries, fdts):
"""
seq = 0
size = 0
+
+ # First figure out the unique DTB files that need compression
+ todo = []
+ file_info = [] # List of (fname, model, compat, files) tuples
+
for fname in args.dtbs:
# Ignore non-DTB (*.dtb) files
if os.path.splitext(fname)[1] != '.dtb':
@@ -377,11 +400,32 @@ def _process_dtbs(args, fsw, entries, fdts):
             sys.stderr.write(f'Error processing {fname}:\n')
             raise e
 
+        file_info.append((fname, model, compat, files))
+        for fn in files:
+            if fn not in fdts and fn not in todo:
+                todo.append(fn)
+
+    # Compress all DTBs in parallel
+    cache = {}
+    if todo and args.compress != 'none':
+        if args.verbose:
+            print(f'Compressing {len(todo)} DTBs...')
+
+        with multiprocessing.Pool() as pool:
+            compress_args = [(fn, args.compress) for fn in todo]
+            # unpacks each tuple, calls compress_dtb(fn, compress) in parallel
+            results = pool.starmap(compress_dtb, compress_args)
+
+        cache = dict(results)
+
+    # Now write all DTBs to the FIT using pre-compressed data
+    for fname, model, compat, files in file_info:
         for fn in files:
             if fn not in fdts:
                 seq += 1
                 size += os.path.getsize(fn)
-                output_dtb(fsw, seq, fn, args.arch, args.compress)
+                output_dtb(fsw, seq, fn, args.arch, args.compress,
+                           cache.get(fn))
                 fdts[fn] = seq
 
         files_seq = [fdts[fn] for fn in files]
--
2.43.0