Commit c7c88b2

sjg20 authored and nathanchance committed
scripts/make_fit: Compress dtbs in parallel
When there are 1500 device tree files it takes quite a while to compress
them. Do it in parallel.

Signed-off-by: Simon Glass <sjg@chromium.org>
Link: https://patch.msgid.link/20260106162738.2605574-7-sjg@chromium.org
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
1 parent fcdcf22 commit c7c88b2

1 file changed

Lines changed: 50 additions & 6 deletions

scripts/make_fit.py

@@ -37,6 +37,7 @@
 
 import argparse
 import collections
+import multiprocessing
 import os
 import subprocess
 import sys
@@ -225,25 +226,42 @@ def compress_data(inf, compress):
     return comp_data
 
 
-def output_dtb(fsw, seq, fname, arch, compress):
+def compress_dtb(fname, compress):
+    """Compress a single DTB file
+
+    Args:
+        fname (str): Filename containing the DTB
+        compress (str): Compression algorithm, e.g. 'gzip'
+
+    Returns:
+        tuple: (str: fname, bytes: compressed_data)
+    """
+    with open(fname, 'rb') as inf:
+        compressed = compress_data(inf, compress)
+    return fname, compressed
+
+
+def output_dtb(fsw, seq, fname, arch, compress, data=None):
     """Write out a single devicetree to the FIT
 
     Args:
         fsw (libfdt.FdtSw): Object to use for writing
         seq (int): Sequence number (1 for first)
         fname (str): Filename containing the DTB
-        arch: FIT architecture, e.g. 'arm64'
+        arch (str): FIT architecture, e.g. 'arm64'
         compress (str): Compressed algorithm, e.g. 'gzip'
+        data (bytes): Pre-compressed data (optional)
     """
     with fsw.add_node(f'fdt-{seq}'):
         fsw.property_string('description', os.path.basename(fname))
         fsw.property_string('type', 'flat_dt')
         fsw.property_string('arch', arch)
         fsw.property_string('compression', compress)
 
-        with open(fname, 'rb') as inf:
-            compressed = compress_data(inf, compress)
-        fsw.property('data', compressed)
+        if data is None:
+            with open(fname, 'rb') as inf:
+                data = compress_data(inf, compress)
+        fsw.property('data', data)
 
 
 def process_dtb(fname, args):
@@ -300,6 +318,11 @@ def _process_dtbs(args, fsw, entries, fdts):
     """
     seq = 0
     size = 0
+
+    # First figure out the unique DTB files that need compression
+    todo = []
+    file_info = []  # List of (fname, model, compat, files) tuples
+
     for fname in args.dtbs:
         # Ignore non-DTB (*.dtb) files
         if os.path.splitext(fname)[1] != '.dtb':
@@ -311,11 +334,32 @@ def _process_dtbs(args, fsw, entries, fdts):
             sys.stderr.write(f'Error processing {fname}:\n')
             raise e
 
+        file_info.append((fname, model, compat, files))
+        for fn in files:
+            if fn not in fdts and fn not in todo:
+                todo.append(fn)
+
+    # Compress all DTBs in parallel
+    cache = {}
+    if todo and args.compress != 'none':
+        if args.verbose:
+            print(f'Compressing {len(todo)} DTBs...')
+
+        with multiprocessing.Pool() as pool:
+            compress_args = [(fn, args.compress) for fn in todo]
+            # unpacks each tuple, calls compress_dtb(fn, compress) in parallel
+            results = pool.starmap(compress_dtb, compress_args)
+
+        cache = dict(results)
+
+    # Now write all DTBs to the FIT using pre-compressed data
+    for fname, model, compat, files in file_info:
         for fn in files:
             if fn not in fdts:
                 seq += 1
                 size += os.path.getsize(fn)
-                output_dtb(fsw, seq, fn, args.arch, args.compress)
+                output_dtb(fsw, seq, fn, args.arch, args.compress,
+                           cache.get(fn))
                 fdts[fn] = seq
 
         files_seq = [fdts[fn] for fn in files]
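
For context on how the change parallelizes the work: _process_dtbs() now makes two passes, first collecting the unique .dtb files into todo, then handing them to a multiprocessing.Pool whose starmap() call unpacks each (fname, compress) tuple into compress_dtb() across worker processes; the resulting (fname, data) pairs become a dict so the unchanged write loop can pass cache.get(fn) to output_dtb(). Below is a minimal, self-contained sketch of that same pattern, not part of make_fit.py: the gzip_file() worker, the gzip compression level, and the write-to-.gz output are all illustrative assumptions.

    #!/usr/bin/env python3
    """Minimal sketch of the compress-in-parallel, write-sequentially pattern.

    Assumptions (not from the commit): plain gzip compression, and the result
    is written to <file>.gz instead of being packed into a FIT image.
    """

    import gzip
    import multiprocessing
    import sys


    def gzip_file(fname, level):
        """Worker: compress one file, return (fname, compressed bytes)."""
        with open(fname, 'rb') as inf:
            return fname, gzip.compress(inf.read(), compresslevel=level)


    def main(fnames, level=9):
        # Deduplicate while keeping order, like the 'todo' list in make_fit.py
        todo = list(dict.fromkeys(fnames))

        # Fan the CPU-bound compression out across all cores; starmap() unpacks
        # each (fname, level) tuple into gzip_file()'s arguments
        with multiprocessing.Pool() as pool:
            results = pool.starmap(gzip_file, [(fn, level) for fn in todo])

        # Cache keyed by filename, consumed by the sequential write phase
        cache = dict(results)
        for fname in todo:
            with open(fname + '.gz', 'wb') as outf:
                outf.write(cache[fname])


    if __name__ == '__main__':
        main(sys.argv[1:])

Since the compression is CPU-bound and the pool uses separate processes, the speed-up scales roughly with core count. In the commit itself, when args.compress is 'none' the cache stays empty and output_dtb() falls back to reading the file inline, as before.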
