Message-ID: <202512151418.Lrp7Rxz8-lkp@intel.com>
Date: Mon, 15 Dec 2025 14:29:50 +0800
From: kernel test robot <lkp@...el.com>
To: Nick Terrell <terrelln@...a.com>
Cc: oe-kbuild-all@...ts.linux.dev, linux-kernel@...r.kernel.org
Subject: lib/zstd/compress/zstd_fast.c:291 ZSTD_compressBlock_fast_noDict_generic() warn: inconsistent indenting
tree: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head: 8f0b4cce4481fb22653697cced8d0d04027cb1e8
commit: 65d1f5507ed2c78c64fce40e44e5574a9419eb09 zstd: Import upstream v1.5.7
date: 9 months ago
config: openrisc-randconfig-r072-20251214 (https://download.01.org/0day-ci/archive/20251215/202512151418.Lrp7Rxz8-lkp@intel.com/config)
compiler: or1k-linux-gcc (GCC) 15.1.0
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@...el.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202512151418.Lrp7Rxz8-lkp@intel.com/
New smatch warnings:
lib/zstd/compress/zstd_fast.c:291 ZSTD_compressBlock_fast_noDict_generic() warn: inconsistent indenting
lib/zstd/compress/zstd_opt.c:668 ZSTD_insertBtAndGetAllMatches() warn: maybe use && instead of &
lib/zstd/compress/zstd_lazy.c:1991 ZSTD_compressBlock_lazy_extDict_generic() warn: maybe use && instead of &
Old smatch warnings:
lib/zstd/compress/zstd_fast.c:316 ZSTD_compressBlock_fast_noDict_generic() warn: inconsistent indenting
lib/zstd/compress/zstd_fast.c:944 ZSTD_compressBlock_fast_extDict_generic() warn: maybe use && instead of &
lib/zstd/compress/zstd_opt.c:674 ZSTD_insertBtAndGetAllMatches() warn: maybe use && instead of &
lib/zstd/compress/zstd_opt.c:1382 ZSTD_compressBlock_opt_generic() warn: inconsistent indenting
lib/zstd/compress/zstd_opt.c:1392 ZSTD_compressBlock_opt_generic() warn: inconsistent indenting
lib/zstd/compress/zstd_lazy.c:1629 ZSTD_compressBlock_lazy_generic() warn: if statement not indented
lib/zstd/compress/zstd_lazy.c:1990 ZSTD_compressBlock_lazy_extDict_generic() warn: if statement not indented
lib/zstd/compress/zstd_lazy.c:2021 ZSTD_compressBlock_lazy_extDict_generic() warn: if statement not indented
lib/zstd/compress/zstd_lazy.c:2031 ZSTD_compressBlock_lazy_extDict_generic() warn: if statement not indented
lib/zstd/compress/zstd_lazy.c:2032 ZSTD_compressBlock_lazy_extDict_generic() warn: maybe use && instead of &
lib/zstd/compress/zstd_lazy.c:2063 ZSTD_compressBlock_lazy_extDict_generic() warn: if statement not indented
lib/zstd/compress/zstd_lazy.c:2064 ZSTD_compressBlock_lazy_extDict_generic() warn: maybe use && instead of &
lib/zstd/compress/zstd_lazy.c:2117 ZSTD_compressBlock_lazy_extDict_generic() warn: if statement not indented
lib/zstd/compress/zstd_lazy.c:2118 ZSTD_compressBlock_lazy_extDict_generic() warn: maybe use && instead of &
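
A note on the "maybe use && instead of &" reports above: smatch flags boolean
comparisons that are combined with the bitwise AND operator. In the zstd
sources this is typically deliberate: since '==' and '>' each yield 0 or 1,
'a & b' gives the same result as 'a && b' here, but it evaluates both operands
unconditionally, which lets the compiler emit branchless code on the hot path.
A minimal standalone sketch of the two forms (hypothetical helper names, not
the kernel code):

  #include <stdio.h>

  /* Bitwise form, as used in zstd: both comparisons are always evaluated
   * and the 0/1 results are AND-ed, so no branch is required.  This is
   * the pattern smatch reports. */
  static int check_bitwise(unsigned val, unsigned rval, unsigned rep)
  {
          return (val == rval) & (rep > 0);
  }

  /* Logical form smatch suggests: 'rep > 0' is only evaluated when the
   * first comparison holds (short-circuit evaluation). */
  static int check_logical(unsigned val, unsigned rval, unsigned rep)
  {
          return (val == rval) && (rep > 0);
  }

  int main(void)
  {
          /* Both forms agree for operands without side effects. */
          printf("%d %d\n", check_bitwise(7, 7, 3), check_logical(7, 7, 3));
          printf("%d %d\n", check_bitwise(7, 8, 3), check_logical(7, 8, 3));
          return 0;
  }

Where neither operand has side effects, as in the excerpt quoted below, the
two forms give the same result, so these reports read as style suggestions
rather than functional bugs.
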
vim +291 lib/zstd/compress/zstd_fast.c
  247
  248      step = stepSize;
  249      nextStep = ip0 + kStepIncr;
  250
  251      /* calculate positions, ip0 - anchor == 0, so we skip step calc */
  252      ip1 = ip0 + 1;
  253      ip2 = ip0 + step;
  254      ip3 = ip2 + 1;
  255
  256      if (ip3 >= ilimit) {
  257          goto _cleanup;
  258      }
  259
  260      hash0 = ZSTD_hashPtr(ip0, hlog, mls);
  261      hash1 = ZSTD_hashPtr(ip1, hlog, mls);
  262
  263      matchIdx = hashTable[hash0];
  264
  265      do {
  266          /* load repcode match for ip[2]*/
  267          const U32 rval = MEM_read32(ip2 - rep_offset1);
  268
  269          /* write back hash table entry */
  270          current0 = (U32)(ip0 - base);
  271          hashTable[hash0] = current0;
  272
  273          /* check repcode at ip[2] */
  274          if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
  275              ip0 = ip2;
  276              match0 = ip0 - rep_offset1;
  277              mLength = ip0[-1] == match0[-1];
  278              ip0 -= mLength;
  279              match0 -= mLength;
  280              offcode = REPCODE1_TO_OFFBASE;
  281              mLength += 4;
  282
  283              /* Write next hash table entry: it's already calculated.
  284               * This write is known to be safe because ip1 is before the
  285               * repcode (ip2). */
  286              hashTable[hash1] = (U32)(ip1 - base);
  287
  288              goto _match;
  289          }
  290
> 291          if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) {
  292              /* Write next hash table entry (it's already calculated).
  293               * This write is known to be safe because the ip1 == ip0 + 1,
  294               * so searching will resume after ip1 */
  295              hashTable[hash1] = (U32)(ip1 - base);
  296
  297              goto _offset;
  298          }
  299
  300          /* lookup ip[1] */
  301          matchIdx = hashTable[hash1];
  302
  303          /* hash ip[2] */
  304          hash0 = hash1;
  305          hash1 = ZSTD_hashPtr(ip2, hlog, mls);
  306
  307          /* advance to next positions */
  308          ip0 = ip1;
  309          ip1 = ip2;
  310          ip2 = ip3;
  311
  312          /* write back hash table entry */
  313          current0 = (U32)(ip0 - base);
  314          hashTable[hash0] = current0;
  315
  316          if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) {
  317              /* Write next hash table entry, since it's already calculated */
  318              if (step <= 4) {
  319                  /* Avoid writing an index if it's >= position where search will resume.
  320                   * The minimum possible match has length 4, so search can resume at ip0 + 4.
  321                   */
  322                  hashTable[hash1] = (U32)(ip1 - base);
  323              }
  324              goto _offset;
  325          }
  326
  327          /* lookup ip[1] */
  328          matchIdx = hashTable[hash1];
  329
  330          /* hash ip[2] */
  331          hash0 = hash1;
  332          hash1 = ZSTD_hashPtr(ip2, hlog, mls);
  333
  334          /* advance to next positions */
  335          ip0 = ip1;
  336          ip1 = ip2;
  337          ip2 = ip0 + step;
  338          ip3 = ip1 + step;
  339
  340          /* calculate step */
  341          if (ip2 >= nextStep) {
  342              step++;
  343              PREFETCH_L1(ip1 + 64);
  344              PREFETCH_L1(ip1 + 128);
  345              nextStep += kStepIncr;
  346          }
  347      } while (ip3 < ilimit);
  348
  349  _cleanup:
  350      /* Note that there are probably still a couple positions one could search.
  351       * However, it seems to be a meaningful performance hit to try to search
  352       * them. So let's not. */
  353
  354      /* When the repcodes are outside of the prefix, we set them to zero before the loop.
  355       * When the offsets are still zero, we need to restore them after the block to have a correct
  356       * repcode history. If only one offset was invalid, it is easy. The tricky case is when both
  357       * offsets were invalid. We need to figure out which offset to refill with.
  358       *     - If both offsets are zero they are in the same order.
  359       *     - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
  360       *     - If only one is zero, we need to decide which offset to restore.
  361       *         - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
  362       *         - It is impossible for rep_offset2 to be non-zero.
  363       *
  364       * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
  365       * set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
  366       */
  367      offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;
  368
  369      /* save reps for next block */
  370      rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
  371      rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;
  372
  373      /* Return the last literals size */
  374      return (size_t)(iend - anchor);
  375
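
As an aside on the excerpt: the repcode path at lines 277-279 extends the
match backwards by one byte without a branch. 'ip0[-1] == match0[-1]'
evaluates to 0 or 1, both pointers are moved back by that amount, and the
minimum match length of 4 is then added. A standalone sketch of the idea
(hypothetical buffer and names, not the kernel code):

  #include <stdio.h>

  int main(void)
  {
          /* Hypothetical input: the four bytes at 'cur' repeat the four
           * bytes at 'cand', and the byte before each is also equal. */
          const char buf[] = "zabcdzabcd";
          const char *cand = buf + 1;  /* first "abcd" */
          const char *cur  = buf + 6;  /* second "abcd" */
          size_t mLength;

          /* Branchless back-extension: the comparison yields 0 or 1, so
           * both pointers step back exactly when the preceding bytes
           * match. */
          mLength = (cur[-1] == cand[-1]);
          cur  -= mLength;
          cand -= mLength;
          mLength += 4;  /* minimum match length in zstd_fast */

          printf("match length %zu at offset %td\n", mLength, cur - buf);
          return 0;
  }

Here the preceding bytes ('z') match, so the reported match starts one byte
earlier with length 5; had they differed, the comparison would contribute 0
and the length would stay at the minimum of 4.
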
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki