Date:   Thu, 15 Dec 2016 12:23:25 +0800
From:   kbuild test robot <lkp@...el.com>
To:     "Jason A. Donenfeld" <Jason@...c4.com>
Cc:     kbuild-all@...org, Netdev <netdev@...r.kernel.org>,
        kernel-hardening@...ts.openwall.com,
        LKML <linux-kernel@...r.kernel.org>,
        linux-crypto@...r.kernel.org,
        "Jason A. Donenfeld" <Jason@...c4.com>,
        Jean-Philippe Aumasson <jeanphilippe.aumasson@...il.com>,
        "Daniel J . Bernstein" <djb@...yp.to>,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Eric Biggers <ebiggers3@...il.com>,
        David Laight <David.Laight@...lab.com>
Subject: Re: [PATCH v4 1/4] siphash: add cryptographically secure hashtable
 function

Hi Jason,

[auto build test ERROR on linus/master]
[also build test ERROR on v4.9 next-20161215]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Jason-A-Donenfeld/siphash-add-cryptographically-secure-hashtable-function/20161215-095213
config: ia64-allmodconfig (attached as .config)
compiler: ia64-linux-gcc (GCC) 6.2.0
reproduce:
        wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=ia64 

Note: the linux-review/Jason-A-Donenfeld/siphash-add-cryptographically-secure-hashtable-function/20161215-095213 HEAD 3e343f4316f94cded0d1384cf35957fd51dbbc28 builds fine.
      It only hurts bisectability.

All errors/warnings (new ones prefixed by >>):

   In file included from include/linux/linkage.h:6:0,
                    from include/linux/kernel.h:6,
                    from lib/siphash.c:12:
>> lib/siphash.c:152:15: error: 'siphash24_unaligned' undeclared here (not in a function)
    EXPORT_SYMBOL(siphash24_unaligned);
                  ^
   include/linux/export.h:58:16: note: in definition of macro '___EXPORT_SYMBOL'
     extern typeof(sym) sym;      \
                   ^~~
>> lib/siphash.c:152:1: note: in expansion of macro 'EXPORT_SYMBOL'
    EXPORT_SYMBOL(siphash24_unaligned);
    ^~~~~~~~~~~~~
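
The failure happens purely at the declaration stage: as the export.h note above shows, EXPORT_SYMBOL() expands (via ___EXPORT_SYMBOL) to a declaration of the form "extern typeof(sym) sym;", and applying typeof() to a name that has never been declared in the translation unit is a hard error. A minimal standalone sketch of the same failure mode, using a hypothetical macro and function names purely for illustration (the real macro chain also emits ksymtab entries and handles symbol CRCs, but the undeclared-identifier error comes from this declaration alone):

        /* toy stand-in for the declaration that ___EXPORT_SYMBOL emits */
        #define EXPORT_LIKE(sym) extern typeof(sym) sym

        int my_hash(void) { return 0; }

        EXPORT_LIKE(my_hash);       /* fine: my_hash is declared above */
        /* EXPORT_LIKE(my_hsh); */  /* would fail: 'my_hsh' undeclared here (not in a function) */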

vim +/siphash24_unaligned +152 lib/siphash.c

     6	 * https://131002.net/siphash/
     7	 *
     8	 * This implementation is specifically for SipHash2-4.
     9	 */
    10	
    11	#include <linux/siphash.h>
  > 12	#include <linux/kernel.h>
    13	#include <asm/unaligned.h>
    14	
    15	#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
    16	#include <linux/dcache.h>
    17	#include <asm/word-at-a-time.h>
    18	#endif
    19	
    20	static inline u16 le16_to_cpuvp(const void *p)
    21	{
    22		return le16_to_cpup(p);
    23	}
    24	static inline u32 le32_to_cpuvp(const void *p)
    25	{
    26		return le32_to_cpup(p);
    27	}
    28	static inline u64 le64_to_cpuvp(const void *p)
    29	{
    30		return le64_to_cpup(p);
    31	}
    32	
    33	#define SIPROUND \
    34		do { \
    35		v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
    36		v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
    37		v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
    38		v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
    39		} while(0)
    40	
    41	/**
    42	 * siphash - compute 64-bit siphash PRF value
    43	 * @data: buffer to hash, must be aligned to SIPHASH_ALIGNMENT
    44	 * @len: length of @data
    45	 * @key: key buffer of size SIPHASH_KEY_LEN, must be aligned to SIPHASH_ALIGNMENT
    46	 */
    47	u64 siphash(const u8 *data, size_t len, const u8 key[SIPHASH_KEY_LEN])
    48	{
    49		u64 v0 = 0x736f6d6570736575ULL;
    50		u64 v1 = 0x646f72616e646f6dULL;
    51		u64 v2 = 0x6c7967656e657261ULL;
    52		u64 v3 = 0x7465646279746573ULL;
    53		u64 b = ((u64)len) << 56;
    54		u64 k0 = le64_to_cpuvp(key);
    55		u64 k1 = le64_to_cpuvp(key + sizeof(u64));
    56		u64 m;
    57		const u8 *end = data + len - (len % sizeof(u64));
    58		const u8 left = len & (sizeof(u64) - 1);
    59		v3 ^= k1;
    60		v2 ^= k0;
    61		v1 ^= k1;
    62		v0 ^= k0;
    63		for (; data != end; data += sizeof(u64)) {
    64			m = le64_to_cpuvp(data);
    65			v3 ^= m;
    66			SIPROUND;
    67			SIPROUND;
    68			v0 ^= m;
    69		}
    70	#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
    71		if (left)
    72			b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & bytemask_from_count(left)));
    73	#else
    74		switch (left) {
    75		case 7: b |= ((u64)data[6]) << 48;
    76		case 6: b |= ((u64)data[5]) << 40;
    77		case 5: b |= ((u64)data[4]) << 32;
    78		case 4: b |= le32_to_cpuvp(data); break;
    79		case 3: b |= ((u64)data[2]) << 16;
    80		case 2: b |= le16_to_cpuvp(data); break;
    81		case 1: b |= data[0];
    82		}
    83	#endif
    84		v3 ^= b;
    85		SIPROUND;
    86		SIPROUND;
    87		v0 ^= b;
    88		v2 ^= 0xff;
    89		SIPROUND;
    90		SIPROUND;
    91		SIPROUND;
    92		SIPROUND;
    93		return (v0 ^ v1) ^ (v2 ^ v3);
    94	}
    95	EXPORT_SYMBOL(siphash);
    96	
    97	#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
    98	/**
    99	 * siphash_unaligned - compute 64-bit siphash PRF value, without alignment requirements
   100	 * @data: buffer to hash
   101	 * @len: length of @data
   102	 * @key: key buffer of size SIPHASH_KEY_LEN, must be aligned to SIPHASH_ALIGNMENT
   103	 */
   104	u64 siphash_unaligned(const u8 *data, size_t len, const u8 key[SIPHASH_KEY_LEN])
   105	{
   106		u64 v0 = 0x736f6d6570736575ULL;
   107		u64 v1 = 0x646f72616e646f6dULL;
   108		u64 v2 = 0x6c7967656e657261ULL;
   109		u64 v3 = 0x7465646279746573ULL;
   110		u64 b = ((u64)len) << 56;
   111		u64 k0 = le64_to_cpuvp(key);
   112		u64 k1 = le64_to_cpuvp(key + sizeof(u64));
   113		u64 m;
   114		const u8 *end = data + len - (len % sizeof(u64));
   115		const u8 left = len & (sizeof(u64) - 1);
   116		v3 ^= k1;
   117		v2 ^= k0;
   118		v1 ^= k1;
   119		v0 ^= k0;
   120		for (; data != end; data += sizeof(u64)) {
   121			m = get_unaligned_le64(data);
   122			v3 ^= m;
   123			SIPROUND;
   124			SIPROUND;
   125			v0 ^= m;
   126		}
   127	#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
   128		if (left)
   129			b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & bytemask_from_count(left)));
   130	#else
   131		switch (left) {
   132		case 7: b |= ((u64)data[6]) << 48;
   133		case 6: b |= ((u64)data[5]) << 40;
   134		case 5: b |= ((u64)data[4]) << 32;
   135		case 4: b |= get_unaligned_le32(data); break;
   136		case 3: b |= ((u64)data[2]) << 16;
   137		case 2: b |= get_unaligned_le16(data); break;
   138		case 1: b |= data[0];
   139		}
   140	#endif
   141		v3 ^= b;
   142		SIPROUND;
   143		SIPROUND;
   144		v0 ^= b;
   145		v2 ^= 0xff;
   146		SIPROUND;
   147		SIPROUND;
   148		SIPROUND;
   149		SIPROUND;
   150		return (v0 ^ v1) ^ (v2 ^ v3);
   151	}
 > 152	EXPORT_SYMBOL(siphash24_unaligned);
   153	#endif
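
The mismatch is between the function this file actually defines, siphash_unaligned() at line 104 above, and the name handed to EXPORT_SYMBOL() at line 152, siphash24_unaligned, which is never declared anywhere. Note that this block is only compiled when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set, which is presumably why the ia64 allmodconfig build is the one that trips over it. A sketch of the likely one-line fix, assuming the intent is to export the function as defined (renaming the function itself to siphash24_unaligned throughout would work equally well):

        -EXPORT_SYMBOL(siphash24_unaligned);
        +EXPORT_SYMBOL(siphash_unaligned);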

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
