/*
 * xxHash - Extremely Fast Hash algorithm
 * Copyright (C) 2020 Yann Collet
 *
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other materials provided with the
 *      distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at:
 *   - xxHash homepage: https://www.xxhash.com
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */
/*!
 * @file xxh_x86dispatch.c
 *
 * Automatic dispatcher code for the @ref xxh3_family on x86-based targets.
 *
 * Optional add-on.
 *
 * **Compile this file with the default flags for your target.** Do not compile
 * with flags like `-mavx*`, `-march=native`, or `/arch:AVX*`; doing so will
 * trigger an error. See @ref XXH_X86DISPATCH_ALLOW_AVX for details.
 *
 * @defgroup dispatch x86 Dispatcher
 * @{
 */
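
/*
 * Illustrative build sketch (an assumption, not part of the original file):
 * in the usual layout of the xxHash repository, this dispatcher is compiled
 * next to xxhash.c with plain target-default flags and linked into the
 * program, e.g.:
 *
 *     cc -O3 -c xxhash.c xxh_x86dispatch.c
 *
 * The essential point is that no -mavx*, -march=native, or /arch:AVX* option
 * is passed when compiling this particular file.
 */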
#if defined (__cplusplus)
extern "C" {
#endif

#if !(defined(__x86_64__) || defined(__i386__) || defined(_M_IX86) || defined(_M_X64))
#  error "Dispatching is currently only supported on x86 and x86_64."
#endif

/*!
 * @def XXH_X86DISPATCH_ALLOW_AVX
 * @brief Disables the AVX sanity check.
 *
 * Don't compile xxh_x86dispatch.c with options like `-mavx*`, `-march=native`,
 * or `/arch:AVX*`. It is intended to be compiled for the minimum target, and
 * it selectively enables SSE2, AVX2, and AVX512 when it is needed.
 *
 * Using this option _globally_ allows this feature, and therefore makes it
 * undefined behavior to execute on any CPU without said feature.
 *
 * Even if the source code isn't directly using AVX intrinsics in a function,
 * the compiler can still generate AVX code from autovectorization and by
 * "upgrading" SSE2 intrinsics to use the VEX prefixes (a.k.a. AVX128).
 *
 * Use the same flags that you use to compile the rest of the program; this
 * file will safely generate SSE2, AVX2, and AVX512 without these flags.
 *
 * Define XXH_X86DISPATCH_ALLOW_AVX to ignore this check, and feel free to open
 * an issue if there is a target in the future where AVX is a default feature.
 */
#ifdef XXH_DOXYGEN
#  define XXH_X86DISPATCH_ALLOW_AVX
#endif

#if defined(__AVX__) && !defined(XXH_X86DISPATCH_ALLOW_AVX)
#  error "Do not compile xxh_x86dispatch.c with AVX enabled! See the comment above."
#endif
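
/*
 * Illustrative override sketch (an assumption about intended usage): if a
 * build really does target AVX-capable CPUs only, the check above can be
 * silenced by defining the macro on the command line for this file, e.g.:
 *
 *     cc -O3 -mavx2 -DXXH_X86DISPATCH_ALLOW_AVX -c xxh_x86dispatch.c
 *
 * As the doc comment warns, the resulting binary is then undefined behavior
 * on any CPU without AVX support.
 */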
#ifdef __has_include
#  define XXH_HAS_INCLUDE(header) __has_include(header)
#else
#  define XXH_HAS_INCLUDE(header) 0
#endif
/*!
 * @def XXH_DISPATCH_SCALAR
 * @brief Enables/disables the scalar code path.
 *
 * If this is defined to 0, SSE2 support is assumed. This reduces code size
 * when the scalar path is not needed.
 *
 * This is automatically defined to 0 when...
 *  - SSE2 support is enabled in the compiler
 *  - Targeting x86_64
 *  - Targeting Android x86
 *  - Targeting macOS
 */
#ifndef XXH_DISPATCH_SCALAR
#  if defined(__SSE2__) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2) /* SSE2 on by default */ \
     || defined(__x86_64__) || defined(_M_X64) /* x86_64 */ \
     || defined(__ANDROID__) || defined(__APPLE__) /* Android or macOS */
#    define XXH_DISPATCH_SCALAR 0 /* disable */
#  else
#    define XXH_DISPATCH_SCALAR 1
#  endif
#endif
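
/*
 * Illustrative sketch (an assumption): on a 32-bit build where the compiler
 * does not already assume SSE2, the scalar fallback can be traded for smaller
 * code by asserting the SSE2 baseline explicitly, e.g.:
 *
 *     cc -O3 -m32 -DXXH_DISPATCH_SCALAR=0 -c xxh_x86dispatch.c
 *
 * Per the doc comment above, this assumes every target CPU supports SSE2.
 */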
/*!
 * @def XXH_DISPATCH_AVX2
 * @brief Enables/disables dispatching for AVX2.
 *
 * This is automatically detected if it is not defined.
 *  - GCC 4.7 and later are known to support AVX2, but GCC 4.9 or later is
 *    required to get the AVX2 intrinsics and typedefs without -mavx -mavx2.
 *  - Visual Studio 2013 Update 2 and later are known to support AVX2.
 *  - The GCC/Clang internal header `<avx2intrin.h>` is detected. While this is
 *    not allowed to be included directly, it still appears in the builtin
 *    include path and is detectable with `__has_include`.
 *
 * @see XXH_AVX2
 */
#ifndef XXH_DISPATCH_AVX2
#  if (defined(__GNUC__) && (__GNUC__ > 4)) /* GCC 5.0+ */ \
   || (defined(_MSC_VER) && _MSC_VER >= 1900) /* VS 2015+ */ \
   || (defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 180030501) /* VS 2013 Update 2 */ \
   || XXH_HAS_INCLUDE(<avx2intrin.h>) /* GCC/Clang internal header */
#    define XXH_DISPATCH_AVX2 1 /* enable dispatch towards AVX2 */
#  else
#    define XXH_DISPATCH_AVX2 0
#  endif
#endif /* XXH_DISPATCH_AVX2 */
/*!
 * @def XXH_DISPATCH_AVX512
 * @brief Enables/disables dispatching for AVX512.
 *
 * Automatically detected if one of the following conditions is met:
 *  - GCC 4.9 and later are known to support AVX512.
 *  - Visual Studio 2017 and later are known to support AVX512.
 *  - The GCC/Clang internal header `<avx512fintrin.h>` is detected. While this
 *    is not allowed to be included directly, it still appears in the builtin
 *    include path and is detectable with `__has_include`.
 *
 * @see XXH_AVX512
 */
#ifndef XXH_DISPATCH_AVX512
#  if (defined(__GNUC__) \
       && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9))) /* GCC 4.9+ */ \
   || (defined(_MSC_VER) && _MSC_VER >= 1910) /* VS 2017+ */ \
   || XXH_HAS_INCLUDE(<avx512fintrin.h>) /* GCC/Clang internal header */
#    define XXH_DISPATCH_AVX512 1 /* enable dispatch towards AVX512 */
#  else
#    define XXH_DISPATCH_AVX512 0
#  endif
#endif /* XXH_DISPATCH_AVX512 */
/*!
 * @def XXH_TARGET_SSE2
 * @brief Allows a function to be compiled with SSE2 intrinsics.
 *
 * Uses `__attribute__((__target__("sse2")))` on GCC to allow SSE2 to be used
 * even with `-mno-sse2`.
 *
 * @def XXH_TARGET_AVX2
 * @brief Like @ref XXH_TARGET_SSE2, but for AVX2.
 *
 * @def XXH_TARGET_AVX512
 * @brief Like @ref XXH_TARGET_SSE2, but for AVX512.
 */
#if defined(__GNUC__)
#  include <emmintrin.h> /* SSE2 */
#  if XXH_DISPATCH_AVX2 || XXH_DISPATCH_AVX512
#    include <immintrin.h> /* AVX2, AVX512F */
#  endif
#  define XXH_TARGET_SSE2 __attribute__((__target__("sse2")))
#  define XXH_TARGET_AVX2 __attribute__((__target__("avx2")))
#  define XXH_TARGET_AVX512 __attribute__((__target__("avx512f")))
#elif defined(_MSC_VER)
#  include <intrin.h>
#  define XXH_TARGET_SSE2
#  define XXH_TARGET_AVX2
#  define XXH_TARGET_AVX512
#else
#  error "Dispatching is currently not supported for your compiler."
#endif

#ifdef XXH_DISPATCH_DEBUG
/* debug logging */
#  include <stdio.h>
#  define XXH_debugPrint(str) { fprintf(stderr, "DEBUG: xxHash dispatch: %s \n", str); fflush(NULL); }
#else
#  define XXH_debugPrint(str) ((void)0)
#  undef NDEBUG /* avoid redefinition */
#  define NDEBUG
#endif
#include <assert.h>

#define XXH_INLINE_ALL
#define XXH_X86DISPATCH
#include "xxhash.h"
/*
 * Support both AT&T and Intel dialects
 *
 * GCC doesn't convert AT&T syntax to Intel syntax, and will error out if
 * compiled with -masm=intel. Instead, it supports dialect switching with
 * curly braces: { AT&T syntax | Intel syntax }
 *
 * Clang's integrated assembler automatically converts AT&T syntax to Intel if
 * needed, making the dialect switching useless (it isn't even supported).
 *
 * Note: Comments are written in the inline assembly itself.
 */
#ifdef __clang__
#  define XXH_I_ATT(intel, att) att "\n\t"
#else
#  define XXH_I_ATT(intel, att) "{" att "|" intel "}\n\t"
#endif

/*!
 * @internal
 * @brief Runs CPUID.
 *
 * @param eax, ecx The parameters to pass to CPUID, %eax and %ecx respectively.
 * @param abcd The array to store the result in, `{ eax, ebx, ecx, edx }`
 */
static void XXH_cpuid(xxh_u32 eax, xxh_u32 ecx, xxh_u32* abcd)
{
#if defined(_MSC_VER)
    __cpuidex(abcd, eax, ecx);
#else
    xxh_u32 ebx, edx;
# if defined(__i386__) && defined(__PIC__)
    __asm__(
        "# Call CPUID\n\t"
        "#\n\t"
        "# On 32-bit x86 with PIC enabled, we are not allowed to overwrite\n\t"
        "# EBX, so we use EDI instead.\n\t"
        XXH_I_ATT("mov     edi, ebx",  "movl    %%ebx, %%edi")
        XXH_I_ATT("cpuid",             "cpuid"               )
        XXH_I_ATT("xchg    edi, ebx",  "xchgl   %%ebx, %%edi")
        : "=D" (ebx),
# else
    __asm__(
        "# Call CPUID\n\t"
        XXH_I_ATT("cpuid",             "cpuid")
        : "=b" (ebx),
# endif
          "+a" (eax), "+c" (ecx), "=d" (edx));
    abcd[0] = eax;
    abcd[1] = ebx;
    abcd[2] = ecx;
    abcd[3] = edx;
#endif
}

/*
 * Modified version of Intel's guide
 * https://software.intel.com/en-us/articles/how-to-detect-new-instruction-support-in-the-4th-generation-intel-core-processor-family
 */
#if XXH_DISPATCH_AVX2 || XXH_DISPATCH_AVX512
/*!
 * @internal
 * @brief Runs `XGETBV`.
 *
 * While the CPU may support AVX2, the operating system might not properly save
 * the full YMM/ZMM registers.
 *
 * xgetbv is used for detecting this: Any compliant operating system will define
 * a set of flags in the xcr0 register indicating how it saves the AVX registers.
 *
 * You can manually disable this flag on Windows by running, as admin:
 *
 *    bcdedit.exe /set xsavedisable 1
 *
 * and rebooting. Run the same command with 0 to re-enable it.
 */
static xxh_u64 XXH_xgetbv(void)
{
#if defined(_MSC_VER)
    return _xgetbv(0); /* min VS2010 SP1 compiler is required */
#else
    xxh_u32 xcr0_lo, xcr0_hi;
    __asm__(
        "# Call XGETBV\n\t"
        "#\n\t"
        "# Older assemblers (e.g. macOS's ancient GAS version) don't support\n\t"
        "# the XGETBV opcode, so we encode it by hand instead.\n\t"
        "# See <https://github.com/asmjit/asmjit/issues/78> for details.\n\t"
        ".byte   0x0f, 0x01, 0xd0\n\t"
        : "=a" (xcr0_lo), "=d" (xcr0_hi) : "c" (0));
    return xcr0_lo | ((xxh_u64)xcr0_hi << 32);
#endif
}
#endif

#define XXH_SSE2_CPUID_MASK (1 << 26)
#define XXH_OSXSAVE_CPUID_MASK ((1 << 26) | (1 << 27))
#define XXH_AVX2_CPUID_MASK (1 << 5)
#define XXH_AVX2_XGETBV_MASK ((1 << 2) | (1 << 1))
#define XXH_AVX512F_CPUID_MASK (1 << 16)
#define XXH_AVX512F_XGETBV_MASK ((7 << 5) | (1 << 2) | (1 << 1))
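
/*
 * For reference, these masks correspond to the standard x86 feature bits:
 *   - XXH_SSE2_CPUID_MASK:     CPUID leaf 1, EDX bit 26 (SSE2)
 *   - XXH_OSXSAVE_CPUID_MASK:  CPUID leaf 1, ECX bits 26/27 (XSAVE + OSXSAVE)
 *   - XXH_AVX2_CPUID_MASK:     CPUID leaf 7, EBX bit 5 (AVX2)
 *   - XXH_AVX512F_CPUID_MASK:  CPUID leaf 7, EBX bit 16 (AVX512F)
 *   - XXH_AVX2_XGETBV_MASK:    XCR0 bits 1-2 (XMM + YMM state saved by the OS)
 *   - XXH_AVX512F_XGETBV_MASK: XCR0 bits 1-2 and 5-7 (adds opmask and ZMM state)
 * This matches how XXH_featureTest() consumes them below.
 */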
/*!
 * @internal
 * @brief Returns the best XXH3 implementation.
 *
 * Runs various CPUID/XGETBV tests to try and determine the best implementation.
 *
 * @return The best @ref XXH_VECTOR implementation.
 * @see XXH_VECTOR_TYPES
 */
static int XXH_featureTest(void)
{
    xxh_u32 abcd[4];
    xxh_u32 max_leaves;
    int best = XXH_SCALAR;
#if XXH_DISPATCH_AVX2 || XXH_DISPATCH_AVX512
    xxh_u64 xgetbv_val;
#endif
#if defined(__GNUC__) && defined(__i386__)
    xxh_u32 cpuid_supported;
    __asm__(
        "# For the sake of ruthless backwards compatibility, check if CPUID\n\t"
        "# is supported in the EFLAGS on i386.\n\t"
        "# This is not necessary on x86_64 - CPUID is mandatory.\n\t"
        "# The ID flag (bit 21) in the EFLAGS register indicates support\n\t"
        "# for the CPUID instruction. If a software procedure can set and\n\t"
        "# clear this flag, the processor executing the procedure supports\n\t"
        "# the CPUID instruction.\n\t"
        "# <https://c9x.me/x86/html/file_module_x86_id_45.html>\n\t"
        "#\n\t"
        "# Routine is from <https://wiki.osdev.org/CPUID>.\n\t"

        "# Save EFLAGS\n\t"
        XXH_I_ATT("pushfd", "pushfl")
        "# Store EFLAGS\n\t"
        XXH_I_ATT("pushfd", "pushfl")
        "# Invert the ID bit in stored EFLAGS\n\t"
        XXH_I_ATT("xor     dword ptr[esp], 0x200000", "xorl    $0x200000, (%%esp)")
        "# Load stored EFLAGS (with ID bit inverted)\n\t"
        XXH_I_ATT("popfd", "popfl")
        "# Store EFLAGS again (ID bit may or not be inverted)\n\t"
        XXH_I_ATT("pushfd", "pushfl")
        "# eax = modified EFLAGS (ID bit may or may not be inverted)\n\t"
        XXH_I_ATT("pop     eax", "popl    %%eax")
        "# eax = whichever bits were changed\n\t"
        XXH_I_ATT("xor     eax, dword ptr[esp]", "xorl    (%%esp), %%eax")
        "# Restore original EFLAGS\n\t"
        XXH_I_ATT("popfd", "popfl")
        "# eax = zero if ID bit can't be changed, else non-zero\n\t"
        XXH_I_ATT("and     eax, 0x200000", "andl    $0x200000, %%eax")
        : "=a" (cpuid_supported) :: "cc");

    if (XXH_unlikely(!cpuid_supported)) {
        XXH_debugPrint("CPUID support is not detected!");
        return best;
    }
#endif
    /* Check how many CPUID pages we have */
    XXH_cpuid(0, 0, abcd);
    max_leaves = abcd[0];

    /* Shouldn't happen on hardware, but happens on some QEMU configs. */
    if (XXH_unlikely(max_leaves == 0)) {
        XXH_debugPrint("Max CPUID leaves == 0!");
        return best;
    }
    /* Check for SSE2, OSXSAVE and xgetbv */
    XXH_cpuid(1, 0, abcd);

    /*
     * Test for SSE2. The check is redundant on x86_64, but it doesn't hurt.
     */
    if (XXH_unlikely((abcd[3] & XXH_SSE2_CPUID_MASK) != XXH_SSE2_CPUID_MASK))
        return best;

    XXH_debugPrint("SSE2 support detected.");

    best = XXH_SSE2;
#if XXH_DISPATCH_AVX2 || XXH_DISPATCH_AVX512
    /* Make sure we have enough leaves */
    if (XXH_unlikely(max_leaves < 7))
        return best;

    /* Test for OSXSAVE and XGETBV */
    if ((abcd[2] & XXH_OSXSAVE_CPUID_MASK) != XXH_OSXSAVE_CPUID_MASK)
        return best;

    /* CPUID check for AVX features */
    XXH_cpuid(7, 0, abcd);

    xgetbv_val = XXH_xgetbv();
#if XXH_DISPATCH_AVX2
    /* Validate that AVX2 is supported by the CPU */
    if ((abcd[1] & XXH_AVX2_CPUID_MASK) != XXH_AVX2_CPUID_MASK)
        return best;

    /* Validate that the OS supports YMM registers */
    if ((xgetbv_val & XXH_AVX2_XGETBV_MASK) != XXH_AVX2_XGETBV_MASK) {
        XXH_debugPrint("AVX2 supported by the CPU, but not the OS.");
        return best;
    }

    /* AVX2 supported */
    XXH_debugPrint("AVX2 support detected.");
    best = XXH_AVX2;
#endif
#if XXH_DISPATCH_AVX512
    /* Check if AVX512F is supported by the CPU */
    if ((abcd[1] & XXH_AVX512F_CPUID_MASK) != XXH_AVX512F_CPUID_MASK) {
        XXH_debugPrint("AVX512F not supported by CPU");
        return best;
    }

    /* Validate that the OS supports ZMM registers */
    if ((xgetbv_val & XXH_AVX512F_XGETBV_MASK) != XXH_AVX512F_XGETBV_MASK) {
        XXH_debugPrint("AVX512F supported by the CPU, but not the OS.");
        return best;
    }

    /* AVX512F supported */
    XXH_debugPrint("AVX512F support detected.");
    best = XXH_AVX512;
#endif
#endif
    return best;
}
/* === Vector implementations === */

/*!
 * @internal
 * @brief Defines the various dispatch functions.
 *
 * TODO: Consolidate?
 *
 * @param suffix The suffix for the functions, e.g. sse2 or scalar
 * @param target XXH_TARGET_* or empty.
 */
#define XXH_DEFINE_DISPATCH_FUNCS(suffix, target)                             \
                                                                              \
/* === XXH3, default variants === */                                          \
                                                                              \
XXH_NO_INLINE target XXH64_hash_t                                             \
XXHL64_default_##suffix(const void* XXH_RESTRICT input, size_t len)           \
{                                                                             \
    return XXH3_hashLong_64b_internal(                                        \
               input, len, XXH3_kSecret, sizeof(XXH3_kSecret),                \
               XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix        \
    );                                                                        \
}                                                                             \
                                                                              \
/* === XXH3, Seeded variants === */                                           \
                                                                              \
XXH_NO_INLINE target XXH64_hash_t                                             \
XXHL64_seed_##suffix(const void* XXH_RESTRICT input, size_t len,              \
                     XXH64_hash_t seed)                                       \
{                                                                             \
    return XXH3_hashLong_64b_withSeed_internal(                               \
               input, len, seed, XXH3_accumulate_512_##suffix,                \
               XXH3_scrambleAcc_##suffix, XXH3_initCustomSecret_##suffix      \
    );                                                                        \
}                                                                             \
                                                                              \
/* === XXH3, Secret variants === */                                           \
                                                                              \
XXH_NO_INLINE target XXH64_hash_t                                             \
XXHL64_secret_##suffix(const void* XXH_RESTRICT input, size_t len,            \
                       const void* secret, size_t secretLen)                  \
{                                                                             \
    return XXH3_hashLong_64b_internal(                                        \
               input, len, secret, secretLen,                                 \
               XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix        \
    );                                                                        \
}                                                                             \
                                                                              \
/* === XXH3 update variants === */                                            \
                                                                              \
XXH_NO_INLINE target XXH_errorcode                                            \
XXH3_update_##suffix(XXH3_state_t* state, const void* input, size_t len)      \
{                                                                             \
    return XXH3_update(state, (const xxh_u8*)input, len,                      \
                       XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix); \
}                                                                             \
                                                                              \
/* === XXH128 default variants === */                                         \
                                                                              \
XXH_NO_INLINE target XXH128_hash_t                                            \
XXHL128_default_##suffix(const void* XXH_RESTRICT input, size_t len)          \
{                                                                             \
    return XXH3_hashLong_128b_internal(                                       \
               input, len, XXH3_kSecret, sizeof(XXH3_kSecret),                \
               XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix        \
    );                                                                        \
}                                                                             \
                                                                              \
/* === XXH128 Secret variants === */                                          \
                                                                              \
XXH_NO_INLINE target XXH128_hash_t                                            \
XXHL128_secret_##suffix(const void* XXH_RESTRICT input, size_t len,           \
                        const void* XXH_RESTRICT secret, size_t secretLen)    \
{                                                                             \
    return XXH3_hashLong_128b_internal(                                       \
               input, len, (const xxh_u8*)secret, secretLen,                  \
               XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix);      \
}                                                                             \
                                                                              \
/* === XXH128 Seeded variants === */                                          \
                                                                              \
XXH_NO_INLINE target XXH128_hash_t                                            \
XXHL128_seed_##suffix(const void* XXH_RESTRICT input, size_t len,             \
                      XXH64_hash_t seed)                                      \
{                                                                             \
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed,             \
                XXH3_accumulate_512_##suffix, XXH3_scrambleAcc_##suffix,      \
                XXH3_initCustomSecret_##suffix);                              \
}

/* End XXH_DEFINE_DISPATCH_FUNCS */

#if XXH_DISPATCH_SCALAR
XXH_DEFINE_DISPATCH_FUNCS(scalar, /* nothing */)
#endif
XXH_DEFINE_DISPATCH_FUNCS(sse2, XXH_TARGET_SSE2)
#if XXH_DISPATCH_AVX2
XXH_DEFINE_DISPATCH_FUNCS(avx2, XXH_TARGET_AVX2)
#endif
#if XXH_DISPATCH_AVX512
XXH_DEFINE_DISPATCH_FUNCS(avx512, XXH_TARGET_AVX512)
#endif
#undef XXH_DEFINE_DISPATCH_FUNCS
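
/*
 * For illustration: each XXH_DEFINE_DISPATCH_FUNCS(suffix, target) line above
 * instantiates one full set of long-input kernels for that code path, namely
 * XXHL64_default_<suffix>, XXHL64_seed_<suffix>, XXHL64_secret_<suffix>,
 * XXH3_update_<suffix>, XXHL128_default_<suffix>, XXHL128_secret_<suffix>,
 * and XXHL128_seed_<suffix>. These are the names the dispatch tables below
 * refer to.
 */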
/* ==== Dispatchers ==== */

typedef XXH64_hash_t (*XXH3_dispatchx86_hashLong64_default)(const void* XXH_RESTRICT, size_t);
typedef XXH64_hash_t (*XXH3_dispatchx86_hashLong64_withSeed)(const void* XXH_RESTRICT, size_t, XXH64_hash_t);
typedef XXH64_hash_t (*XXH3_dispatchx86_hashLong64_withSecret)(const void* XXH_RESTRICT, size_t, const void* XXH_RESTRICT, size_t);
typedef XXH_errorcode (*XXH3_dispatchx86_update)(XXH3_state_t*, const void*, size_t);

typedef struct {
    XXH3_dispatchx86_hashLong64_default    hashLong64_default;
    XXH3_dispatchx86_hashLong64_withSeed   hashLong64_seed;
    XXH3_dispatchx86_hashLong64_withSecret hashLong64_secret;
    XXH3_dispatchx86_update                update;
} XXH_dispatchFunctions_s;

#define XXH_NB_DISPATCHES 4

/*!
 * @internal
 * @brief Table of dispatchers for @ref XXH3_64bits().
 *
 * @pre The indices must match @ref XXH_VECTOR_TYPE.
 */
static const XXH_dispatchFunctions_s XXH_kDispatch[XXH_NB_DISPATCHES] = {
#if XXH_DISPATCH_SCALAR
    /* Scalar */ { XXHL64_default_scalar, XXHL64_seed_scalar, XXHL64_secret_scalar, XXH3_update_scalar },
#else
    /* Scalar */ { NULL, NULL, NULL, NULL },
#endif
    /* SSE2   */ { XXHL64_default_sse2,   XXHL64_seed_sse2,   XXHL64_secret_sse2,   XXH3_update_sse2 },
#if XXH_DISPATCH_AVX2
    /* AVX2   */ { XXHL64_default_avx2,   XXHL64_seed_avx2,   XXHL64_secret_avx2,   XXH3_update_avx2 },
#else
    /* AVX2   */ { NULL, NULL, NULL, NULL },
#endif
#if XXH_DISPATCH_AVX512
    /* AVX512 */ { XXHL64_default_avx512, XXHL64_seed_avx512, XXHL64_secret_avx512, XXH3_update_avx512 }
#else
    /* AVX512 */ { NULL, NULL, NULL, NULL }
#endif
};

/*!
 * @internal
 * @brief The selected dispatch table for @ref XXH3_64bits().
 */
static XXH_dispatchFunctions_s XXH_g_dispatch = { NULL, NULL, NULL, NULL };

typedef XXH128_hash_t (*XXH3_dispatchx86_hashLong128_default)(const void* XXH_RESTRICT, size_t);
typedef XXH128_hash_t (*XXH3_dispatchx86_hashLong128_withSeed)(const void* XXH_RESTRICT, size_t, XXH64_hash_t);
typedef XXH128_hash_t (*XXH3_dispatchx86_hashLong128_withSecret)(const void* XXH_RESTRICT, size_t, const void* XXH_RESTRICT, size_t);

typedef struct {
    XXH3_dispatchx86_hashLong128_default    hashLong128_default;
    XXH3_dispatchx86_hashLong128_withSeed   hashLong128_seed;
    XXH3_dispatchx86_hashLong128_withSecret hashLong128_secret;
    XXH3_dispatchx86_update                 update;
} XXH_dispatch128Functions_s;

/*!
 * @internal
 * @brief Table of dispatchers for @ref XXH3_128bits().
 *
 * @pre The indices must match @ref XXH_VECTOR_TYPE.
 */
static const XXH_dispatch128Functions_s XXH_kDispatch128[XXH_NB_DISPATCHES] = {
#if XXH_DISPATCH_SCALAR
    /* Scalar */ { XXHL128_default_scalar, XXHL128_seed_scalar, XXHL128_secret_scalar, XXH3_update_scalar },
#else
    /* Scalar */ { NULL, NULL, NULL, NULL },
#endif
    /* SSE2   */ { XXHL128_default_sse2,   XXHL128_seed_sse2,   XXHL128_secret_sse2,   XXH3_update_sse2 },
#if XXH_DISPATCH_AVX2
    /* AVX2   */ { XXHL128_default_avx2,   XXHL128_seed_avx2,   XXHL128_secret_avx2,   XXH3_update_avx2 },
#else
    /* AVX2   */ { NULL, NULL, NULL, NULL },
#endif
#if XXH_DISPATCH_AVX512
    /* AVX512 */ { XXHL128_default_avx512, XXHL128_seed_avx512, XXHL128_secret_avx512, XXH3_update_avx512 }
#else
    /* AVX512 */ { NULL, NULL, NULL, NULL }
#endif
};
/*!
 * @internal
 * @brief The selected dispatch table for @ref XXH3_128bits().
 */
static XXH_dispatch128Functions_s XXH_g_dispatch128 = { NULL, NULL, NULL, NULL };
/*!
 * @internal
 * @brief Runs a CPUID check and sets the correct dispatch tables.
 */
static void XXH_setDispatch(void)
{
    int vecID = XXH_featureTest();
    XXH_STATIC_ASSERT(XXH_AVX512 == XXH_NB_DISPATCHES-1);
    assert(XXH_SCALAR <= vecID && vecID <= XXH_AVX512);
#if !XXH_DISPATCH_SCALAR
    assert(vecID != XXH_SCALAR);
#endif
#if !XXH_DISPATCH_AVX512
    assert(vecID != XXH_AVX512);
#endif
#if !XXH_DISPATCH_AVX2
    assert(vecID != XXH_AVX2);
#endif
    XXH_g_dispatch = XXH_kDispatch[vecID];
    XXH_g_dispatch128 = XXH_kDispatch128[vecID];
}

/* ==== XXH3 public functions ==== */

static XXH64_hash_t
XXH3_hashLong_64b_defaultSecret_selection(const void* input, size_t len,
                                          XXH64_hash_t seed64, const xxh_u8* secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    if (XXH_g_dispatch.hashLong64_default == NULL) XXH_setDispatch();
    return XXH_g_dispatch.hashLong64_default(input, len);
}

XXH64_hash_t XXH3_64bits_dispatch(const void* input, size_t len)
{
    return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_defaultSecret_selection);
}

static XXH64_hash_t
XXH3_hashLong_64b_withSeed_selection(const void* input, size_t len,
                                     XXH64_hash_t seed64, const xxh_u8* secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    if (XXH_g_dispatch.hashLong64_seed == NULL) XXH_setDispatch();
    return XXH_g_dispatch.hashLong64_seed(input, len, seed64);
}

XXH64_hash_t XXH3_64bits_withSeed_dispatch(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed_selection);
}

static XXH64_hash_t
XXH3_hashLong_64b_withSecret_selection(const void* input, size_t len,
                                       XXH64_hash_t seed64, const xxh_u8* secret, size_t secretLen)
{
    (void)seed64;
    if (XXH_g_dispatch.hashLong64_secret == NULL) XXH_setDispatch();
    return XXH_g_dispatch.hashLong64_secret(input, len, secret, secretLen);
}

XXH64_hash_t XXH3_64bits_withSecret_dispatch(const void* input, size_t len, const void* secret, size_t secretLen)
{
    return XXH3_64bits_internal(input, len, 0, secret, secretLen, XXH3_hashLong_64b_withSecret_selection);
}

XXH_errorcode
XXH3_64bits_update_dispatch(XXH3_state_t* state, const void* input, size_t len)
{
    if (XXH_g_dispatch.update == NULL) XXH_setDispatch();
    return XXH_g_dispatch.update(state, (const xxh_u8*)input, len);
}

/* ==== XXH128 public functions ==== */

static XXH128_hash_t
XXH3_hashLong_128b_defaultSecret_selection(const void* input, size_t len,
                                           XXH64_hash_t seed64, const void* secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    if (XXH_g_dispatch128.hashLong128_default == NULL) XXH_setDispatch();
    return XXH_g_dispatch128.hashLong128_default(input, len);
}

XXH128_hash_t XXH3_128bits_dispatch(const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_128b_defaultSecret_selection);
}

static XXH128_hash_t
XXH3_hashLong_128b_withSeed_selection(const void* input, size_t len,
                                      XXH64_hash_t seed64, const void* secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    if (XXH_g_dispatch128.hashLong128_seed == NULL) XXH_setDispatch();
    return XXH_g_dispatch128.hashLong128_seed(input, len, seed64);
}

XXH128_hash_t XXH3_128bits_withSeed_dispatch(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_128b_withSeed_selection);
}

static XXH128_hash_t
XXH3_hashLong_128b_withSecret_selection(const void* input, size_t len,
                                        XXH64_hash_t seed64, const void* secret, size_t secretLen)
{
    (void)seed64;
    if (XXH_g_dispatch128.hashLong128_secret == NULL) XXH_setDispatch();
    return XXH_g_dispatch128.hashLong128_secret(input, len, secret, secretLen);
}

XXH128_hash_t XXH3_128bits_withSecret_dispatch(const void* input, size_t len, const void* secret, size_t secretLen)
{
    return XXH3_128bits_internal(input, len, 0, secret, secretLen, XXH3_hashLong_128b_withSecret_selection);
}

XXH_errorcode
XXH3_128bits_update_dispatch(XXH3_state_t* state, const void* input, size_t len)
{
    if (XXH_g_dispatch128.update == NULL) XXH_setDispatch();
    return XXH_g_dispatch128.update(state, (const xxh_u8*)input, len);
}
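
/*
 * Usage sketch (an assumption based on the companion header in the xxHash
 * repository, not defined in this file): a consumer normally includes
 * "xxh_x86dispatch.h", which redirects the regular XXH3 entry points to the
 * *_dispatch() functions above, e.g.:
 *
 *     #include "xxh_x86dispatch.h"
 *     XXH64_hash_t  h64  = XXH3_64bits(buffer, size);
 *     XXH128_hash_t h128 = XXH3_128bits(buffer, size);
 *
 * The first long-input hash triggers XXH_setDispatch(), and subsequent calls
 * reuse the selected scalar/SSE2/AVX2/AVX512 kernels. Calling the *_dispatch()
 * functions directly is equivalent.
 */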
#if defined (__cplusplus)
}
#endif

/*! @} */