sse2neon.h
#ifndef SSE2NEON_H
#define SSE2NEON_H
/*
* sse2neon is freely redistributable under the MIT License.
*
* Copyright (c) 2015-2024 SSE2NEON Contributors.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// This header file provides a simple API translation layer
// between SSE intrinsics and their corresponding Arm/AArch64 NEON versions
//
// Contributors to this work are:
// John W. Ratcliff <[email protected]>
// Brandon Rowlett <[email protected]>
// Ken Fast <[email protected]>
// Eric van Beurden <[email protected]>
// Alexander Potylitsin <[email protected]>
// Hasindu Gamaarachchi <[email protected]>
// Jim Huang <[email protected]>
// Mark Cheng <[email protected]>
// Malcolm James MacLeod <[email protected]>
// Devin Hussey (easyaspi314) <[email protected]>
// Sebastian Pop <[email protected]>
// Developer Ecosystem Engineering <[email protected]>
// Danila Kutenin <[email protected]>
// François Turban (JishinMaster) <[email protected]>
// Pei-Hsuan Hung <[email protected]>
// Yang-Hao Yuan <[email protected]>
// Syoyo Fujita <[email protected]>
// Brecht Van Lommel <[email protected]>
// Jonathan Hue <[email protected]>
// Cuda Chen <[email protected]>
// Aymen Qader <[email protected]>
// Anthony Roberts <[email protected]>
/* Tunable configurations */
/* Enable precise implementation of math operations
* This slows down computation a bit, but gives results consistent with
* x86 SSE (e.g. it can fix a hole or NaN pixel in a rendering result).
*/
/* _mm_min|max_ps|ss|pd|sd */
#ifndef SSE2NEON_PRECISE_MINMAX
#define SSE2NEON_PRECISE_MINMAX (0)
#endif
/* _mm_rcp_ps */
#ifndef SSE2NEON_PRECISE_DIV
#define SSE2NEON_PRECISE_DIV (0)
#endif
/* _mm_sqrt_ps and _mm_rsqrt_ps */
#ifndef SSE2NEON_PRECISE_SQRT
#define SSE2NEON_PRECISE_SQRT (0)
#endif
/* _mm_dp_pd */
#ifndef SSE2NEON_PRECISE_DP
#define SSE2NEON_PRECISE_DP (0)
#endif
/* Enable inclusion of windows.h on MSVC platforms
* This makes _mm_clflush functional on Windows, as there is no builtin.
*/
#ifndef SSE2NEON_INCLUDE_WINDOWS_H
#define SSE2NEON_INCLUDE_WINDOWS_H (0)
#endif
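/* Usage sketch (illustrative): the tunables above are guarded by #ifndef, so
 * they can be overridden by defining them before this header is included:
 *
 *   #define SSE2NEON_PRECISE_MINMAX 1
 *   #define SSE2NEON_PRECISE_SQRT 1
 *   #include "sse2neon.h"
 */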
/* compiler specific definitions */
#if defined(__GNUC__) || defined(__clang__)
#pragma push_macro("FORCE_INLINE")
#pragma push_macro("ALIGN_STRUCT")
#define FORCE_INLINE static inline __attribute__((always_inline))
#define ALIGN_STRUCT(x) __attribute__((aligned(x)))
#define _sse2neon_likely(x) __builtin_expect(!!(x), 1)
#define _sse2neon_unlikely(x) __builtin_expect(!!(x), 0)
#elif defined(_MSC_VER)
#if _MSVC_TRADITIONAL
#error Using the traditional MSVC preprocessor is not supported! Use /Zc:preprocessor instead.
#endif
#ifndef FORCE_INLINE
#define FORCE_INLINE static inline
#endif
#ifndef ALIGN_STRUCT
#define ALIGN_STRUCT(x) __declspec(align(x))
#endif
#define _sse2neon_likely(x) (x)
#define _sse2neon_unlikely(x) (x)
#else
#pragma message("Macro name collisions may happen with unsupported compilers.")
#endif
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 10
#warning "GCC versions earlier than 10 are not supported."
#endif
#if defined(__OPTIMIZE__) && !defined(SSE2NEON_SUPPRESS_WARNINGS)
#warning \
"Report any potential compiler optimization issues when using SSE2NEON. See the 'Optimization' section at https://github.com/DLTcollab/sse2neon."
#endif
/* In C, a variable with static storage duration cannot be initialized with a
 * function call. */
#ifdef __cplusplus
#define _sse2neon_const static const
#else
#define _sse2neon_const const
#endif
#include <fenv.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
FORCE_INLINE double sse2neon_recast_u64_f64(uint64_t u64)
{
double f64;
memcpy(&f64, &u64, sizeof(uint64_t));
return f64;
}
FORCE_INLINE int64_t sse2neon_recast_f64_s64(double f64)
{
int64_t i64;
memcpy(&i64, &f64, sizeof(uint64_t));
return i64;
}
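/* Illustrative example: the recast helpers above reinterpret a raw bit pattern
 * via memcpy without violating strict aliasing, e.g.
 *
 *   double one = sse2neon_recast_u64_f64(0x3FF0000000000000ULL); // 1.0
 *   int64_t bits = sse2neon_recast_f64_s64(1.0); // 0x3FF0000000000000
 */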
#if defined(_WIN32)
/* Definitions for _mm_{malloc,free} are provided by <malloc.h>
* from both MinGW-w64 and MSVC.
*/
#define SSE2NEON_ALLOC_DEFINED
#endif
/* If using MSVC */
#ifdef _MSC_VER
#include <intrin.h>
#if SSE2NEON_INCLUDE_WINDOWS_H
#include <processthreadsapi.h>
#include <windows.h>
#endif
#if !defined(__cplusplus)
#error SSE2NEON only supports C++ compilation with this compiler
#endif
#ifdef SSE2NEON_ALLOC_DEFINED
#include <malloc.h>
#endif
#if (defined(_M_AMD64) || defined(__x86_64__)) || \
(defined(_M_ARM64) || defined(__arm64__))
#define SSE2NEON_HAS_BITSCAN64
#endif
#endif
#if defined(__GNUC__) || defined(__clang__)
#define _sse2neon_define0(type, s, body) \
__extension__({ \
type _a = (s); \
body \
})
#define _sse2neon_define1(type, s, body) \
__extension__({ \
type _a = (s); \
body \
})
#define _sse2neon_define2(type, a, b, body) \
__extension__({ \
type _a = (a), _b = (b); \
body \
})
#define _sse2neon_return(ret) (ret)
#else
#define _sse2neon_define0(type, a, body) [=](type _a) { body }(a)
#define _sse2neon_define1(type, a, body) [](type _a) { body }(a)
#define _sse2neon_define2(type, a, b, body) \
[](type _a, type _b) { body }((a), (b))
#define _sse2neon_return(ret) return ret
#endif
#define _sse2neon_init(...) \
{ \
__VA_ARGS__ \
}
/* Compiler barrier */
#if defined(_MSC_VER) && !defined(__clang__)
#define SSE2NEON_BARRIER() _ReadWriteBarrier()
#else
#define SSE2NEON_BARRIER() \
do { \
__asm__ __volatile__("" ::: "memory"); \
(void) 0; \
} while (0)
#endif
/* Memory barriers
* __atomic_thread_fence does not include a compiler barrier; instead,
* the barrier is part of __atomic_load/__atomic_store's "volatile-like"
* semantics.
*/
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
#include <stdatomic.h>
#endif
FORCE_INLINE void _sse2neon_smp_mb(void)
{
SSE2NEON_BARRIER();
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \
!defined(__STDC_NO_ATOMICS__)
atomic_thread_fence(memory_order_seq_cst);
#elif defined(__GNUC__) || defined(__clang__)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else /* MSVC */
__dmb(_ARM64_BARRIER_ISH);
#endif
}
/* Architecture-specific build options */
/* FIXME: #pragma GCC push_options is only available on GCC */
#if defined(__GNUC__)
#if defined(__arm__) && __ARM_ARCH == 7
/* According to ARM C Language Extensions Architecture specification,
* __ARM_NEON is defined to a value indicating the Advanced SIMD (NEON)
* architecture supported.
*/
#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
#error "You must enable NEON instructions (e.g. -mfpu=neon) to use SSE2NEON."
#endif
#if !defined(__clang__)
#pragma GCC push_options
#pragma GCC target("fpu=neon")
#endif
#elif defined(__aarch64__) || defined(_M_ARM64)
#if !defined(__clang__) && !defined(_MSC_VER)
#pragma GCC push_options
#pragma GCC target("+simd")
#endif
#elif __ARM_ARCH == 8
#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
#error \
"You must enable NEON instructions (e.g. -mfpu=neon-fp-armv8) to use SSE2NEON."
#endif
#if !defined(__clang__) && !defined(_MSC_VER)
#pragma GCC push_options
#endif
#else
#error \
"Unsupported target. Must be either ARMv7-A+NEON or ARMv8-A \
(you could try setting target explicitly with -march or -mcpu)"
#endif
#endif
#include <arm_neon.h>
#if (!defined(__aarch64__) && !defined(_M_ARM64)) && (__ARM_ARCH == 8)
#if defined __has_include && __has_include(<arm_acle.h>)
#include <arm_acle.h>
#endif
#endif
/* Apple Silicon cache lines are double the size of what Intel, AMD, and other
* Arm microarchitectures commonly use.
* From sysctl -a on Apple M1:
* hw.cachelinesize: 128
*/
#if defined(__APPLE__) && (defined(__aarch64__) || defined(__arm64__))
#define SSE2NEON_CACHELINE_SIZE 128
#else
#define SSE2NEON_CACHELINE_SIZE 64
#endif
/* Rounding functions require either Aarch64 instructions or libm fallback */
#if !defined(__aarch64__) && !defined(_M_ARM64)
#include <math.h>
#endif
/* On ARMv7, some registers, such as PMUSERENR and PMCCNTR, are read-only
* or not even accessible in user mode.
* To write to or access these registers in user mode,
* we have to perform a syscall instead.
*/
#if (!defined(__aarch64__) && !defined(_M_ARM64))
#include <sys/time.h>
#endif
/* "__has_builtin" can be used to query support for built-in functions
* provided by gcc/clang and other compilers that support it.
*/
#ifndef __has_builtin /* GCC prior to 10 or non-clang compilers */
/* Compatibility with gcc <= 9 */
#if defined(__GNUC__) && (__GNUC__ <= 9)
#define __has_builtin(x) HAS##x
#define HAS__builtin_popcount 1
#define HAS__builtin_popcountll 1
// __builtin_shuffle introduced in GCC 4.7.0
#if (__GNUC__ >= 5) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))
#define HAS__builtin_shuffle 1
#else
#define HAS__builtin_shuffle 0
#endif
#define HAS__builtin_shufflevector 0
#define HAS__builtin_nontemporal_store 0
#else
#define __has_builtin(x) 0
#endif
#endif
/**
* MACRO for shuffle parameter for _mm_shuffle_ps().
* Argument fp3 is a digit[0123] that represents the fp from argument "b"
* of mm_shuffle_ps that will be placed in fp3 of result. fp2 is the same
* for fp2 in result. fp1 is a digit[0123] that represents the fp from
* argument "a" of mm_shuffle_ps that will be places in fp1 of result.
* fp0 is the same for fp0 of result.
*/
#define _MM_SHUFFLE(fp3, fp2, fp1, fp0) \
(((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
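/* Illustrative example: _MM_SHUFFLE(3, 2, 1, 0) packs the four 2-bit lane
 * selectors into 0xE4, the identity shuffle, so
 *   _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0))
 * yields { a[0], a[1], b[2], b[3] }.
 */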
/**
* MACRO for shuffle parameter for _mm_shuffle_pd().
* Argument fp1 is a digit[01] that represents the fp from argument "b"
* of mm_shuffle_pd that will be placed in fp1 of result.
* fp0 is a digit[01] that represents the fp from argument "a" of mm_shuffle_pd
* that will be placed in fp0 of result.
*/
#define _MM_SHUFFLE2(fp1, fp0) (((fp1) << 1) | (fp0))
#if __has_builtin(__builtin_shufflevector)
#define _sse2neon_shuffle(type, a, b, ...) \
__builtin_shufflevector(a, b, __VA_ARGS__)
#elif __has_builtin(__builtin_shuffle)
#define _sse2neon_shuffle(type, a, b, ...) \
__extension__({ \
type tmp = {__VA_ARGS__}; \
__builtin_shuffle(a, b, tmp); \
})
#endif
#ifdef _sse2neon_shuffle
#define vshuffle_s16(a, b, ...) _sse2neon_shuffle(int16x4_t, a, b, __VA_ARGS__)
#define vshuffleq_s16(a, b, ...) _sse2neon_shuffle(int16x8_t, a, b, __VA_ARGS__)
#define vshuffle_s32(a, b, ...) _sse2neon_shuffle(int32x2_t, a, b, __VA_ARGS__)
#define vshuffleq_s32(a, b, ...) _sse2neon_shuffle(int32x4_t, a, b, __VA_ARGS__)
#define vshuffle_s64(a, b, ...) _sse2neon_shuffle(int64x1_t, a, b, __VA_ARGS__)
#define vshuffleq_s64(a, b, ...) _sse2neon_shuffle(int64x2_t, a, b, __VA_ARGS__)
#endif
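/* Illustrative example: with either builtin, the lane indices select from the
 * concatenation of a and b (0..N-1 from a, N..2N-1 from b), so
 *   vshuffleq_s32(a, b, 0, 1, 4, 5)
 * produces { a[0], a[1], b[0], b[1] }.
 */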
/* Rounding mode macros. */
#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF 0x01
#define _MM_FROUND_TO_POS_INF 0x02
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04
#define _MM_FROUND_NO_EXC 0x08
#define _MM_FROUND_RAISE_EXC 0x00
#define _MM_FROUND_NINT (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_FLOOR (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_CEIL (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_TRUNC (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_RINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
#define _MM_ROUND_NEAREST 0x0000
#define _MM_ROUND_DOWN 0x2000
#define _MM_ROUND_UP 0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000
/* Flush zero mode macros. */
#define _MM_FLUSH_ZERO_MASK 0x8000
#define _MM_FLUSH_ZERO_ON 0x8000
#define _MM_FLUSH_ZERO_OFF 0x0000
/* Denormals are zeros mode macros. */
#define _MM_DENORMALS_ZERO_MASK 0x0040
#define _MM_DENORMALS_ZERO_ON 0x0040
#define _MM_DENORMALS_ZERO_OFF 0x0000
/* indicate immediate constant argument in a given range */
#define __constrange(a, b) const
/* A few intrinsics accept traditional data types like ints or floats, but
* most operate on data types that are specific to SSE.
* If a vector type ends in d, it contains doubles, and if it does not have
* a suffix, it contains floats. An integer vector type can contain any type
* of integer, from chars to shorts to unsigned long longs.
*/
typedef int64x1_t __m64;
typedef float32x4_t __m128; /* 128-bit vector containing 4 floats */
// On 32-bit Arm, the float64x2_t type is not supported, so the __m128d data
// type has to be represented differently for the related intrinsic conversions.
#if defined(__aarch64__) || defined(_M_ARM64)
typedef float64x2_t __m128d; /* 128-bit vector containing 2 doubles */
#else
typedef float32x4_t __m128d;
#endif
typedef int64x2_t __m128i; /* 128-bit vector containing integers */
// Some intrinsics operate on unaligned data types.
typedef int16_t ALIGN_STRUCT(1) unaligned_int16_t;
typedef int32_t ALIGN_STRUCT(1) unaligned_int32_t;
typedef int64_t ALIGN_STRUCT(1) unaligned_int64_t;
// __int64 is defined in the Intrinsics Guide and maps to a different data type
// in different data models
#if !(defined(_WIN32) || defined(_WIN64) || defined(__int64))
#if (defined(__x86_64__) || defined(__i386__))
#define __int64 long long
#else
#define __int64 int64_t
#endif
#endif
/* type-safe casting between types */
#define vreinterpretq_m128_f16(x) vreinterpretq_f32_f16(x)
#define vreinterpretq_m128_f32(x) (x)
#define vreinterpretq_m128_f64(x) vreinterpretq_f32_f64(x)
#define vreinterpretq_m128_u8(x) vreinterpretq_f32_u8(x)
#define vreinterpretq_m128_u16(x) vreinterpretq_f32_u16(x)
#define vreinterpretq_m128_u32(x) vreinterpretq_f32_u32(x)
#define vreinterpretq_m128_u64(x) vreinterpretq_f32_u64(x)
#define vreinterpretq_m128_s8(x) vreinterpretq_f32_s8(x)
#define vreinterpretq_m128_s16(x) vreinterpretq_f32_s16(x)
#define vreinterpretq_m128_s32(x) vreinterpretq_f32_s32(x)
#define vreinterpretq_m128_s64(x) vreinterpretq_f32_s64(x)
#define vreinterpretq_f16_m128(x) vreinterpretq_f16_f32(x)
#define vreinterpretq_f32_m128(x) (x)
#define vreinterpretq_f64_m128(x) vreinterpretq_f64_f32(x)
#define vreinterpretq_u8_m128(x) vreinterpretq_u8_f32(x)
#define vreinterpretq_u16_m128(x) vreinterpretq_u16_f32(x)
#define vreinterpretq_u32_m128(x) vreinterpretq_u32_f32(x)
#define vreinterpretq_u64_m128(x) vreinterpretq_u64_f32(x)
#define vreinterpretq_s8_m128(x) vreinterpretq_s8_f32(x)
#define vreinterpretq_s16_m128(x) vreinterpretq_s16_f32(x)
#define vreinterpretq_s32_m128(x) vreinterpretq_s32_f32(x)
#define vreinterpretq_s64_m128(x) vreinterpretq_s64_f32(x)
#define vreinterpretq_m128i_s8(x) vreinterpretq_s64_s8(x)
#define vreinterpretq_m128i_s16(x) vreinterpretq_s64_s16(x)
#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x)
#define vreinterpretq_m128i_s64(x) (x)
#define vreinterpretq_m128i_u8(x) vreinterpretq_s64_u8(x)
#define vreinterpretq_m128i_u16(x) vreinterpretq_s64_u16(x)
#define vreinterpretq_m128i_u32(x) vreinterpretq_s64_u32(x)
#define vreinterpretq_m128i_u64(x) vreinterpretq_s64_u64(x)
#define vreinterpretq_f32_m128i(x) vreinterpretq_f32_s64(x)
#define vreinterpretq_f64_m128i(x) vreinterpretq_f64_s64(x)
#define vreinterpretq_s8_m128i(x) vreinterpretq_s8_s64(x)
#define vreinterpretq_s16_m128i(x) vreinterpretq_s16_s64(x)
#define vreinterpretq_s32_m128i(x) vreinterpretq_s32_s64(x)
#define vreinterpretq_s64_m128i(x) (x)
#define vreinterpretq_u8_m128i(x) vreinterpretq_u8_s64(x)
#define vreinterpretq_u16_m128i(x) vreinterpretq_u16_s64(x)
#define vreinterpretq_u32_m128i(x) vreinterpretq_u32_s64(x)
#define vreinterpretq_u64_m128i(x) vreinterpretq_u64_s64(x)
#define vreinterpret_m64_s8(x) vreinterpret_s64_s8(x)
#define vreinterpret_m64_s16(x) vreinterpret_s64_s16(x)
#define vreinterpret_m64_s32(x) vreinterpret_s64_s32(x)
#define vreinterpret_m64_s64(x) (x)
#define vreinterpret_m64_u8(x) vreinterpret_s64_u8(x)
#define vreinterpret_m64_u16(x) vreinterpret_s64_u16(x)
#define vreinterpret_m64_u32(x) vreinterpret_s64_u32(x)
#define vreinterpret_m64_u64(x) vreinterpret_s64_u64(x)
#define vreinterpret_m64_f16(x) vreinterpret_s64_f16(x)
#define vreinterpret_m64_f32(x) vreinterpret_s64_f32(x)
#define vreinterpret_m64_f64(x) vreinterpret_s64_f64(x)
#define vreinterpret_u8_m64(x) vreinterpret_u8_s64(x)
#define vreinterpret_u16_m64(x) vreinterpret_u16_s64(x)
#define vreinterpret_u32_m64(x) vreinterpret_u32_s64(x)
#define vreinterpret_u64_m64(x) vreinterpret_u64_s64(x)
#define vreinterpret_s8_m64(x) vreinterpret_s8_s64(x)
#define vreinterpret_s16_m64(x) vreinterpret_s16_s64(x)
#define vreinterpret_s32_m64(x) vreinterpret_s32_s64(x)
#define vreinterpret_s64_m64(x) (x)
#define vreinterpret_f32_m64(x) vreinterpret_f32_s64(x)
#if defined(__aarch64__) || defined(_M_ARM64)
#define vreinterpretq_m128d_s32(x) vreinterpretq_f64_s32(x)
#define vreinterpretq_m128d_s64(x) vreinterpretq_f64_s64(x)
#define vreinterpretq_m128d_u64(x) vreinterpretq_f64_u64(x)
#define vreinterpretq_m128d_f32(x) vreinterpretq_f64_f32(x)
#define vreinterpretq_m128d_f64(x) (x)
#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f64(x)
#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f64(x)
#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f64(x)
#define vreinterpretq_f64_m128d(x) (x)
#define vreinterpretq_f32_m128d(x) vreinterpretq_f32_f64(x)
#else
#define vreinterpretq_m128d_s32(x) vreinterpretq_f32_s32(x)
#define vreinterpretq_m128d_s64(x) vreinterpretq_f32_s64(x)
#define vreinterpretq_m128d_u32(x) vreinterpretq_f32_u32(x)
#define vreinterpretq_m128d_u64(x) vreinterpretq_f32_u64(x)
#define vreinterpretq_m128d_f32(x) (x)
#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f32(x)
#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f32(x)
#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f32(x)
#define vreinterpretq_f32_m128d(x) (x)
#endif
// A union called 'SIMDVec' is defined in this header file and can be used by
// applications that attempt to access the contents of an __m128 struct
// directly. Note that accessing the __m128 struct directly is considered bad
// coding practice by Microsoft: @see:
// https://learn.microsoft.com/en-us/cpp/cpp/m128
//
// However, some legacy source code may try to access the contents of an __m128
// struct directly so the developer can use the SIMDVec as an alias for it. Any
// casting must be done manually by the developer, as you cannot cast or
// otherwise alias the base NEON data type for intrinsic operations.
//
// The union is intended to allow direct access to an __m128 variable using the
// names that the MSVC compiler provides. It should really only be used when
// trying to access the members of the vector as integer values. GCC/clang
// allow native access to the float members through a simple array access
// operator (in C since 4.6, in C++ since 4.8).
//
// Ideally, direct accesses to SIMD vectors should be avoided since they can
// cause a performance hit. If such access really is needed, however, the
// original __m128
// variable can be aliased with a pointer to this union and used to access
// individual components. The use of this union should be hidden behind a macro
// that is used throughout the codebase to access the members instead of always
// declaring this type of variable.
typedef union ALIGN_STRUCT(16) SIMDVec {
float m128_f32[4]; // as floats - DON'T USE. Added for convenience.
int8_t m128_i8[16]; // as signed 8-bit integers.
int16_t m128_i16[8]; // as signed 16-bit integers.
int32_t m128_i32[4]; // as signed 32-bit integers.
int64_t m128_i64[2]; // as signed 64-bit integers.
uint8_t m128_u8[16]; // as unsigned 8-bit integers.
uint16_t m128_u16[8]; // as unsigned 16-bit integers.
uint32_t m128_u32[4]; // as unsigned 32-bit integers.
uint64_t m128_u64[2]; // as unsigned 64-bit integers.
} SIMDVec;
// casting using SIMDVec
#define vreinterpretq_nth_u64_m128i(x, n) (((SIMDVec *) &x)->m128_u64[n])
#define vreinterpretq_nth_u32_m128i(x, n) (((SIMDVec *) &x)->m128_u32[n])
#define vreinterpretq_nth_u8_m128i(x, n) (((SIMDVec *) &x)->m128_u8[n])
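/* Illustrative sketch of the aliasing pattern described above (hypothetical
 * debug helper, not part of the API):
 *
 *   __m128i v = _mm_set_epi32(4, 3, 2, 1);
 *   uint32_t lane0 = vreinterpretq_nth_u32_m128i(v, 0); // lane0 == 1
 */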
/* SSE macros */
#define _MM_GET_FLUSH_ZERO_MODE _sse2neon_mm_get_flush_zero_mode
#define _MM_SET_FLUSH_ZERO_MODE _sse2neon_mm_set_flush_zero_mode
#define _MM_GET_DENORMALS_ZERO_MODE _sse2neon_mm_get_denormals_zero_mode
#define _MM_SET_DENORMALS_ZERO_MODE _sse2neon_mm_set_denormals_zero_mode
// Function declaration
// SSE
FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE(void);
FORCE_INLINE __m128 _mm_move_ss(__m128, __m128);
FORCE_INLINE __m128 _mm_or_ps(__m128, __m128);
FORCE_INLINE __m128 _mm_set_ps1(float);
FORCE_INLINE __m128 _mm_setzero_ps(void);
// SSE2
FORCE_INLINE __m128i _mm_and_si128(__m128i, __m128i);
FORCE_INLINE __m128i _mm_castps_si128(__m128);
FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i, __m128i);
FORCE_INLINE __m128i _mm_cvtps_epi32(__m128);
FORCE_INLINE __m128d _mm_move_sd(__m128d, __m128d);
FORCE_INLINE __m128i _mm_or_si128(__m128i, __m128i);
FORCE_INLINE __m128i _mm_set_epi32(int, int, int, int);
FORCE_INLINE __m128i _mm_set_epi64x(int64_t, int64_t);
FORCE_INLINE __m128d _mm_set_pd(double, double);
FORCE_INLINE __m128i _mm_set1_epi32(int);
FORCE_INLINE __m128i _mm_setzero_si128(void);
// SSE4.1
FORCE_INLINE __m128d _mm_ceil_pd(__m128d);
FORCE_INLINE __m128 _mm_ceil_ps(__m128);
FORCE_INLINE __m128d _mm_floor_pd(__m128d);
FORCE_INLINE __m128 _mm_floor_ps(__m128);
FORCE_INLINE __m128d _mm_round_pd(__m128d, int);
FORCE_INLINE __m128 _mm_round_ps(__m128, int);
// SSE4.2
FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t, uint8_t);
/* Backwards compatibility for compilers lacking support for specific types */
// Older GCC does not provide the vld1q_u8_x4 intrinsic
#if defined(__GNUC__) && !defined(__clang__) && \
((__GNUC__ <= 13 && defined(__arm__)) || \
(__GNUC__ == 10 && __GNUC_MINOR__ < 3 && defined(__aarch64__)) || \
(__GNUC__ <= 9 && defined(__aarch64__)))
FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
{
uint8x16x4_t ret;
ret.val[0] = vld1q_u8(p + 0);
ret.val[1] = vld1q_u8(p + 16);
ret.val[2] = vld1q_u8(p + 32);
ret.val[3] = vld1q_u8(p + 48);
return ret;
}
#else
// Wraps vld1q_u8_x4
FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
{
return vld1q_u8_x4(p);
}
#endif
#if !defined(__aarch64__) && !defined(_M_ARM64)
/* emulate vaddv u8 variant */
FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8)
{
const uint64x1_t v1 = vpaddl_u32(vpaddl_u16(vpaddl_u8(v8)));
return vget_lane_u8(vreinterpret_u8_u64(v1), 0);
}
#else
// Wraps vaddv_u8
FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8)
{
return vaddv_u8(v8);
}
#endif
#if !defined(__aarch64__) && !defined(_M_ARM64)
/* emulate vaddvq u8 variant */
FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a)
{
uint8x8_t tmp = vpadd_u8(vget_low_u8(a), vget_high_u8(a));
uint8_t res = 0;
for (int i = 0; i < 8; ++i)
res += tmp[i];
return res;
}
#else
// Wraps vaddvq_u8
FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a)
{
return vaddvq_u8(a);
}
#endif
#if !defined(__aarch64__) && !defined(_M_ARM64)
/* emulate vaddvq u16 variant */
FORCE_INLINE uint16_t _sse2neon_vaddvq_u16(uint16x8_t a)
{
uint32x4_t m = vpaddlq_u16(a);
uint64x2_t n = vpaddlq_u32(m);
uint64x1_t o = vget_low_u64(n) + vget_high_u64(n);
return vget_lane_u32((uint32x2_t) o, 0);
}
#else
// Wraps vaddvq_u16
FORCE_INLINE uint16_t _sse2neon_vaddvq_u16(uint16x8_t a)
{
return vaddvq_u16(a);
}
#endif
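/* Illustrative example: each _sse2neon_vaddv* helper above performs a
 * horizontal add across all lanes; for a uint16x8_t holding {1, 2, ..., 8},
 * _sse2neon_vaddvq_u16 returns 36 on both the AArch64 and the emulated path.
 */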
/* Function Naming Conventions
* The naming convention of SSE intrinsics is straightforward. A generic SSE
* intrinsic function is given as follows:
* _mm_<name>_<data_type>
*
* The parts of this format are given as follows:
* 1. <name> describes the operation performed by the intrinsic
* 2. <data_type> identifies the data type of the function's primary arguments
*
* This last part, <data_type>, is a little complicated. It identifies the
* content of the input values, and can be set to any of the following values:
* + ps - vectors contain floats (ps stands for packed single-precision)
* + pd - vectors contain doubles (pd stands for packed double-precision)
* + epi8/epi16/epi32/epi64 - vectors contain 8-bit/16-bit/32-bit/64-bit
* signed integers
* + epu8/epu16/epu32/epu64 - vectors contain 8-bit/16-bit/32-bit/64-bit
* unsigned integers
* + si128 - unspecified 128-bit vector or 256-bit vector
* + m128/m128i/m128d - identifies input vector types when they are different
* than the type of the returned vector
*
* For example, _mm_setzero_ps. The _mm implies that the function returns
* a 128-bit vector. The _ps at the end implies that the argument vectors
* contain floats.
*
* A complete example: Byte Shuffle - pshufb (_mm_shuffle_epi8)
* // Set packed 16-bit integers: 128 bits, 8 shorts, 16 bits each
* __m128i v_in = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
* // Set packed 8-bit integers
* // 128 bits, 16 chars, 8 bits each
* __m128i v_perm = _mm_setr_epi8(1, 0, 2, 3, 8, 9, 10, 11,
* 4, 5, 12, 13, 6, 7, 14, 15);
* // Shuffle packed 8-bit integers
* __m128i v_out = _mm_shuffle_epi8(v_in, v_perm); // pshufb
*/
/* Constants for use with _mm_prefetch. */
enum _mm_hint {
_MM_HINT_NTA = 0, /* load data to L1 and L2 cache, mark it as NTA */
_MM_HINT_T0 = 1, /* load data to L1 and L2 cache */
_MM_HINT_T1 = 2, /* load data to L2 cache only */
_MM_HINT_T2 = 3, /* load data to L2 cache only, mark it as NTA */
};
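/* Usage sketch (illustrative): these hints are passed as the second argument
 * of _mm_prefetch, which this header defines further below, e.g.
 *   _mm_prefetch((const char *) ptr, _MM_HINT_T0);
 */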
// The bit field mapping to the FPCR (floating-point control register)
typedef struct {
uint16_t res0;
uint8_t res1 : 6;
uint8_t bit22 : 1;
uint8_t bit23 : 1;
uint8_t bit24 : 1;
uint8_t res2 : 7;
#if defined(__aarch64__) || defined(_M_ARM64)
uint32_t res3;
#endif
} fpcr_bitfield;
// Takes the upper 64 bits of a and places them in the low end of the result;
// takes the lower 64 bits of b and places them in the high end of the result.
FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
{
float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
return vreinterpretq_m128_f32(vcombine_f32(a32, b10));
}
// Takes the lower two 32-bit values from a, swaps them, and places them in the
// low end of the result; takes the upper two 32-bit values from b, swaps them,
// and places them in the high end of the result.
FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
{
float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b)));
return vreinterpretq_m128_f32(vcombine_f32(a01, b23));
}
FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b)
{
float32x2_t a21 = vget_high_f32(
vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
float32x2_t b03 = vget_low_f32(
vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
return vreinterpretq_m128_f32(vcombine_f32(a21, b03));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
{
float32x2_t a03 = vget_low_f32(
vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
float32x2_t b21 = vget_high_f32(
vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
return vreinterpretq_m128_f32(vcombine_f32(a03, b21));
}
FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b)
{
float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
}
FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
{
float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
return vreinterpretq_m128_f32(vcombine_f32(a01, b10));
}
FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
{
float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b)));
return vreinterpretq_m128_f32(vcombine_f32(a01, b01));
}
// Keeps the low 64 bits of a in the low half and puts the high 64 bits of b in
// the high half.
FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
{
float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
return vreinterpretq_m128_f32(vcombine_f32(a10, b32));
}
FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
{
float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1);
float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
return vreinterpretq_m128_f32(vcombine_f32(a11, b00));
}
FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
{
float32x2_t a22 =
vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
return vreinterpretq_m128_f32(vcombine_f32(a22, b00));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
{
float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0);
float32x2_t b22 =
vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0);
return vreinterpretq_m128_f32(vcombine_f32(a00, b22));
}
FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
{
float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
float32x2_t a22 =
vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* TODO: use vzip ?*/
float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
return vreinterpretq_m128_f32(vcombine_f32(a02, b32));
}
FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
{
float32x2_t a33 =
vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1);
float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1);
return vreinterpretq_m128_f32(vcombine_f32(a33, b11));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
{
float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
float32x2_t b20 = vset_lane_f32(b2, b00, 1);
return vreinterpretq_m128_f32(vcombine_f32(a10, b20));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
{
float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
float32_t b2 = vgetq_lane_f32(b, 2);
float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
float32x2_t b20 = vset_lane_f32(b2, b00, 1);
return vreinterpretq_m128_f32(vcombine_f32(a01, b20));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
{
float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
float32_t b2 = vgetq_lane_f32(b, 2);
float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
float32x2_t b20 = vset_lane_f32(b2, b00, 1);
return vreinterpretq_m128_f32(vcombine_f32(a32, b20));
}
// For MSVC, we check only if it is ARM64, as every single ARM64 processor
// supported by WoA has crypto extensions. If this changes in the future,
// this can be verified via the runtime-only method of:
// IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)
#if (defined(_M_ARM64) && !defined(__clang__)) || \
(defined(__ARM_FEATURE_CRYPTO) && \
(defined(__aarch64__) || __has_builtin(__builtin_arm_crypto_vmullp64)))
// Wraps vmull_p64
FORCE_INLINE uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
{
poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0);
poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0);
#if defined(_MSC_VER) && !defined(__clang__)
__n64 a1 = {a}, b1 = {b};
return vreinterpretq_u64_p128(vmull_p64(a1, b1));
#else
return vreinterpretq_u64_p128(vmull_p64(a, b));
#endif
}
#else // ARMv7 polyfill
// ARMv7/some A64 lacks vmull_p64, but it has vmull_p8.
//
// vmull_p8 calculates 8 8-bit->16-bit polynomial multiplies, but we need a
// 64-bit->128-bit polynomial multiply.
//
// It needs some work and is somewhat slow, but it is still faster than all
// known scalar methods.
//
// Algorithm adapted to C from
// https://www.workofard.com/2017/07/ghash-for-low-end-cores/, which is adapted
// from "Fast Software Polynomial Multiplication on ARM Processors Using the
// NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and Ricardo Dahab
// (https://hal.inria.fr/hal-01506572)
static uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
{
poly8x8_t a = vreinterpret_p8_u64(_a);
poly8x8_t b = vreinterpret_p8_u64(_b);
// Masks
uint8x16_t k48_32 = vcombine_u8(vcreate_u8(0x0000ffffffffffff),
vcreate_u8(0x00000000ffffffff));
uint8x16_t k16_00 = vcombine_u8(vcreate_u8(0x000000000000ffff),
vcreate_u8(0x0000000000000000));
// Do the multiplies, rotating with vext to get all combinations
uint8x16_t d = vreinterpretq_u8_p16(vmull_p8(a, b)); // D = A0 * B0
uint8x16_t e =
vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 1))); // E = A0 * B1
uint8x16_t f =
vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 1), b)); // F = A1 * B0
uint8x16_t g =
vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 2))); // G = A0 * B2
uint8x16_t h =
vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 2), b)); // H = A2 * B0
uint8x16_t i =
vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 3))); // I = A0 * B3
uint8x16_t j =
vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 3), b)); // J = A3 * B0
uint8x16_t k =
vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 4))); // K = A0 * B4
// Add cross products
uint8x16_t l = veorq_u8(e, f); // L = E + F
uint8x16_t m = veorq_u8(g, h); // M = G + H
uint8x16_t n = veorq_u8(i, j); // N = I + J
// Interleave. Using vzip1 and vzip2 prevents Clang from emitting TBL
// instructions.
#if defined(__aarch64__)
uint8x16_t lm_p0 = vreinterpretq_u8_u64(
vzip1q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
uint8x16_t lm_p1 = vreinterpretq_u8_u64(
vzip2q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
uint8x16_t nk_p0 = vreinterpretq_u8_u64(
vzip1q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
uint8x16_t nk_p1 = vreinterpretq_u8_u64(
vzip2q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
#else
uint8x16_t lm_p0 = vcombine_u8(vget_low_u8(l), vget_low_u8(m));
uint8x16_t lm_p1 = vcombine_u8(vget_high_u8(l), vget_high_u8(m));
uint8x16_t nk_p0 = vcombine_u8(vget_low_u8(n), vget_low_u8(k));
uint8x16_t nk_p1 = vcombine_u8(vget_high_u8(n), vget_high_u8(k));
#endif
// t0 = (L) (P0 + P1) << 8
// t1 = (M) (P2 + P3) << 16
uint8x16_t t0t1_tmp = veorq_u8(lm_p0, lm_p1);
uint8x16_t t0t1_h = vandq_u8(lm_p1, k48_32);
uint8x16_t t0t1_l = veorq_u8(t0t1_tmp, t0t1_h);
// t2 = (N) (P4 + P5) << 24
// t3 = (K) (P6 + P7) << 32
uint8x16_t t2t3_tmp = veorq_u8(nk_p0, nk_p1);
uint8x16_t t2t3_h = vandq_u8(nk_p1, k16_00);
uint8x16_t t2t3_l = veorq_u8(t2t3_tmp, t2t3_h);
// De-interleave
#if defined(__aarch64__)
uint8x16_t t0 = vreinterpretq_u8_u64(
vuzp1q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
uint8x16_t t1 = vreinterpretq_u8_u64(
vuzp2q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
uint8x16_t t2 = vreinterpretq_u8_u64(
vuzp1q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
uint8x16_t t3 = vreinterpretq_u8_u64(
vuzp2q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
#else
uint8x16_t t1 = vcombine_u8(vget_high_u8(t0t1_l), vget_high_u8(t0t1_h));
uint8x16_t t0 = vcombine_u8(vget_low_u8(t0t1_l), vget_low_u8(t0t1_h));
uint8x16_t t3 = vcombine_u8(vget_high_u8(t2t3_l), vget_high_u8(t2t3_h));
uint8x16_t t2 = vcombine_u8(vget_low_u8(t2t3_l), vget_low_u8(t2t3_h));
#endif
// Shift the cross products
uint8x16_t t0_shift = vextq_u8(t0, t0, 15); // t0 << 8
uint8x16_t t1_shift = vextq_u8(t1, t1, 14); // t1 << 16
uint8x16_t t2_shift = vextq_u8(t2, t2, 13); // t2 << 24
uint8x16_t t3_shift = vextq_u8(t3, t3, 12); // t3 << 32
// Accumulate the products