From c89624555f75f170000ffa1185a4c0b83b3527a4 Mon Sep 17 00:00:00 2001 From: SakiTakamachi Date: Thu, 24 Apr 2025 12:27:47 +0900 Subject: [PATCH 01/12] Added zend_simd.h --- Zend/zend_simd.h | 106 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 Zend/zend_simd.h diff --git a/Zend/zend_simd.h b/Zend/zend_simd.h new file mode 100644 index 0000000000000..359b7f36acbc9 --- /dev/null +++ b/Zend/zend_simd.h @@ -0,0 +1,106 @@ +/* + +----------------------------------------------------------------------+ + | Zend Engine | + +----------------------------------------------------------------------+ + | Copyright (c) Zend Technologies Ltd. (http://www.zend.com) | + +----------------------------------------------------------------------+ + | This source file is subject to version 2.00 of the Zend license, | + | that is bundled with this package in the file LICENSE, and is | + | available through the world-wide-web at the following url: | + | http://www.zend.com/license/2_00.txt. | + | If you did not receive a copy of the Zend license and are unable to | + | obtain it through the world-wide-web, please send a note to | + | license@zend.com so we can mail you a copy immediately. | + +----------------------------------------------------------------------+ + | Authors: Saki Takamachi | + +----------------------------------------------------------------------+ +*/ + +#ifndef ZEND_SIMD_H +#define ZEND_SIMD_H + +#ifdef __SSE2__ +#include +#define ZEND_HAVE_VECTOR_128 + +typedef __m128i zend_vec_8x16_t; +typedef __m128i zend_vec_16x8_t; +typedef __m128i zend_vec_32x4_t; +typedef __m128i zend_vec_64x2_t; + +#define zend_vec_setzero_8x16() _mm_setzero_si128() +#define zend_vec_set_8x16(x) _mm_set1_epi8(x) +#define zend_vec_set_8x16_from_16x8(x0, x1, x2, x3, x4, x5, x6, x7) _mm_set_epi16(x0, x1, x2, x3, x4, x5, x6, x7) +#define zend_vec_set_8x16_from_32x4(x0, x1, x2, x3) _mm_set_epi32(x0, x1, x2, x3) +#define zend_vec_set_8x16_from_64x2(x0, x1) _mm_set_epi64(x0, x1) +#define zend_vec_load_8x16(x) _mm_load_si128((const __m128i *) (x)) +#define zend_vec_loadu_8x16(x) _mm_loadu_si128((const __m128i *) (x)) +#define zend_vec_store_8x16(to, x) _mm_store_si128((__m128i *) (to), x) +#define zend_vec_storeu_8x16(to, x) _mm_storeu_si128((__m128i *) (to), x) + +#define zend_vec_or_8x16(a, b) _mm_or_si128(a, b) +#define zend_vec_xor_8x16(a, b) _mm_xor_si128(a, b) +#define zend_vec_and_8x16(a, b) _mm_and_si128(a, b) +#define zend_vec_rshift_128_from_8x16(x, bytes) _mm_srli_si128(x, bytes) +#define zend_vec_lshift_128_from_8x16(x, bytes) _mm_slli_si128(x, bytes) + +#define zend_vec_add_8x16(a, b) _mm_add_epi8(a, b) + +#define zend_vec_cmpeq_8x16(a, b) _mm_cmpeq_epi8(a, b) +#define zend_vec_cmplt_8x16(a, b) _mm_cmplt_epi8(a, b) +#define zend_vec_cmpgt_8x16(a, b) _mm_cmpgt_epi8(a, b) + +#define zend_vec_movemask_8x16(x) _mm_movemask_epi8(x) + + +#elif defined(__aarch64__) || defined(_M_ARM64) +#include +#define ZEND_HAVE_VECTOR_128 + +typedef int8x16_t zend_vec_8x16_t; +typedef int16x8_t zend_vec_16x8_t; +typedef int32x4_t zend_vec_32x4_t; +typedef int64x2_t zend_vec_64x2_t; + +#define zend_vec_setzero_8x16() vdupq_n_s8(0) +#define zend_vec_set_8x16(x) vdupq_n_s8(x) +#define zend_vec_set_8x16_from_16x8(x0, x1, x2, x3, x4, x5, x6, x7) \ + vreinterpretq_s8_s16((int16x8_t) { \ + (int16_t) (x7), (int16_t) (x6), (int16_t) (x5), (int16_t) (x4), \ + (int16_t) (x3), (int16_t) (x2), (int16_t) (x1), (int16_t) (x0) }) +#define zend_vec_set_8x16_from_32x4(x0, x1, x2, x3) \ + 
vreinterpretq_s8_s32((int32x4_t) { (int32_t) (x3), (int32_t) (x2), (int32_t) (x1), (int32_t) (x0) }) +#define zend_vec_set_8x16_from_64x2(x0, x1) vreinterpretq_s8_s64((int64x2_t) { (int64_t) (x1), (int64_t) (x0) }) +#define zend_vec_load_8x16(x) vld1q_s8((const int8_t *) (x)) +#define zend_vec_loadu_8x16(x) zend_vec_load_8x16(x) +#define zend_vec_store_8x16(to, x) vst1q_s8((int8_t *) (to), x) +#define zend_vec_storeu_8x16(to, x) zend_vec_store_8x16(to, x) + +#define zend_vec_or_8x16(a, b) vorrq_s8(a, b) +#define zend_vec_xor_8x16(a, b) veorq_s8(a, b) +#define zend_vec_and_8x16(a, b) vandq_s8(a, b) +#define zend_vec_rshift_128_from_8x16(x, bytes) vreinterpretq_s8_u8(vextq_u8(vdupq_n_u8(0), vreinterpretq_u8_s8(x), bytes)) +#define zend_vec_lshift_128_from_8x16(x, bytes) vreinterpretq_s8_u8(vextq_u8(vreinterpretq_u8_s8(x), vdupq_n_u8(0), 16 - bytes)) + +#define zend_vec_add_8x16(a, b) vaddq_s8(a, b) + +#define zend_vec_cmpeq_8x16(a, b) (vreinterpretq_s8_u8(vceqq_s8(a, b))) +#define zend_vec_cmplt_8x16(a, b) (vreinterpretq_s8_u8(vcltq_s8(a, b))) +#define zend_vec_cmpgt_8x16(a, b) (vreinterpretq_s8_u8(vcgtq_s8(a, b))) + +static zend_always_inline int zend_vec_movemask_8x16(int8x16_t x) +{ + /** + * based on code from + * https://community.arm.com/arm-community-blogs/b/servers-and-cloud-computing-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon + */ + uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(vreinterpretq_u8_s8(x), 7)); + uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7)); + uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14)); + uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28)); + return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8); +} + +#endif + +#endif /* ZEND_SIMD_H */ From 178000fc869929dedeb180d464e3ef7adb5429b3 Mon Sep 17 00:00:00 2001 From: SakiTakamachi Date: Thu, 24 Apr 2025 12:28:09 +0900 Subject: [PATCH 02/12] use zend_simd in url.c --- ext/standard/url.c | 77 ++++++++++++++++++++++------------------------ 1 file changed, 37 insertions(+), 40 deletions(-) diff --git a/ext/standard/url.c b/ext/standard/url.c index da2ddea067314..82f00d1223dac 100644 --- a/ext/standard/url.c +++ b/ext/standard/url.c @@ -19,14 +19,11 @@ #include #include -#ifdef __SSE2__ -#include -#endif - #include "php.h" #include "url.h" #include "file.h" +#include "zend_simd.h" /* {{{ free_url */ PHPAPI void php_url_free(php_url *theurl) @@ -460,53 +457,53 @@ static zend_always_inline zend_string *php_url_encode_impl(const char *s, size_t start = zend_string_safe_alloc(3, len, 0, 0); to = (unsigned char*)ZSTR_VAL(start); -#ifdef __SSE2__ +#ifdef ZEND_HAVE_VECTOR_128 while (from + 16 < end) { - __m128i mask; + zend_vec_8x16_t mask; uint32_t bits; - const __m128i _A = _mm_set1_epi8('A' - 1); - const __m128i Z_ = _mm_set1_epi8('Z' + 1); - const __m128i _a = _mm_set1_epi8('a' - 1); - const __m128i z_ = _mm_set1_epi8('z' + 1); - const __m128i _zero = _mm_set1_epi8('0' - 1); - const __m128i nine_ = _mm_set1_epi8('9' + 1); - const __m128i dot = _mm_set1_epi8('.'); - const __m128i minus = _mm_set1_epi8('-'); - const __m128i under = _mm_set1_epi8('_'); - - __m128i in = _mm_loadu_si128((__m128i *)from); - - __m128i gt = _mm_cmpgt_epi8(in, _A); - __m128i lt = _mm_cmplt_epi8(in, Z_); - mask = _mm_and_si128(lt, gt); /* upper */ - gt = _mm_cmpgt_epi8(in, _a); - lt = _mm_cmplt_epi8(in, z_); - mask = _mm_or_si128(mask, _mm_and_si128(lt, gt)); /* lower */ - gt = _mm_cmpgt_epi8(in, _zero); - 
lt = _mm_cmplt_epi8(in, nine_); - mask = _mm_or_si128(mask, _mm_and_si128(lt, gt)); /* number */ - mask = _mm_or_si128(mask, _mm_cmpeq_epi8(in, dot)); - mask = _mm_or_si128(mask, _mm_cmpeq_epi8(in, minus)); - mask = _mm_or_si128(mask, _mm_cmpeq_epi8(in, under)); + const zend_vec_8x16_t _A = zend_vec_set_8x16('A' - 1); + const zend_vec_8x16_t Z_ = zend_vec_set_8x16('Z' + 1); + const zend_vec_8x16_t _a = zend_vec_set_8x16('a' - 1); + const zend_vec_8x16_t z_ = zend_vec_set_8x16('z' + 1); + const zend_vec_8x16_t _zero = zend_vec_set_8x16('0' - 1); + const zend_vec_8x16_t nine_ = zend_vec_set_8x16('9' + 1); + const zend_vec_8x16_t dot = zend_vec_set_8x16('.'); + const zend_vec_8x16_t minus = zend_vec_set_8x16('-'); + const zend_vec_8x16_t under = zend_vec_set_8x16('_'); + + zend_vec_8x16_t in = zend_vec_loadu_8x16(from); + + zend_vec_8x16_t gt = zend_vec_cmpgt_8x16(in, _A); + zend_vec_8x16_t lt = zend_vec_cmplt_8x16(in, Z_); + mask = zend_vec_and_8x16(lt, gt); /* upper */ + gt = zend_vec_cmpgt_8x16(in, _a); + lt = zend_vec_cmplt_8x16(in, z_); + mask = zend_vec_or_8x16(mask, zend_vec_and_8x16(lt, gt)); /* lower */ + gt = zend_vec_cmpgt_8x16(in, _zero); + lt = zend_vec_cmplt_8x16(in, nine_); + mask = zend_vec_or_8x16(mask, zend_vec_and_8x16(lt, gt)); /* number */ + mask = zend_vec_or_8x16(mask, zend_vec_cmpeq_8x16(in, dot)); + mask = zend_vec_or_8x16(mask, zend_vec_cmpeq_8x16(in, minus)); + mask = zend_vec_or_8x16(mask, zend_vec_cmpeq_8x16(in, under)); if (!raw) { - const __m128i blank = _mm_set1_epi8(' '); - __m128i eq = _mm_cmpeq_epi8(in, blank); - if (_mm_movemask_epi8(eq)) { - in = _mm_add_epi8(in, _mm_and_si128(eq, _mm_set1_epi8('+' - ' '))); - mask = _mm_or_si128(mask, eq); + const zend_vec_8x16_t blank = zend_vec_set_8x16(' '); + zend_vec_8x16_t eq = zend_vec_cmpeq_8x16(in, blank); + if (zend_vec_movemask_8x16(eq)) { + in = zend_vec_add_8x16(in, zend_vec_and_8x16(eq, zend_vec_set_8x16('+' - ' '))); + mask = zend_vec_or_8x16(mask, eq); } } if (raw) { - const __m128i wavy = _mm_set1_epi8('~'); - mask = _mm_or_si128(mask, _mm_cmpeq_epi8(in, wavy)); + const zend_vec_8x16_t wavy = zend_vec_set_8x16('~'); + mask = zend_vec_or_8x16(mask, zend_vec_cmpeq_8x16(in, wavy)); } - if (((bits = _mm_movemask_epi8(mask)) & 0xffff) == 0xffff) { - _mm_storeu_si128((__m128i*)to, in); + if (((bits = zend_vec_movemask_8x16(mask)) & 0xffff) == 0xffff) { + zend_vec_storeu_8x16(to, in); to += 16; } else { unsigned char xmm[16]; - _mm_storeu_si128((__m128i*)xmm, in); + zend_vec_storeu_8x16(xmm, in); for (size_t i = 0; i < sizeof(xmm); i++) { if ((bits & (0x1 << i))) { *to++ = xmm[i]; From 37f66bc0bb6089e6b67f703c2aa3eb8a3fe2d529 Mon Sep 17 00:00:00 2001 From: SakiTakamachi Date: Thu, 24 Apr 2025 12:28:39 +0900 Subject: [PATCH 03/12] use zend_simd.h in ZendAccelerator.c --- ext/opcache/ZendAccelerator.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/ext/opcache/ZendAccelerator.c b/ext/opcache/ZendAccelerator.c index 704846c4a860f..7a4cce9739eb1 100644 --- a/ext/opcache/ZendAccelerator.c +++ b/ext/opcache/ZendAccelerator.c @@ -98,6 +98,8 @@ typedef int gid_t; #include #endif +#include "zend_simd.h" + ZEND_EXTENSION(); #ifndef ZTS @@ -171,16 +173,16 @@ static void bzero_aligned(void *mem, size_t size) _mm256_store_si256((__m256i*)(p+32), ymm0); p += 64; } -#elif defined(__SSE2__) +#elif defined(ZEND_HAVE_VECTOR_128) char *p = (char*)mem; char *end = p + size; - __m128i xmm0 = _mm_setzero_si128(); + zend_vec_8x16_t xmm0 = zend_vec_setzero_8x16(); while (p < end) { - 
_mm_store_si128((__m128i*)p, xmm0); - _mm_store_si128((__m128i*)(p+16), xmm0); - _mm_store_si128((__m128i*)(p+32), xmm0); - _mm_store_si128((__m128i*)(p+48), xmm0); + zend_vec_store_8x16(p, xmm0); + zend_vec_store_8x16((p+16), xmm0); + zend_vec_store_8x16((p+32), xmm0); + zend_vec_store_8x16((p+48), xmm0); p += 64; } #else From 004123aeff80961961c48a747459ac1ec118fe58 Mon Sep 17 00:00:00 2001 From: SakiTakamachi Date: Mon, 28 Apr 2025 20:50:15 +0900 Subject: [PATCH 04/12] Use zend_simd.h as a wrapper for neon --- Zend/zend_simd.h | 72 ++++++++++------------------------- ext/opcache/ZendAccelerator.c | 10 ++--- ext/standard/url.c | 70 +++++++++++++++++----------------- 3 files changed, 60 insertions(+), 92 deletions(-) diff --git a/Zend/zend_simd.h b/Zend/zend_simd.h index 359b7f36acbc9..71ec02a52a064 100644 --- a/Zend/zend_simd.h +++ b/Zend/zend_simd.h @@ -23,72 +23,40 @@ #include #define ZEND_HAVE_VECTOR_128 -typedef __m128i zend_vec_8x16_t; -typedef __m128i zend_vec_16x8_t; -typedef __m128i zend_vec_32x4_t; -typedef __m128i zend_vec_64x2_t; - -#define zend_vec_setzero_8x16() _mm_setzero_si128() -#define zend_vec_set_8x16(x) _mm_set1_epi8(x) -#define zend_vec_set_8x16_from_16x8(x0, x1, x2, x3, x4, x5, x6, x7) _mm_set_epi16(x0, x1, x2, x3, x4, x5, x6, x7) -#define zend_vec_set_8x16_from_32x4(x0, x1, x2, x3) _mm_set_epi32(x0, x1, x2, x3) -#define zend_vec_set_8x16_from_64x2(x0, x1) _mm_set_epi64(x0, x1) -#define zend_vec_load_8x16(x) _mm_load_si128((const __m128i *) (x)) -#define zend_vec_loadu_8x16(x) _mm_loadu_si128((const __m128i *) (x)) -#define zend_vec_store_8x16(to, x) _mm_store_si128((__m128i *) (to), x) -#define zend_vec_storeu_8x16(to, x) _mm_storeu_si128((__m128i *) (to), x) - -#define zend_vec_or_8x16(a, b) _mm_or_si128(a, b) -#define zend_vec_xor_8x16(a, b) _mm_xor_si128(a, b) -#define zend_vec_and_8x16(a, b) _mm_and_si128(a, b) -#define zend_vec_rshift_128_from_8x16(x, bytes) _mm_srli_si128(x, bytes) -#define zend_vec_lshift_128_from_8x16(x, bytes) _mm_slli_si128(x, bytes) - -#define zend_vec_add_8x16(a, b) _mm_add_epi8(a, b) - -#define zend_vec_cmpeq_8x16(a, b) _mm_cmpeq_epi8(a, b) -#define zend_vec_cmplt_8x16(a, b) _mm_cmplt_epi8(a, b) -#define zend_vec_cmpgt_8x16(a, b) _mm_cmpgt_epi8(a, b) - -#define zend_vec_movemask_8x16(x) _mm_movemask_epi8(x) - #elif defined(__aarch64__) || defined(_M_ARM64) #include #define ZEND_HAVE_VECTOR_128 -typedef int8x16_t zend_vec_8x16_t; -typedef int16x8_t zend_vec_16x8_t; -typedef int32x4_t zend_vec_32x4_t; -typedef int64x2_t zend_vec_64x2_t; +typedef int8x16_t __m128i; -#define zend_vec_setzero_8x16() vdupq_n_s8(0) -#define zend_vec_set_8x16(x) vdupq_n_s8(x) -#define zend_vec_set_8x16_from_16x8(x0, x1, x2, x3, x4, x5, x6, x7) \ +#define _mm_setzero_si128() vdupq_n_s8(0) +#define _mm_set1_epi8(x) vdupq_n_s8(x) +#define _mm_set_epi16(x0, x1, x2, x3, x4, x5, x6, x7) \ vreinterpretq_s8_s16((int16x8_t) { \ (int16_t) (x7), (int16_t) (x6), (int16_t) (x5), (int16_t) (x4), \ (int16_t) (x3), (int16_t) (x2), (int16_t) (x1), (int16_t) (x0) }) -#define zend_vec_set_8x16_from_32x4(x0, x1, x2, x3) \ +#define _mm_set_epi32(x0, x1, x2, x3) \ vreinterpretq_s8_s32((int32x4_t) { (int32_t) (x3), (int32_t) (x2), (int32_t) (x1), (int32_t) (x0) }) -#define zend_vec_set_8x16_from_64x2(x0, x1) vreinterpretq_s8_s64((int64x2_t) { (int64_t) (x1), (int64_t) (x0) }) -#define zend_vec_load_8x16(x) vld1q_s8((const int8_t *) (x)) -#define zend_vec_loadu_8x16(x) zend_vec_load_8x16(x) -#define zend_vec_store_8x16(to, x) vst1q_s8((int8_t *) (to), x) -#define 
zend_vec_storeu_8x16(to, x) zend_vec_store_8x16(to, x) +#define _mm_set_epi64(x0, x1) vreinterpretq_s8_s64((int64x2_t) { (int64_t) (x1), (int64_t) (x0) }) +#define _mm_load_si128(x) vld1q_s8((const int8_t *) (x)) +#define _mm_loadu_si128(x) _mm_load_si128(x) +#define _mm_store_si128(to, x) vst1q_s8((int8_t *) (to), x) +#define _mm_storeu_si128(to, x) _mm_store_si128(to, x) -#define zend_vec_or_8x16(a, b) vorrq_s8(a, b) -#define zend_vec_xor_8x16(a, b) veorq_s8(a, b) -#define zend_vec_and_8x16(a, b) vandq_s8(a, b) -#define zend_vec_rshift_128_from_8x16(x, bytes) vreinterpretq_s8_u8(vextq_u8(vdupq_n_u8(0), vreinterpretq_u8_s8(x), bytes)) -#define zend_vec_lshift_128_from_8x16(x, bytes) vreinterpretq_s8_u8(vextq_u8(vreinterpretq_u8_s8(x), vdupq_n_u8(0), 16 - bytes)) +#define _mm_or_si128(a, b) vorrq_s8(a, b) +#define _mm_xor_si128(a, b) veorq_s8(a, b) +#define _mm_and_si128(a, b) vandq_s8(a, b) +#define _mm_srli_si128(x, bytes) vreinterpretq_s8_u8(vextq_u8(vdupq_n_u8(0), vreinterpretq_u8_s8(x), bytes)) +#define _mm_slli_si128(x, bytes) vreinterpretq_s8_u8(vextq_u8(vreinterpretq_u8_s8(x), vdupq_n_u8(0), 16 - bytes)) -#define zend_vec_add_8x16(a, b) vaddq_s8(a, b) +#define _mm_add_epi8(a, b) vaddq_s8(a, b) -#define zend_vec_cmpeq_8x16(a, b) (vreinterpretq_s8_u8(vceqq_s8(a, b))) -#define zend_vec_cmplt_8x16(a, b) (vreinterpretq_s8_u8(vcltq_s8(a, b))) -#define zend_vec_cmpgt_8x16(a, b) (vreinterpretq_s8_u8(vcgtq_s8(a, b))) +#define _mm_cmpeq_epi8(a, b) (vreinterpretq_s8_u8(vceqq_s8(a, b))) +#define _mm_cmplt_epi8(a, b) (vreinterpretq_s8_u8(vcltq_s8(a, b))) +#define _mm_cmpgt_epi8(a, b) (vreinterpretq_s8_u8(vcgtq_s8(a, b))) -static zend_always_inline int zend_vec_movemask_8x16(int8x16_t x) +static zend_always_inline int _mm_movemask_epi8(int8x16_t x) { /** * based on code from diff --git a/ext/opcache/ZendAccelerator.c b/ext/opcache/ZendAccelerator.c index 7a4cce9739eb1..bffb72fb59c81 100644 --- a/ext/opcache/ZendAccelerator.c +++ b/ext/opcache/ZendAccelerator.c @@ -176,13 +176,13 @@ static void bzero_aligned(void *mem, size_t size) #elif defined(ZEND_HAVE_VECTOR_128) char *p = (char*)mem; char *end = p + size; - zend_vec_8x16_t xmm0 = zend_vec_setzero_8x16(); + __m128i xmm0 = _mm_setzero_si128(); while (p < end) { - zend_vec_store_8x16(p, xmm0); - zend_vec_store_8x16((p+16), xmm0); - zend_vec_store_8x16((p+32), xmm0); - zend_vec_store_8x16((p+48), xmm0); + _mm_store_si128((__m128i*)p, xmm0); + _mm_store_si128((__m128i*)(p+16), xmm0); + _mm_store_si128((__m128i*)(p+32), xmm0); + _mm_store_si128((__m128i*)(p+48), xmm0); p += 64; } #else diff --git a/ext/standard/url.c b/ext/standard/url.c index 82f00d1223dac..e5d8a58966db9 100644 --- a/ext/standard/url.c +++ b/ext/standard/url.c @@ -459,51 +459,51 @@ static zend_always_inline zend_string *php_url_encode_impl(const char *s, size_t #ifdef ZEND_HAVE_VECTOR_128 while (from + 16 < end) { - zend_vec_8x16_t mask; + __m128i mask; uint32_t bits; - const zend_vec_8x16_t _A = zend_vec_set_8x16('A' - 1); - const zend_vec_8x16_t Z_ = zend_vec_set_8x16('Z' + 1); - const zend_vec_8x16_t _a = zend_vec_set_8x16('a' - 1); - const zend_vec_8x16_t z_ = zend_vec_set_8x16('z' + 1); - const zend_vec_8x16_t _zero = zend_vec_set_8x16('0' - 1); - const zend_vec_8x16_t nine_ = zend_vec_set_8x16('9' + 1); - const zend_vec_8x16_t dot = zend_vec_set_8x16('.'); - const zend_vec_8x16_t minus = zend_vec_set_8x16('-'); - const zend_vec_8x16_t under = zend_vec_set_8x16('_'); - - zend_vec_8x16_t in = zend_vec_loadu_8x16(from); - - zend_vec_8x16_t gt = zend_vec_cmpgt_8x16(in, _A); - 
zend_vec_8x16_t lt = zend_vec_cmplt_8x16(in, Z_); - mask = zend_vec_and_8x16(lt, gt); /* upper */ - gt = zend_vec_cmpgt_8x16(in, _a); - lt = zend_vec_cmplt_8x16(in, z_); - mask = zend_vec_or_8x16(mask, zend_vec_and_8x16(lt, gt)); /* lower */ - gt = zend_vec_cmpgt_8x16(in, _zero); - lt = zend_vec_cmplt_8x16(in, nine_); - mask = zend_vec_or_8x16(mask, zend_vec_and_8x16(lt, gt)); /* number */ - mask = zend_vec_or_8x16(mask, zend_vec_cmpeq_8x16(in, dot)); - mask = zend_vec_or_8x16(mask, zend_vec_cmpeq_8x16(in, minus)); - mask = zend_vec_or_8x16(mask, zend_vec_cmpeq_8x16(in, under)); + const __m128i _A = _mm_set1_epi8('A' - 1); + const __m128i Z_ = _mm_set1_epi8('Z' + 1); + const __m128i _a = _mm_set1_epi8('a' - 1); + const __m128i z_ = _mm_set1_epi8('z' + 1); + const __m128i _zero = _mm_set1_epi8('0' - 1); + const __m128i nine_ = _mm_set1_epi8('9' + 1); + const __m128i dot = _mm_set1_epi8('.'); + const __m128i minus = _mm_set1_epi8('-'); + const __m128i under = _mm_set1_epi8('_'); + + __m128i in = _mm_loadu_si128((__m128i *)from); + + __m128i gt = _mm_cmpgt_epi8(in, _A); + __m128i lt = _mm_cmplt_epi8(in, Z_); + mask = _mm_and_si128(lt, gt); /* upper */ + gt = _mm_cmpgt_epi8(in, _a); + lt = _mm_cmplt_epi8(in, z_); + mask = _mm_or_si128(mask, _mm_and_si128(lt, gt)); /* lower */ + gt = _mm_cmpgt_epi8(in, _zero); + lt = _mm_cmplt_epi8(in, nine_); + mask = _mm_or_si128(mask, _mm_and_si128(lt, gt)); /* number */ + mask = _mm_or_si128(mask, _mm_cmpeq_epi8(in, dot)); + mask = _mm_or_si128(mask, _mm_cmpeq_epi8(in, minus)); + mask = _mm_or_si128(mask, _mm_cmpeq_epi8(in, under)); if (!raw) { - const zend_vec_8x16_t blank = zend_vec_set_8x16(' '); - zend_vec_8x16_t eq = zend_vec_cmpeq_8x16(in, blank); - if (zend_vec_movemask_8x16(eq)) { - in = zend_vec_add_8x16(in, zend_vec_and_8x16(eq, zend_vec_set_8x16('+' - ' '))); - mask = zend_vec_or_8x16(mask, eq); + const __m128i blank = _mm_set1_epi8(' '); + __m128i eq = _mm_cmpeq_epi8(in, blank); + if (_mm_movemask_epi8(eq)) { + in = _mm_add_epi8(in, _mm_and_si128(eq, _mm_set1_epi8('+' - ' '))); + mask = _mm_or_si128(mask, eq); } } if (raw) { - const zend_vec_8x16_t wavy = zend_vec_set_8x16('~'); - mask = zend_vec_or_8x16(mask, zend_vec_cmpeq_8x16(in, wavy)); + const __m128i wavy = _mm_set1_epi8('~'); + mask = _mm_or_si128(mask, _mm_cmpeq_epi8(in, wavy)); } - if (((bits = zend_vec_movemask_8x16(mask)) & 0xffff) == 0xffff) { - zend_vec_storeu_8x16(to, in); + if (((bits = _mm_movemask_epi8(mask)) & 0xffff) == 0xffff) { + _mm_storeu_si128((__m128i*)to, in); to += 16; } else { unsigned char xmm[16]; - zend_vec_storeu_8x16(xmm, in); + _mm_storeu_si128((__m128i*)xmm, in); for (size_t i = 0; i < sizeof(xmm); i++) { if ((bits & (0x1 << i))) { *to++ = xmm[i]; From 59efacfa12255cc7fab48a6afe8567f6ba3c1006 Mon Sep 17 00:00:00 2001 From: SakiTakamachi Date: Mon, 28 Apr 2025 20:56:23 +0900 Subject: [PATCH 05/12] use zend_simd.h in string.c --- ext/standard/string.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ext/standard/string.c b/ext/standard/string.c index 1e20791eb61ce..7a282442659e4 100644 --- a/ext/standard/string.c +++ b/ext/standard/string.c @@ -46,10 +46,11 @@ #include "ext/random/php_random.h" #ifdef __SSE2__ -#include #include "Zend/zend_bitset.h" #endif +#include "zend_simd.h" + /* this is read-only, so it's ok */ ZEND_SET_ALIGNED(16, static const char hexconvtab[]) = "0123456789abcdef"; @@ -2817,7 +2818,7 @@ static zend_string *php_strtr_ex(zend_string *str, const char *str_from, const c char *input = ZSTR_VAL(str); size_t len = 
ZSTR_LEN(str); -#ifdef __SSE2__ +#ifdef ZEND_HAVE_VECTOR_128 if (ZSTR_LEN(str) >= sizeof(__m128i)) { __m128i search = _mm_set1_epi8(ch_from); __m128i delta = _mm_set1_epi8(ch_to - ch_from); @@ -3037,7 +3038,7 @@ static zend_always_inline zend_long count_chars(const char *p, zend_long length, zend_long count = 0; const char *endp; -#ifdef __SSE2__ +#ifdef ZEND_HAVE_VECTOR_128 if (length >= sizeof(__m128i)) { __m128i search = _mm_set1_epi8(ch); @@ -5835,7 +5836,7 @@ static zend_string *php_str_rot13(zend_string *str) e = p + ZSTR_LEN(str); target = ZSTR_VAL(ret); -#ifdef __SSE2__ +#ifdef ZEND_HAVE_VECTOR_128 if (e - p > 15) { const __m128i a_minus_1 = _mm_set1_epi8('a' - 1); const __m128i m_plus_1 = _mm_set1_epi8('m' + 1); From f142b6e6032c08b8fb8102fff8c7710390e68dbe Mon Sep 17 00:00:00 2001 From: SakiTakamachi Date: Mon, 28 Apr 2025 21:05:26 +0900 Subject: [PATCH 06/12] use zend_simd.h in bcmath --- ext/bcmath/libbcmath/src/convert.c | 20 +++++----- ext/bcmath/libbcmath/src/simd.h | 59 ------------------------------ ext/bcmath/libbcmath/src/str2num.c | 36 +++++++++--------- 3 files changed, 28 insertions(+), 87 deletions(-) delete mode 100644 ext/bcmath/libbcmath/src/simd.h diff --git a/ext/bcmath/libbcmath/src/convert.c b/ext/bcmath/libbcmath/src/convert.c index 5438b4c1c44e5..f57f33fda043f 100644 --- a/ext/bcmath/libbcmath/src/convert.c +++ b/ext/bcmath/libbcmath/src/convert.c @@ -17,22 +17,22 @@ #include "bcmath.h" #include "convert.h" #include "private.h" -#include "simd.h" +#include "zend_simd.h" char *bc_copy_and_toggle_bcd(char *restrict dest, const char *source, const char *source_end) { const size_t bulk_shift = SWAR_REPEAT('0'); -#ifdef HAVE_BC_SIMD_128 +#ifdef ZEND_HAVE_VECTOR_128 /* SIMD SSE2 or NEON bulk shift + copy */ - bc_simd_128_t shift_vector = bc_simd_set_8x16('0'); - while (source + sizeof(bc_simd_128_t) <= source_end) { - bc_simd_128_t bytes = bc_simd_load_8x16((const bc_simd_128_t *) source); - bytes = bc_simd_xor_8x16(bytes, shift_vector); - bc_simd_store_8x16((bc_simd_128_t *) dest, bytes); - - source += sizeof(bc_simd_128_t); - dest += sizeof(bc_simd_128_t); + __m128i shift_vector = _mm_set1_epi8('0'); + while (source + sizeof(__m128i) <= source_end) { + __m128i bytes = _mm_loadu_si128((const __m128i *) source); + bytes = _mm_xor_si128(bytes, shift_vector); + _mm_storeu_si128((__m128i *) dest, bytes); + + source += sizeof(__m128i); + dest += sizeof(__m128i); } #endif diff --git a/ext/bcmath/libbcmath/src/simd.h b/ext/bcmath/libbcmath/src/simd.h deleted file mode 100644 index af38f8349618c..0000000000000 --- a/ext/bcmath/libbcmath/src/simd.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - +----------------------------------------------------------------------+ - | Copyright (c) The PHP Group | - +----------------------------------------------------------------------+ - | This source file is subject to version 3.01 of the PHP license, | - | that is bundled with this package in the file LICENSE, and is | - | available through the world-wide-web at the following url: | - | https://www.php.net/license/3_01.txt | - | If you did not receive a copy of the PHP license and are unable to | - | obtain it through the world-wide-web, please send a note to | - | license@php.net so we can mail you a copy immediately. 
| - +----------------------------------------------------------------------+ - | Authors: Saki Takamachi | - +----------------------------------------------------------------------+ -*/ - - -#ifndef _BCMATH_SIMD_H_ -#define _BCMATH_SIMD_H_ - -#ifdef __SSE2__ -# include - typedef __m128i bc_simd_128_t; -# define HAVE_BC_SIMD_128 -# define bc_simd_set_8x16(x) _mm_set1_epi8(x) -# define bc_simd_load_8x16(ptr) _mm_loadu_si128((const __m128i *) (ptr)) -# define bc_simd_xor_8x16(a, b) _mm_xor_si128(a, b) -# define bc_simd_store_8x16(ptr, val) _mm_storeu_si128((__m128i *) (ptr), val) -# define bc_simd_add_8x16(a, b) _mm_add_epi8(a, b) -# define bc_simd_cmpeq_8x16(a, b) _mm_cmpeq_epi8(a, b) -# define bc_simd_cmplt_8x16(a, b) _mm_cmplt_epi8(a, b) -# define bc_simd_movemask_8x16(a) _mm_movemask_epi8(a) - -#elif defined(__aarch64__) || defined(_M_ARM64) -# include - typedef int8x16_t bc_simd_128_t; -# define HAVE_BC_SIMD_128 -# define bc_simd_set_8x16(x) vdupq_n_s8(x) -# define bc_simd_load_8x16(ptr) vld1q_s8((const int8_t *) (ptr)) -# define bc_simd_xor_8x16(a, b) veorq_s8(a, b) -# define bc_simd_store_8x16(ptr, val) vst1q_s8((int8_t *) (ptr), val) -# define bc_simd_add_8x16(a, b) vaddq_s8(a, b) -# define bc_simd_cmpeq_8x16(a, b) (vreinterpretq_s8_u8(vceqq_s8(a, b))) -# define bc_simd_cmplt_8x16(a, b) (vreinterpretq_s8_u8(vcltq_s8(a, b))) - static inline int bc_simd_movemask_8x16(int8x16_t vec) - { - /** - * based on code from - * https://community.arm.com/arm-community-blogs/b/servers-and-cloud-computing-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon - */ - uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(vreinterpretq_u8_s8(vec), 7)); - uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7)); - uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14)); - uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28)); - return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8); - } -#endif - -#endif diff --git a/ext/bcmath/libbcmath/src/str2num.c b/ext/bcmath/libbcmath/src/str2num.c index 945de0cf60003..1e1be35f066f1 100644 --- a/ext/bcmath/libbcmath/src/str2num.c +++ b/ext/bcmath/libbcmath/src/str2num.c @@ -32,7 +32,7 @@ #include "bcmath.h" #include "convert.h" #include "private.h" -#include "simd.h" +#include "zend_simd.h" #include #include @@ -40,20 +40,20 @@ static inline const char *bc_count_digits(const char *str, const char *end) { /* Process in bulk */ -#ifdef HAVE_BC_SIMD_128 - const bc_simd_128_t offset = bc_simd_set_8x16((signed char) (SCHAR_MIN - '0')); +#ifdef ZEND_HAVE_VECTOR_128 + const __m128i offset = _mm_set1_epi8((signed char) (SCHAR_MIN - '0')); /* we use the less than comparator, so add 1 */ - const bc_simd_128_t threshold = bc_simd_set_8x16(SCHAR_MIN + ('9' + 1 - '0')); + const __m128i threshold = _mm_set1_epi8(SCHAR_MIN + ('9' + 1 - '0')); - while (str + sizeof(bc_simd_128_t) <= end) { - bc_simd_128_t bytes = bc_simd_load_8x16((const bc_simd_128_t *) str); + while (str + sizeof(__m128i) <= end) { + __m128i bytes = _mm_loadu_si128((const __m128i *) str); /* Wrapping-add the offset to the bytes, such that all bytes below '0' are positive and others are negative. * More specifically, '0' will be -128 and '9' will be -119. */ - bytes = bc_simd_add_8x16(bytes, offset); + bytes = _mm_add_epi8(bytes, offset); /* Now mark all bytes that are <= '9', i.e. <= -119, i.e. < -118, i.e. the threshold. 
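 * For example, ':' (the byte right after '9') wraps to exactly -118, the threshold itself, so the strict less-than test accepts the ten digits and nothing else.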
*/ - bytes = bc_simd_cmplt_8x16(bytes, threshold); + bytes = _mm_cmplt_epi8(bytes, threshold); - int mask = bc_simd_movemask_8x16(bytes); + int mask = _mm_movemask_epi8(bytes); if (mask != 0xffff) { /* At least one of the bytes is not within range. Move to the first offending byte. */ #ifdef PHP_HAVE_BUILTIN_CTZL @@ -63,7 +63,7 @@ static inline const char *bc_count_digits(const char *str, const char *end) #endif } - str += sizeof(bc_simd_128_t); + str += sizeof(__m128i); } #endif @@ -77,19 +77,19 @@ static inline const char *bc_count_digits(const char *str, const char *end) static inline const char *bc_skip_zero_reverse(const char *scanner, const char *stop) { /* Check in bulk */ -#ifdef HAVE_BC_SIMD_128 - const bc_simd_128_t c_zero_repeat = bc_simd_set_8x16('0'); - while (scanner - sizeof(bc_simd_128_t) >= stop) { - scanner -= sizeof(bc_simd_128_t); - bc_simd_128_t bytes = bc_simd_load_8x16((const bc_simd_128_t *) scanner); +#ifdef ZEND_HAVE_VECTOR_128 + const __m128i c_zero_repeat = _mm_set1_epi8('0'); + while (scanner - sizeof(__m128i) >= stop) { + scanner -= sizeof(__m128i); + __m128i bytes = _mm_loadu_si128((const __m128i *) scanner); /* Checks if all numeric strings are equal to '0'. */ - bytes = bc_simd_cmpeq_8x16(bytes, c_zero_repeat); + bytes = _mm_cmpeq_epi8(bytes, c_zero_repeat); - int mask = bc_simd_movemask_8x16(bytes); + int mask = _mm_movemask_epi8(bytes); /* The probability of having 16 trailing 0s in a row is very low, so we use EXPECTED. */ if (EXPECTED(mask != 0xffff)) { /* Move the pointer back and check each character in loop. */ - scanner += sizeof(bc_simd_128_t); + scanner += sizeof(__m128i); break; } } From 4f0fc896c0b5dd53a9e56f11147c35c8ad8a0a1c Mon Sep 17 00:00:00 2001 From: SakiTakamachi Date: Tue, 29 Apr 2025 10:33:19 +0900 Subject: [PATCH 07/12] Changed argument type from `int8x16_t` to `__m128i` for type hinting. 
--- Zend/zend_simd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Zend/zend_simd.h b/Zend/zend_simd.h index 71ec02a52a064..0739cdb4204e7 100644 --- a/Zend/zend_simd.h +++ b/Zend/zend_simd.h @@ -56,7 +56,7 @@ typedef int8x16_t __m128i; #define _mm_cmplt_epi8(a, b) (vreinterpretq_s8_u8(vcltq_s8(a, b))) #define _mm_cmpgt_epi8(a, b) (vreinterpretq_s8_u8(vcgtq_s8(a, b))) -static zend_always_inline int _mm_movemask_epi8(int8x16_t x) +static zend_always_inline int _mm_movemask_epi8(__m128i x) { /** * based on code from From d00d75d8e33d019c27ecff35f0db1aee9d87999d Mon Sep 17 00:00:00 2001 From: SakiTakamachi Date: Tue, 29 Apr 2025 11:03:48 +0900 Subject: [PATCH 08/12] fixed `_mm_set_epi64` to `_mm_set_epi64x` --- Zend/zend_simd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Zend/zend_simd.h b/Zend/zend_simd.h index 0739cdb4204e7..61cdf1dbca36e 100644 --- a/Zend/zend_simd.h +++ b/Zend/zend_simd.h @@ -38,7 +38,7 @@ typedef int8x16_t __m128i; (int16_t) (x3), (int16_t) (x2), (int16_t) (x1), (int16_t) (x0) }) #define _mm_set_epi32(x0, x1, x2, x3) \ vreinterpretq_s8_s32((int32x4_t) { (int32_t) (x3), (int32_t) (x2), (int32_t) (x1), (int32_t) (x0) }) -#define _mm_set_epi64(x0, x1) vreinterpretq_s8_s64((int64x2_t) { (int64_t) (x1), (int64_t) (x0) }) +#define _mm_set_epi64x(x0, x1) vreinterpretq_s8_s64((int64x2_t) { (int64_t) (x1), (int64_t) (x0) }) #define _mm_load_si128(x) vld1q_s8((const int8_t *) (x)) #define _mm_loadu_si128(x) _mm_load_si128(x) #define _mm_store_si128(to, x) vst1q_s8((int8_t *) (to), x) From 22c505a47860e3045dd2a742449a48dba9ea96a9 Mon Sep 17 00:00:00 2001 From: SakiTakamachi Date: Tue, 29 Apr 2025 22:58:19 +0900 Subject: [PATCH 09/12] fixed `_mm_srli_si128` and `_mm_slli_si128` --- Zend/zend_simd.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/Zend/zend_simd.h b/Zend/zend_simd.h index 61cdf1dbca36e..76971ae439cf5 100644 --- a/Zend/zend_simd.h +++ b/Zend/zend_simd.h @@ -47,8 +47,13 @@ typedef int8x16_t __m128i; #define _mm_or_si128(a, b) vorrq_s8(a, b) #define _mm_xor_si128(a, b) veorq_s8(a, b) #define _mm_and_si128(a, b) vandq_s8(a, b) -#define _mm_srli_si128(x, bytes) vreinterpretq_s8_u8(vextq_u8(vdupq_n_u8(0), vreinterpretq_u8_s8(x), bytes)) -#define _mm_slli_si128(x, bytes) vreinterpretq_s8_u8(vextq_u8(vreinterpretq_u8_s8(x), vdupq_n_u8(0), 16 - bytes)) + +#define _mm_slli_si128(x, imm) \ + ((imm) >= 16 ? vdupq_n_s8(0) : \ + vreinterpretq_s8_u8(vextq_u8(vdupq_n_u8(0), vreinterpretq_u8_s8(x), 16 - (imm)))) +#define _mm_srli_si128(x, imm) \ + ((imm) >= 16 ? vdupq_n_s8(0) : \ + vreinterpretq_s8_u8(vextq_u8(vreinterpretq_u8_s8(x), vdupq_n_u8(0), (imm)))) #define _mm_add_epi8(a, b) vaddq_s8(a, b) From 0f73ba4329dcdd0798d8f5bc390cf723b9fe89d6 Mon Sep 17 00:00:00 2001 From: SakiTakamachi Date: Tue, 29 Apr 2025 23:11:28 +0900 Subject: [PATCH 10/12] fixed `_mm_add_epi8` --- Zend/zend_simd.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Zend/zend_simd.h b/Zend/zend_simd.h index 76971ae439cf5..9ce5e073cc204 100644 --- a/Zend/zend_simd.h +++ b/Zend/zend_simd.h @@ -55,7 +55,11 @@ typedef int8x16_t __m128i; ((imm) >= 16 ? vdupq_n_s8(0) : \ vreinterpretq_s8_u8(vextq_u8(vreinterpretq_u8_s8(x), vdupq_n_u8(0), (imm)))) -#define _mm_add_epi8(a, b) vaddq_s8(a, b) +/** + * In practice, there is no problem, but a runtime error for signed integer overflow is triggered by UBSAN, + * so perform the calculation as unsigned. Since it is optimized at compile time, there are no unnecessary casts at runtime. 
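+ * e.g. 0x7f + 0x01 overflows int8_t, but computed as uint8_t it wraps to 0x80, the same bit pattern that x86's paddb produces.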
+ */ +#define _mm_add_epi8(a, b) vreinterpretq_s8_u8(vaddq_u8(vreinterpretq_u8_s8(a), vreinterpretq_u8_s8(b))) #define _mm_cmpeq_epi8(a, b) (vreinterpretq_s8_u8(vceqq_s8(a, b))) #define _mm_cmplt_epi8(a, b) (vreinterpretq_s8_u8(vcltq_s8(a, b))) From 0e9302be697c75751cd616bbf37a45eeb1db5310 Mon Sep 17 00:00:00 2001 From: Saki Takamachi Date: Wed, 7 May 2025 18:22:15 +0900 Subject: [PATCH 11/12] Revert "use zend_simd.h in bcmath" This reverts commit f142b6e6032c08b8fb8102fff8c7710390e68dbe. --- ext/bcmath/libbcmath/src/convert.c | 20 +++++----- ext/bcmath/libbcmath/src/simd.h | 59 ++++++++++++++++++++++++++++++ ext/bcmath/libbcmath/src/str2num.c | 36 +++++++++--------- 3 files changed, 87 insertions(+), 28 deletions(-) create mode 100644 ext/bcmath/libbcmath/src/simd.h diff --git a/ext/bcmath/libbcmath/src/convert.c b/ext/bcmath/libbcmath/src/convert.c index f57f33fda043f..5438b4c1c44e5 100644 --- a/ext/bcmath/libbcmath/src/convert.c +++ b/ext/bcmath/libbcmath/src/convert.c @@ -17,22 +17,22 @@ #include "bcmath.h" #include "convert.h" #include "private.h" -#include "zend_simd.h" +#include "simd.h" char *bc_copy_and_toggle_bcd(char *restrict dest, const char *source, const char *source_end) { const size_t bulk_shift = SWAR_REPEAT('0'); -#ifdef ZEND_HAVE_VECTOR_128 +#ifdef HAVE_BC_SIMD_128 /* SIMD SSE2 or NEON bulk shift + copy */ - __m128i shift_vector = _mm_set1_epi8('0'); - while (source + sizeof(__m128i) <= source_end) { - __m128i bytes = _mm_loadu_si128((const __m128i *) source); - bytes = _mm_xor_si128(bytes, shift_vector); - _mm_storeu_si128((__m128i *) dest, bytes); - - source += sizeof(__m128i); - dest += sizeof(__m128i); + bc_simd_128_t shift_vector = bc_simd_set_8x16('0'); + while (source + sizeof(bc_simd_128_t) <= source_end) { + bc_simd_128_t bytes = bc_simd_load_8x16((const bc_simd_128_t *) source); + bytes = bc_simd_xor_8x16(bytes, shift_vector); + bc_simd_store_8x16((bc_simd_128_t *) dest, bytes); + + source += sizeof(bc_simd_128_t); + dest += sizeof(bc_simd_128_t); } #endif diff --git a/ext/bcmath/libbcmath/src/simd.h b/ext/bcmath/libbcmath/src/simd.h new file mode 100644 index 0000000000000..af38f8349618c --- /dev/null +++ b/ext/bcmath/libbcmath/src/simd.h @@ -0,0 +1,59 @@ +/* + +----------------------------------------------------------------------+ + | Copyright (c) The PHP Group | + +----------------------------------------------------------------------+ + | This source file is subject to version 3.01 of the PHP license, | + | that is bundled with this package in the file LICENSE, and is | + | available through the world-wide-web at the following url: | + | https://www.php.net/license/3_01.txt | + | If you did not receive a copy of the PHP license and are unable to | + | obtain it through the world-wide-web, please send a note to | + | license@php.net so we can mail you a copy immediately. 
| + +----------------------------------------------------------------------+ + | Authors: Saki Takamachi | + +----------------------------------------------------------------------+ +*/ + + +#ifndef _BCMATH_SIMD_H_ +#define _BCMATH_SIMD_H_ + +#ifdef __SSE2__ +# include + typedef __m128i bc_simd_128_t; +# define HAVE_BC_SIMD_128 +# define bc_simd_set_8x16(x) _mm_set1_epi8(x) +# define bc_simd_load_8x16(ptr) _mm_loadu_si128((const __m128i *) (ptr)) +# define bc_simd_xor_8x16(a, b) _mm_xor_si128(a, b) +# define bc_simd_store_8x16(ptr, val) _mm_storeu_si128((__m128i *) (ptr), val) +# define bc_simd_add_8x16(a, b) _mm_add_epi8(a, b) +# define bc_simd_cmpeq_8x16(a, b) _mm_cmpeq_epi8(a, b) +# define bc_simd_cmplt_8x16(a, b) _mm_cmplt_epi8(a, b) +# define bc_simd_movemask_8x16(a) _mm_movemask_epi8(a) + +#elif defined(__aarch64__) || defined(_M_ARM64) +# include + typedef int8x16_t bc_simd_128_t; +# define HAVE_BC_SIMD_128 +# define bc_simd_set_8x16(x) vdupq_n_s8(x) +# define bc_simd_load_8x16(ptr) vld1q_s8((const int8_t *) (ptr)) +# define bc_simd_xor_8x16(a, b) veorq_s8(a, b) +# define bc_simd_store_8x16(ptr, val) vst1q_s8((int8_t *) (ptr), val) +# define bc_simd_add_8x16(a, b) vaddq_s8(a, b) +# define bc_simd_cmpeq_8x16(a, b) (vreinterpretq_s8_u8(vceqq_s8(a, b))) +# define bc_simd_cmplt_8x16(a, b) (vreinterpretq_s8_u8(vcltq_s8(a, b))) + static inline int bc_simd_movemask_8x16(int8x16_t vec) + { + /** + * based on code from + * https://community.arm.com/arm-community-blogs/b/servers-and-cloud-computing-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon + */ + uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(vreinterpretq_u8_s8(vec), 7)); + uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7)); + uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14)); + uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28)); + return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8); + } +#endif + +#endif diff --git a/ext/bcmath/libbcmath/src/str2num.c b/ext/bcmath/libbcmath/src/str2num.c index 1e1be35f066f1..945de0cf60003 100644 --- a/ext/bcmath/libbcmath/src/str2num.c +++ b/ext/bcmath/libbcmath/src/str2num.c @@ -32,7 +32,7 @@ #include "bcmath.h" #include "convert.h" #include "private.h" -#include "zend_simd.h" +#include "simd.h" #include #include @@ -40,20 +40,20 @@ static inline const char *bc_count_digits(const char *str, const char *end) { /* Process in bulk */ -#ifdef ZEND_HAVE_VECTOR_128 - const __m128i offset = _mm_set1_epi8((signed char) (SCHAR_MIN - '0')); +#ifdef HAVE_BC_SIMD_128 + const bc_simd_128_t offset = bc_simd_set_8x16((signed char) (SCHAR_MIN - '0')); /* we use the less than comparator, so add 1 */ - const __m128i threshold = _mm_set1_epi8(SCHAR_MIN + ('9' + 1 - '0')); + const bc_simd_128_t threshold = bc_simd_set_8x16(SCHAR_MIN + ('9' + 1 - '0')); - while (str + sizeof(__m128i) <= end) { - __m128i bytes = _mm_loadu_si128((const __m128i *) str); + while (str + sizeof(bc_simd_128_t) <= end) { + bc_simd_128_t bytes = bc_simd_load_8x16((const bc_simd_128_t *) str); /* Wrapping-add the offset to the bytes, such that all bytes below '0' are positive and others are negative. * More specifically, '0' will be -128 and '9' will be -119. */ - bytes = _mm_add_epi8(bytes, offset); + bytes = bc_simd_add_8x16(bytes, offset); /* Now mark all bytes that are <= '9', i.e. <= -119, i.e. < -118, i.e. the threshold. 
*/ - bytes = _mm_cmplt_epi8(bytes, threshold); + bytes = bc_simd_cmplt_8x16(bytes, threshold); - int mask = _mm_movemask_epi8(bytes); + int mask = bc_simd_movemask_8x16(bytes); if (mask != 0xffff) { /* At least one of the bytes is not within range. Move to the first offending byte. */ #ifdef PHP_HAVE_BUILTIN_CTZL @@ -63,7 +63,7 @@ static inline const char *bc_count_digits(const char *str, const char *end) #endif } - str += sizeof(__m128i); + str += sizeof(bc_simd_128_t); } #endif @@ -77,19 +77,19 @@ static inline const char *bc_count_digits(const char *str, const char *end) static inline const char *bc_skip_zero_reverse(const char *scanner, const char *stop) { /* Check in bulk */ -#ifdef ZEND_HAVE_VECTOR_128 - const __m128i c_zero_repeat = _mm_set1_epi8('0'); - while (scanner - sizeof(__m128i) >= stop) { - scanner -= sizeof(__m128i); - __m128i bytes = _mm_loadu_si128((const __m128i *) scanner); +#ifdef HAVE_BC_SIMD_128 + const bc_simd_128_t c_zero_repeat = bc_simd_set_8x16('0'); + while (scanner - sizeof(bc_simd_128_t) >= stop) { + scanner -= sizeof(bc_simd_128_t); + bc_simd_128_t bytes = bc_simd_load_8x16((const bc_simd_128_t *) scanner); /* Checks if all numeric strings are equal to '0'. */ - bytes = _mm_cmpeq_epi8(bytes, c_zero_repeat); + bytes = bc_simd_cmpeq_8x16(bytes, c_zero_repeat); - int mask = _mm_movemask_epi8(bytes); + int mask = bc_simd_movemask_8x16(bytes); /* The probability of having 16 trailing 0s in a row is very low, so we use EXPECTED. */ if (EXPECTED(mask != 0xffff)) { /* Move the pointer back and check each character in loop. */ - scanner += sizeof(__m128i); + scanner += sizeof(bc_simd_128_t); break; } } From 95407ca417d57457229066c721ca8d8c94fb2d7e Mon Sep 17 00:00:00 2001 From: Saki Takamachi Date: Wed, 7 May 2025 18:25:00 +0900 Subject: [PATCH 12/12] use "xsee.h" lib for zend_simd.h --- Zend/zend_simd.h | 493 ++++++++++++++++++++++++++++------ ext/opcache/ZendAccelerator.c | 2 +- ext/standard/string.c | 6 +- ext/standard/url.c | 2 +- 4 files changed, 415 insertions(+), 88 deletions(-) diff --git a/Zend/zend_simd.h b/Zend/zend_simd.h index 9ce5e073cc204..9bd16ce9e9afb 100644 --- a/Zend/zend_simd.h +++ b/Zend/zend_simd.h @@ -1,83 +1,410 @@ -/* - +----------------------------------------------------------------------+ - | Zend Engine | - +----------------------------------------------------------------------+ - | Copyright (c) Zend Technologies Ltd. (http://www.zend.com) | - +----------------------------------------------------------------------+ - | This source file is subject to version 2.00 of the Zend license, | - | that is bundled with this package in the file LICENSE, and is | - | available through the world-wide-web at the following url: | - | http://www.zend.com/license/2_00.txt. | - | If you did not receive a copy of the Zend license and are unable to | - | obtain it through the world-wide-web, please send a note to | - | license@zend.com so we can mail you a copy immediately. 
| - +----------------------------------------------------------------------+ - | Authors: Saki Takamachi | - +----------------------------------------------------------------------+ -*/ - -#ifndef ZEND_SIMD_H -#define ZEND_SIMD_H - -#ifdef __SSE2__ -#include -#define ZEND_HAVE_VECTOR_128 - - -#elif defined(__aarch64__) || defined(_M_ARM64) -#include -#define ZEND_HAVE_VECTOR_128 - -typedef int8x16_t __m128i; - -#define _mm_setzero_si128() vdupq_n_s8(0) -#define _mm_set1_epi8(x) vdupq_n_s8(x) -#define _mm_set_epi16(x0, x1, x2, x3, x4, x5, x6, x7) \ - vreinterpretq_s8_s16((int16x8_t) { \ - (int16_t) (x7), (int16_t) (x6), (int16_t) (x5), (int16_t) (x4), \ - (int16_t) (x3), (int16_t) (x2), (int16_t) (x1), (int16_t) (x0) }) -#define _mm_set_epi32(x0, x1, x2, x3) \ - vreinterpretq_s8_s32((int32x4_t) { (int32_t) (x3), (int32_t) (x2), (int32_t) (x1), (int32_t) (x0) }) -#define _mm_set_epi64x(x0, x1) vreinterpretq_s8_s64((int64x2_t) { (int64_t) (x1), (int64_t) (x0) }) -#define _mm_load_si128(x) vld1q_s8((const int8_t *) (x)) -#define _mm_loadu_si128(x) _mm_load_si128(x) -#define _mm_store_si128(to, x) vst1q_s8((int8_t *) (to), x) -#define _mm_storeu_si128(to, x) _mm_store_si128(to, x) - -#define _mm_or_si128(a, b) vorrq_s8(a, b) -#define _mm_xor_si128(a, b) veorq_s8(a, b) -#define _mm_and_si128(a, b) vandq_s8(a, b) - -#define _mm_slli_si128(x, imm) \ - ((imm) >= 16 ? vdupq_n_s8(0) : \ - vreinterpretq_s8_u8(vextq_u8(vdupq_n_u8(0), vreinterpretq_u8_s8(x), 16 - (imm)))) -#define _mm_srli_si128(x, imm) \ - ((imm) >= 16 ? vdupq_n_s8(0) : \ - vreinterpretq_s8_u8(vextq_u8(vreinterpretq_u8_s8(x), vdupq_n_u8(0), (imm)))) - -/** - * In practice, there is no problem, but a runtime error for signed integer overflow is triggered by UBSAN, - * so perform the calculation as unsigned. Since it is optimized at compile time, there are no unnecessary casts at runtime. 
- */ -#define _mm_add_epi8(a, b) vreinterpretq_s8_u8(vaddq_u8(vreinterpretq_u8_s8(a), vreinterpretq_u8_s8(b))) - -#define _mm_cmpeq_epi8(a, b) (vreinterpretq_s8_u8(vceqq_s8(a, b))) -#define _mm_cmplt_epi8(a, b) (vreinterpretq_s8_u8(vcltq_s8(a, b))) -#define _mm_cmpgt_epi8(a, b) (vreinterpretq_s8_u8(vcgtq_s8(a, b))) - -static zend_always_inline int _mm_movemask_epi8(__m128i x) -{ - /** - * based on code from - * https://community.arm.com/arm-community-blogs/b/servers-and-cloud-computing-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon - */ - uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(vreinterpretq_u8_s8(x), 7)); - uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7)); - uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14)); - uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28)); - return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8); -} - -#endif - -#endif /* ZEND_SIMD_H */ +/******************************************************************************** + * MIT License + * Copyright (c) 2025 Saki Takamachi + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *********************************************************************************/ + + + #ifndef XSSE_H + #define XSSE_H + + #define XSSE_VERSION 10000 + + #ifdef _MSC_VER + # define XSSE_FORCE_INLINE __forceinline + #elif defined(__GNUC__) || defined(__clang__) + # define XSSE_FORCE_INLINE inline __attribute__((always_inline)) + # define XSSE_HAS_MACRO_EXTENSION + #else + # define XSSE_FORCE_INLINE inline + #endif + + + #if defined(__SSE2__) || defined(_M_X64) || defined(_M_AMD64) + #include + #define XSSE2 + + + #elif defined(__aarch64__) || defined(_M_ARM64) + #include + #define XSSE2 + + typedef int8x16_t __m128i; + + + /***************************************************************************** + * Load / Store * + *****************************************************************************/ + + #define _mm_set_epi8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15) \ + ((int8x16_t) { \ + (int8_t) (x15), (int8_t) (x14), (int8_t) (x13), (int8_t) (x12), \ + (int8_t) (x11), (int8_t) (x10), (int8_t) (x9), (int8_t) (x8), \ + (int8_t) (x7), (int8_t) (x6), (int8_t) (x5), (int8_t) (x4), \ + (int8_t) (x3), (int8_t) (x2), (int8_t) (x1), (int8_t) (x0) }) + #define _mm_set_epi16(x0, x1, x2, x3, x4, x5, x6, x7) \ + (vreinterpretq_s8_s16((int16x8_t) { \ + (int16_t) (x7), (int16_t) (x6), (int16_t) (x5), (int16_t) (x4), \ + (int16_t) (x3), (int16_t) (x2), (int16_t) (x1), (int16_t) (x0) })) + #define _mm_set_epi32(x0, x1, x2, x3) \ + (vreinterpretq_s8_s32((int32x4_t) { (int32_t) (x3), (int32_t) (x2), (int32_t) (x1), (int32_t) (x0) })) + #define _mm_set_epi64x(x0, x1) (vreinterpretq_s8_s64((int64x2_t) { (int64_t) (x1), (int64_t) (x0) })) + #define _mm_set1_epi8(x) (vdupq_n_s8((int8_t) (x))) + #define _mm_set1_epi16(x) (vreinterpretq_s8_s16(vdupq_n_s16((int16_t) (x)))) + #define _mm_set1_epi32(x) (vreinterpretq_s8_s32(vdupq_n_s32((int32_t) (x)))) + #define _mm_set1_epi64x(x) (vreinterpretq_s8_s64(vdupq_n_s64((int64_t) (x)))) + + #define _mm_setr_epi8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15) \ + ((int8x16_t) { \ + (int8_t) (x0), (int8_t) (x1), (int8_t) (x2), (int8_t) (x3), \ + (int8_t) (x4), (int8_t) (x5), (int8_t) (x6), (int8_t) (x7), \ + (int8_t) (x8), (int8_t) (x9), (int8_t) (x10), (int8_t) (x11), \ + (int8_t) (x12), (int8_t) (x13), (int8_t) (x14), (int8_t) (x15) }) + #define _mm_setr_epi16(x0, x1, x2, x3, x4, x5, x6, x7) \ + (vreinterpretq_s8_s16((int16x8_t) { \ + (int16_t) (x0), (int16_t) (x1), (int16_t) (x2), (int16_t) (x3), \ + (int16_t) (x4), (int16_t) (x5), (int16_t) (x6), (int16_t) (x7) })) + #define _mm_setr_epi32(x0, x1, x2, x3) \ + (vreinterpretq_s8_s32((int32x4_t) { (int32_t) (x0), (int32_t) (x1), (int32_t) (x2), (int32_t) (x3) })) + + #define _mm_setzero_si128() (vdupq_n_s8(0)) + + #define _mm_load_si128(x) (vld1q_s8((const int8_t *) (x))) + #define _mm_loadu_si128(x) _mm_load_si128(x) + + #define _mm_store_si128(to, x) (vst1q_s8((int8_t *) (to), x)) + #define _mm_storeu_si128(to, x) _mm_store_si128(to, x) + #define _mm_stream_si128(to, x) _mm_store_si128(to, x) + #define _mm_stream_si32(to, x) (*(volatile int32_t *)(to) = (int32_t)(x)) + + + /***************************************************************************** + * Bit shift / Bit wise * + *****************************************************************************/ + + #define _mm_or_si128(a, b) (vorrq_s8((a), (b))) + #define _mm_xor_si128(a, b) (veorq_s8((a), (b))) + #define _mm_and_si128(a, b) (vandq_s8((a), (b))) + #define _mm_andnot_si128(a, b) (vbicq_s8((b), (a))) + + 
#define _mm_slli_epi16(x, count) (vreinterpretq_s8_u16(vshlq_n_u16(vreinterpretq_u16_s8(x), (count)))) + #define _mm_slli_epi32(x, count) (vreinterpretq_s8_u32(vshlq_n_u32(vreinterpretq_u32_s8(x), (count)))) + #define _mm_slli_epi64(x, count) (vreinterpretq_s8_u64(vshlq_n_u64(vreinterpretq_u64_s8(x), (count)))) + static XSSE_FORCE_INLINE __m128i _mm_sll_epi16(__m128i x, __m128i count) + { + uint16_t shift = (uint16_t) (vgetq_lane_s64(vreinterpretq_s64_s8(count), 0) & 0xFFFF); + return vreinterpretq_s8_u16( + vshlq_u16(vreinterpretq_u16_s8(x), vdupq_n_s16((int16_t) shift)) + ); + } + static XSSE_FORCE_INLINE __m128i _mm_sll_epi32(__m128i x, __m128i count) + { + uint32_t shift = (uint32_t) (vgetq_lane_s64(vreinterpretq_s64_s8(count), 0) & 0xFFFFFFFF); + return vreinterpretq_s8_u32( + vshlq_u32(vreinterpretq_u32_s8(x), vdupq_n_s32((int32_t) shift)) + ); + } + static XSSE_FORCE_INLINE __m128i _mm_sll_epi64(__m128i x, __m128i count) + { + uint64_t shift = (uint64_t) vgetq_lane_s64(vreinterpretq_s64_s8(count), 0); + return vreinterpretq_s8_u64( + vshlq_u64(vreinterpretq_u64_s8(x), vdupq_n_s64((int64_t) shift)) + ); + } + + #define _mm_slli_si128(x, imm) \ + ((imm) >= 16 ? vdupq_n_s8(0) : vreinterpretq_s8_u8(vextq_u8(vdupq_n_u8(0), vreinterpretq_u8_s8(x), 16 - (imm)))) + + #define _mm_srai_epi16(x, count) (vreinterpretq_s8_s16(vshrq_n_s16(vreinterpretq_s16_s8(x), (count)))) + #define _mm_srai_epi32(x, count) (vreinterpretq_s8_s32(vshrq_n_s32(vreinterpretq_s32_s8(x), (count)))) + static inline __m128i _mm_sra_epi16(__m128i x, __m128i count) + { + uint16_t shift = (uint16_t) (vgetq_lane_s64(vreinterpretq_s64_s8(count), 0) & 0xFFFF); + return vreinterpretq_s8_s16( + vshlq_s16(vreinterpretq_s16_s8(x), vdupq_n_s16(-(int16_t) shift)) + ); + } + static inline __m128i _mm_sra_epi32(__m128i x, __m128i count) + { + uint32_t shift = (uint32_t) (vgetq_lane_s64(vreinterpretq_s64_s8(count), 0) & 0xFFFFFFFF); + return vreinterpretq_s8_s32( + vshlq_s32(vreinterpretq_s32_s8(x), vdupq_n_s32(-(int32_t) shift)) + ); + } + + #define _mm_srli_epi16(x, count) (vreinterpretq_s8_u16(vshrq_n_u16(vreinterpretq_u16_s8(x), (count)))) + #define _mm_srli_epi32(x, count) (vreinterpretq_s8_u32(vshrq_n_u32(vreinterpretq_u32_s8(x), (count)))) + #define _mm_srli_epi64(x, count) (vreinterpretq_s8_u64(vshrq_n_u64(vreinterpretq_u64_s8(x), (count)))) + static XSSE_FORCE_INLINE __m128i _mm_srl_epi16(__m128i x, __m128i count) + { + uint16_t shift = (uint16_t) (vgetq_lane_s64(vreinterpretq_s64_s8(count), 0) & 0xFFFF); + return vreinterpretq_s8_u16( + vshlq_u16(vreinterpretq_u16_s8(x), vdupq_n_s16(-(int16_t) shift)) + ); + } + static XSSE_FORCE_INLINE __m128i _mm_srl_epi32(__m128i x, __m128i count) + { + uint32_t shift = (uint32_t) (vgetq_lane_s64(vreinterpretq_s64_s8(count), 0) & 0xFFFFFFFF); + return vreinterpretq_s8_u32( + vshlq_u32(vreinterpretq_u32_s8(x), vdupq_n_s32(-(int32_t) shift)) + ); + } + static XSSE_FORCE_INLINE __m128i _mm_srl_epi64(__m128i x, __m128i count) + { + uint64_t shift = (uint64_t) vgetq_lane_s64(vreinterpretq_s64_s8(count), 0); + return vreinterpretq_s8_u64( + vshlq_u64(vreinterpretq_u64_s8(x), vdupq_n_s64(-(int64_t) shift)) + ); + } + + #define _mm_srli_si128(x, imm) \ + ((imm) >= 16 ? 
vdupq_n_s8(0) : vreinterpretq_s8_u8(vextq_u8(vreinterpretq_u8_s8(x), vdupq_n_u8(0), (imm)))) + + + /***************************************************************************** + * Integer Arithmetic Operations * + *****************************************************************************/ + + /** + * In practice, there is no problem, but a runtime error for signed integer overflow is triggered by UBSAN, + * so perform the calculation as unsigned. Since it is optimized at compile time, there are no unnecessary casts at runtime. + */ + #define _mm_add_epi8(a, b) (vreinterpretq_s8_u8(vaddq_u8(vreinterpretq_u8_s8(a), vreinterpretq_u8_s8(b)))) + #define _mm_add_epi16(a, b) (vreinterpretq_s8_u16(vaddq_u16(vreinterpretq_u16_s8(a), vreinterpretq_u16_s8(b)))) + #define _mm_add_epi32(a, b) (vreinterpretq_s8_u32(vaddq_u32(vreinterpretq_u32_s8(a), vreinterpretq_u32_s8(b)))) + #define _mm_add_epi64(a, b) (vreinterpretq_s8_u64(vaddq_u64(vreinterpretq_u64_s8(a), vreinterpretq_u64_s8(b)))) + + #define _mm_adds_epi8(a, b) (vqaddq_s8((a), (b))) + #define _mm_adds_epi16(a, b) (vreinterpretq_s8_s16(vqaddq_s16(vreinterpretq_s16_s8(a), vreinterpretq_s16_s8(b)))) + #define _mm_adds_epu8(a, b) (vreinterpretq_s8_u8(vqaddq_u8(vreinterpretq_u8_s8(a), vreinterpretq_u8_s8(b)))) + #define _mm_adds_epu16(a, b) (vreinterpretq_s8_u16(vqaddq_u16(vreinterpretq_u16_s8(a), vreinterpretq_u16_s8(b)))) + + #define _mm_avg_epu8(a, b) (vreinterpretq_s8_u8(vrhaddq_u8(vreinterpretq_u8_s8(a), vreinterpretq_u8_s8(b)))) + #define _mm_avg_epu16(a, b) (vreinterpretq_s8_u16(vrhaddq_u16(vreinterpretq_u16_s8(a), vreinterpretq_u16_s8(b)))) + + static XSSE_FORCE_INLINE __m128i _mm_madd_epi16(__m128i a, __m128i b) + { + int32x4_t mul_lo = vmull_s16(vget_low_s16(vreinterpretq_s16_s8(a)), vget_low_s16(vreinterpretq_s16_s8(b))); + int32x4_t mul_hi = vmull_s16(vget_high_s16(vreinterpretq_s16_s8(a)), vget_high_s16(vreinterpretq_s16_s8(b))); + + return vreinterpretq_s8_s32(vcombine_s32( + vpadd_s32(vget_low_s32(mul_lo), vget_high_s32(mul_lo)), + vpadd_s32(vget_low_s32(mul_hi), vget_high_s32(mul_hi)) + )); + } + + #define _mm_max_epu8(a, b) (vreinterpretq_s8_u8(vmaxq_u8(vreinterpretq_u8_s8(a), vreinterpretq_u8_s8(b)))) + #define _mm_max_epi16(a, b) (vreinterpretq_s8_s16(vmaxq_s16(vreinterpretq_s16_s8(a), vreinterpretq_s16_s8(b)))) + #define _mm_min_epu8(a, b) (vreinterpretq_s8_u8(vminq_u8(vreinterpretq_u8_s8(a), vreinterpretq_u8_s8(b)))) + #define _mm_min_epi16(a, b) (vreinterpretq_s8_s16(vminq_s16(vreinterpretq_s16_s8(a), vreinterpretq_s16_s8(b)))) + + static XSSE_FORCE_INLINE __m128i _mm_mulhi_epi16(__m128i a, __m128i b) + { + int32x4_t lo = vmull_s16(vget_low_s16(vreinterpretq_s16_s8(a)), vget_low_s16(vreinterpretq_s16_s8(b))); + int32x4_t hi = vmull_s16(vget_high_s16(vreinterpretq_s16_s8(a)), vget_high_s16(vreinterpretq_s16_s8(b))); + return vreinterpretq_s8_s16(vcombine_s16(vshrn_n_s32(lo, 16), vshrn_n_s32(hi, 16))); + } + static XSSE_FORCE_INLINE __m128i _mm_mulhi_epu16(__m128i a, __m128i b) + { + uint32x4_t lo = vmull_u16(vget_low_u16(vreinterpretq_u16_s8(a)), vget_low_u16(vreinterpretq_u16_s8(b))); + uint32x4_t hi = vmull_u16(vget_high_u16(vreinterpretq_u16_s8(a)), vget_high_u16(vreinterpretq_u16_s8(b))); + return vreinterpretq_s8_u16(vcombine_u16(vshrn_n_u32(lo, 16), vshrn_n_u32(hi, 16))); + } + static XSSE_FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b) + { + int32x4_t lo = vmull_s16(vget_low_s16(vreinterpretq_s16_s8(a)), vget_low_s16(vreinterpretq_s16_s8(b))); + int32x4_t hi = 
vmull_s16(vget_high_s16(vreinterpretq_s16_s8(a)), vget_high_s16(vreinterpretq_s16_s8(b))); + return vreinterpretq_s8_s16(vcombine_s16(vmovn_s32(lo), vmovn_s32(hi))); + } + static XSSE_FORCE_INLINE __m128i _mm_mul_epu32(__m128i a, __m128i b) + { + uint32x4_t evens = vuzpq_u32(vreinterpretq_u32_s8(a), vreinterpretq_u32_s8(b)).val[0]; + return vreinterpretq_s8_u64(vmull_u32(vget_low_u32(evens), vget_high_u32(evens))); + } + static XSSE_FORCE_INLINE __m128i _mm_sad_epu8(__m128i a, __m128i b) + { + uint16x8_t abs_diffs_16 = vpaddlq_u8(vabdq_u8(vreinterpretq_u8_s8(a), vreinterpretq_u8_s8(b))); + uint32x4_t abs_diffs_32 = vpaddlq_u16(abs_diffs_16); + uint64x2_t abs_diffs_64 = vpaddlq_u32(abs_diffs_32); + + return vreinterpretq_s8_u16((uint16x8_t) { + (int16_t) vgetq_lane_u64(abs_diffs_64, 0), 0, 0, 0, + (int16_t) vgetq_lane_u64(abs_diffs_64, 1), 0, 0, 0 + }); + } + + #define _mm_sub_epi8(a, b) (vreinterpretq_s8_u8(vsubq_u8(vreinterpretq_u8_s8(a), vreinterpretq_u8_s8(b)))) + #define _mm_sub_epi16(a, b) (vreinterpretq_s8_u16(vsubq_u16(vreinterpretq_u16_s8(a), vreinterpretq_u16_s8(b)))) + #define _mm_sub_epi32(a, b) (vreinterpretq_s8_u32(vsubq_u32(vreinterpretq_u32_s8(a), vreinterpretq_u32_s8(b)))) + #define _mm_sub_epi64(a, b) (vreinterpretq_s8_u64(vsubq_u64(vreinterpretq_u64_s8(a), vreinterpretq_u64_s8(b)))) + + #define _mm_subs_epi8(a, b) (vqsubq_s8((a), (b))) + #define _mm_subs_epi16(a, b) (vreinterpretq_s8_s16(vqsubq_s16(vreinterpretq_s16_s8(a), vreinterpretq_s16_s8(b)))) + #define _mm_subs_epu8(a, b) (vreinterpretq_s8_u8(vqsubq_u8(vreinterpretq_u8_s8(a), vreinterpretq_u8_s8(b)))) + #define _mm_subs_epu16(a, b) (vreinterpretq_s8_u16(vqsubq_u16(vreinterpretq_u16_s8(a), vreinterpretq_u16_s8(b)))) + + + /***************************************************************************** + * Comparison * + *****************************************************************************/ + + #define _mm_cmpeq_epi8(a, b) (vreinterpretq_s8_u8(vceqq_s8((a), (b)))) + #define _mm_cmpeq_epi16(a, b) (vreinterpretq_s8_u16(vceqq_s16(vreinterpretq_s16_s8(a), vreinterpretq_s16_s8(b)))) + #define _mm_cmpeq_epi32(a, b) (vreinterpretq_s8_u32(vceqq_s32(vreinterpretq_s32_s8(a), vreinterpretq_s32_s8(b)))) + + #define _mm_cmplt_epi8(a, b) (vreinterpretq_s8_u8(vcltq_s8((a), (b)))) + #define _mm_cmplt_epi16(a, b) (vreinterpretq_s8_u16(vcltq_s16(vreinterpretq_s16_s8(a), vreinterpretq_s16_s8(b)))) + #define _mm_cmplt_epi32(a, b) (vreinterpretq_s8_u32(vcltq_s32(vreinterpretq_s32_s8(a), vreinterpretq_s32_s8(b)))) + + #define _mm_cmpgt_epi8(a, b) (vreinterpretq_s8_u8(vcgtq_s8((a), (b)))) + #define _mm_cmpgt_epi16(a, b) (vreinterpretq_s8_u16(vcgtq_s16(vreinterpretq_s16_s8(a), vreinterpretq_s16_s8(b)))) + #define _mm_cmpgt_epi32(a, b) (vreinterpretq_s8_u32(vcgtq_s32(vreinterpretq_s32_s8(a), vreinterpretq_s32_s8(b)))) + + + /***************************************************************************** + * Convert * + *****************************************************************************/ + + #define _mm_cvtsi32_si128(x) (vreinterpretq_s8_s32((int32x4_t) { (int32_t) (x), 0, 0, 0 })) + #define _mm_cvtsi64_si128(x) (vreinterpretq_s8_s64((int64x2_t) { (int64_t) (x), 0 })) + #define _mm_cvtsi128_si32(x) (vgetq_lane_s32(vreinterpretq_s32_s8(x), 0)) + #define _mm_cvtsi128_si64(x) (vgetq_lane_s64(vreinterpretq_s64_s8(x), 0)) + + + /***************************************************************************** + * Others * + *****************************************************************************/ + + #define _mm_packs_epi16(a, b) 
(vcombine_s8(vqmovn_s16(vreinterpretq_s16_s8(a)), vqmovn_s16(vreinterpretq_s16_s8(b)))) + #define _mm_packs_epi32(a, b) \ + (vreinterpretq_s8_s16(vcombine_s16(vqmovn_s32(vreinterpretq_s32_s8(a)), vqmovn_s32(vreinterpretq_s32_s8(b))))) + #define _mm_packus_epi16(a, b) \ + (vreinterpretq_s8_u8(vcombine_u8(vqmovun_s16(vreinterpretq_s16_s8(a)), vqmovun_s16(vreinterpretq_s16_s8(b))))) + + #define _mm_extract_epi16(x, imm) (vgetq_lane_s16(vreinterpretq_s16_s8(x), (imm))) + #define _mm_insert_epi16(x, val, imm) (vreinterpretq_s8_s16(vsetq_lane_s16((int16_t) (val), vreinterpretq_s16_s8(x), (imm)))) + + static XSSE_FORCE_INLINE int _mm_movemask_epi8(__m128i x) + { + /** + * based on code from + * https://community.arm.com/arm-community-blogs/b/servers-and-cloud-computing-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon + */ + uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(vreinterpretq_u8_s8(x), 7)); + uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7)); + uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14)); + uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28)); + return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8); + } + + #define _MM_SHUFFLE(a, b, c, d) (((a) << 6) | ((b) << 4) | ((c) << 2) | (d)) + #ifdef XSSE_HAS_MACRO_EXTENSION + #define _mm_shuffle_epi32(x, imm) __extension__({ \ + int32x4_t __xsse_tmp = vreinterpretq_s32_s8(x); \ + vreinterpretq_s8_s32((int32x4_t) { \ + (int32_t) vgetq_lane_s32(__xsse_tmp, ((imm) >> 0) & 0x3), \ + (int32_t) vgetq_lane_s32(__xsse_tmp, ((imm) >> 2) & 0x3), \ + (int32_t) vgetq_lane_s32(__xsse_tmp, ((imm) >> 4) & 0x3), \ + (int32_t) vgetq_lane_s32(__xsse_tmp, ((imm) >> 6) & 0x3) \ + }); \ + }) + #define _mm_shufflehi_epi16(x, imm) __extension__({ \ + int16x8_t __xsse_tmp = vreinterpretq_s16_s8(x); \ + vreinterpretq_s8_s16(vcombine_s16( \ + vget_low_s16(__xsse_tmp), \ + (int16x4_t) { \ + (int16_t) vgetq_lane_s16(__xsse_tmp, (((imm) >> 0) & 0x3) + 4), \ + (int16_t) vgetq_lane_s16(__xsse_tmp, (((imm) >> 2) & 0x3) + 4), \ + (int16_t) vgetq_lane_s16(__xsse_tmp, (((imm) >> 4) & 0x3) + 4), \ + (int16_t) vgetq_lane_s16(__xsse_tmp, (((imm) >> 6) & 0x3) + 4) \ + } \ + )); \ + }) + #define _mm_shufflelo_epi16(x, imm) __extension__({ \ + int16x8_t __xsse_tmp = vreinterpretq_s16_s8(x); \ + vreinterpretq_s8_s16(vcombine_s16( \ + (int16x4_t) { \ + (int16_t) vgetq_lane_s16(__xsse_tmp, (((imm) >> 0) & 0x3)), \ + (int16_t) vgetq_lane_s16(__xsse_tmp, (((imm) >> 2) & 0x3)), \ + (int16_t) vgetq_lane_s16(__xsse_tmp, (((imm) >> 4) & 0x3)), \ + (int16_t) vgetq_lane_s16(__xsse_tmp, (((imm) >> 6) & 0x3)) \ + }, \ + vget_high_s16(__xsse_tmp) \ + )); \ + }) + #else + static XSSE_FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i x, int imm) + { + int32x4_t vec = vreinterpretq_s32_s8(x); + int32_t arr[4]; + vst1q_s32(arr, vec); + + return vreinterpretq_s8_s32((int32x4_t) { + arr[(imm >> 0) & 0x3], + arr[(imm >> 2) & 0x3], + arr[(imm >> 4) & 0x3], + arr[(imm >> 6) & 0x3] + }); + } + static XSSE_FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i x, int imm) + { + int16x8_t vec = vreinterpretq_s16_s8(x); + int16_t arr[8]; + vst1q_s16(arr, vec); + + return vreinterpretq_s8_s16((int16x8_t) { + arr[0], arr[1], arr[2], arr[3], + arr[((imm >> 0) & 0x3) + 4], + arr[((imm >> 2) & 0x3) + 4], + arr[((imm >> 4) & 0x3) + 4], + arr[((imm >> 6) & 0x3) + 4] + }); + } + static XSSE_FORCE_INLINE __m128i _mm_shufflelo_epi16(__m128i x, int imm) + { + int16x8_t vec = 
vreinterpretq_s16_s8(x); + int16_t arr[8]; + vst1q_s16(arr, vec); + + return vreinterpretq_s8_s16((int16x8_t) { + arr[((imm >> 0) & 0x3)], + arr[((imm >> 2) & 0x3)], + arr[((imm >> 4) & 0x3)], + arr[((imm >> 6) & 0x3)], + arr[4], arr[5], arr[6], arr[7] + }); + } + #endif + + #define _mm_unpackhi_epi8(a, b) (vzip2q_s8((a), (b))) + #define _mm_unpackhi_epi16(a, b) (vreinterpretq_s8_s16(vzip2q_s16(vreinterpretq_s16_s8(a), vreinterpretq_s16_s8(b)))) + #define _mm_unpackhi_epi32(a, b) (vreinterpretq_s8_s32(vzip2q_s32(vreinterpretq_s32_s8(a), vreinterpretq_s32_s8(b)))) + #define _mm_unpackhi_epi64(a, b) (vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(a), vreinterpretq_s64_s8(b)))) + + #define _mm_unpacklo_epi8(a, b) (vzip1q_s8((a), (b))) + #define _mm_unpacklo_epi16(a, b) (vreinterpretq_s8_s16(vzip1q_s16(vreinterpretq_s16_s8(a), vreinterpretq_s16_s8(b)))) + #define _mm_unpacklo_epi32(a, b) (vreinterpretq_s8_s32(vzip1q_s32(vreinterpretq_s32_s8(a), vreinterpretq_s32_s8(b)))) + #define _mm_unpacklo_epi64(a, b) (vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(a), vreinterpretq_s64_s8(b)))) + + #define _mm_move_epi64(x) (vreinterpretq_s8_s64((int64x2_t) { vgetq_lane_s64(vreinterpretq_s64_s8(x), 0), 0 })) + + #endif + + #endif /* XSSE_H */ diff --git a/ext/opcache/ZendAccelerator.c b/ext/opcache/ZendAccelerator.c index bffb72fb59c81..eb75bc0b74736 100644 --- a/ext/opcache/ZendAccelerator.c +++ b/ext/opcache/ZendAccelerator.c @@ -173,7 +173,7 @@ static void bzero_aligned(void *mem, size_t size) _mm256_store_si256((__m256i*)(p+32), ymm0); p += 64; } -#elif defined(ZEND_HAVE_VECTOR_128) +#elif defined(XSSE2) char *p = (char*)mem; char *end = p + size; __m128i xmm0 = _mm_setzero_si128(); diff --git a/ext/standard/string.c b/ext/standard/string.c index 7a282442659e4..f21c9be8a7bd2 100644 --- a/ext/standard/string.c +++ b/ext/standard/string.c @@ -2818,7 +2818,7 @@ static zend_string *php_strtr_ex(zend_string *str, const char *str_from, const c char *input = ZSTR_VAL(str); size_t len = ZSTR_LEN(str); -#ifdef ZEND_HAVE_VECTOR_128 +#ifdef XSSE2 if (ZSTR_LEN(str) >= sizeof(__m128i)) { __m128i search = _mm_set1_epi8(ch_from); __m128i delta = _mm_set1_epi8(ch_to - ch_from); @@ -3038,7 +3038,7 @@ static zend_always_inline zend_long count_chars(const char *p, zend_long length, zend_long count = 0; const char *endp; -#ifdef ZEND_HAVE_VECTOR_128 +#ifdef XSSE2 if (length >= sizeof(__m128i)) { __m128i search = _mm_set1_epi8(ch); @@ -5836,7 +5836,7 @@ static zend_string *php_str_rot13(zend_string *str) e = p + ZSTR_LEN(str); target = ZSTR_VAL(ret); -#ifdef ZEND_HAVE_VECTOR_128 +#ifdef XSSE2 if (e - p > 15) { const __m128i a_minus_1 = _mm_set1_epi8('a' - 1); const __m128i m_plus_1 = _mm_set1_epi8('m' + 1); diff --git a/ext/standard/url.c b/ext/standard/url.c index e5d8a58966db9..3c79fd2250021 100644 --- a/ext/standard/url.c +++ b/ext/standard/url.c @@ -457,7 +457,7 @@ static zend_always_inline zend_string *php_url_encode_impl(const char *s, size_t start = zend_string_safe_alloc(3, len, 0, 0); to = (unsigned char*)ZSTR_VAL(start); -#ifdef ZEND_HAVE_VECTOR_128 +#ifdef XSSE2 while (from + 16 < end) { __m128i mask; uint32_t bits;
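Editor's note: a minimal usage sketch, not part of the patch, showing the compare + movemask pattern that php_url_encode_impl and the other call sites above rely on; it behaves identically under native SSE2 and under the NEON emulation of _mm_movemask_epi8. The include path, the helper name, and the use of the _mm_set1_epi8/_mm_loadu_si128 wrappers defined earlier in xsse.h are assumptions.

#include "xsse.h" /* hypothetical path to the shim shown above */

/* Return the index of the first ' ' in a 16-byte chunk, or -1 if none. */
static inline int xsse_find_space16(const unsigned char *p)
{
    __m128i chunk = _mm_loadu_si128((const __m128i *) p);
    __m128i hits = _mm_cmpeq_epi8(chunk, _mm_set1_epi8(' '));
    int mask = _mm_movemask_epi8(hits); /* bit i set <=> p[i] == ' ' */
    for (int i = 0; i < 16; i++) {
        if (mask & (1 << i)) {
            return i; /* lowest set bit = first match */
        }
    }
    return -1;
}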