
Commit 389bf9a

Add Miri tests for f16/f128 SIMD operations
1 parent 7c594eb commit 389bf9a

1 file changed: 258 additions, 2 deletions

src/tools/miri/tests/pass/intrinsics/portable-simd.rs
@@ -6,18 +6,143 @@
     rustc_attrs,
     intrinsics,
     core_intrinsics,
-    repr_simd
+    repr_simd,
+    f16,
+    f128
 )]
-#![allow(incomplete_features, internal_features)]
+#![allow(incomplete_features, internal_features, non_camel_case_types)]
+use std::fmt::{self, Debug, Formatter};
 use std::intrinsics::simd as intrinsics;
 use std::ptr;
 use std::simd::StdFloat;
 use std::simd::prelude::*;

+#[repr(simd, packed)]
+#[derive(Copy)]
+struct PackedSimd<T, const N: usize>([T; N]);
+
+impl<T: Copy, const N: usize> Clone for PackedSimd<T, N> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<T: PartialEq + Copy, const N: usize> PartialEq for PackedSimd<T, N> {
+    fn eq(&self, other: &Self) -> bool {
+        self.into_array() == other.into_array()
+    }
+}
+
+impl<T: Debug + Copy, const N: usize> Debug for PackedSimd<T, N> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        Debug::fmt(&self.into_array(), f)
+    }
+}
+
+type f16x2 = PackedSimd<f16, 2>;
+type f16x4 = PackedSimd<f16, 4>;
+
+type f128x2 = PackedSimd<f128, 2>;
+type f128x4 = PackedSimd<f128, 4>;
+
+impl<T: Copy, const N: usize> PackedSimd<T, N> {
+    fn splat(x: T) -> Self {
+        Self([x; N])
+    }
+    fn from_array(a: [T; N]) -> Self {
+        Self(a)
+    }
+    fn into_array(self) -> [T; N] {
+        // as we have `repr(packed)`, there shouldn't be any padding bytes
+        unsafe { std::mem::transmute_copy(&self) }
+    }
+}
+
 #[rustc_intrinsic]
 #[rustc_nounwind]
 pub unsafe fn simd_shuffle_const_generic<T, U, const IDX: &'static [u32]>(x: T, y: T) -> U;

+pub fn simd_ops_f16() {
+    use intrinsics::*;
+
+    // small hack to make type inference better
+    macro_rules! assert_eq {
+        ($a:expr, $b:expr $(,$t:tt)*) => {
+            ::std::assert_eq!($b, $a $(,$t)*)
+        }
+    }
+
+    let a = f16x4::splat(10.0);
+    let b = f16x4::from_array([1.0, 2.0, 3.0, -4.0]);
+
+    unsafe {
+        assert_eq!(simd_neg(b), f16x4::from_array([-1.0, -2.0, -3.0, 4.0]));
+        assert_eq!(simd_add(a, b), f16x4::from_array([11.0, 12.0, 13.0, 6.0]));
+        assert_eq!(simd_sub(a, b), f16x4::from_array([9.0, 8.0, 7.0, 14.0]));
+        assert_eq!(simd_mul(a, b), f16x4::from_array([10.0, 20.0, 30.0, -40.0]));
+        assert_eq!(simd_div(b, a), f16x4::from_array([0.1, 0.2, 0.3, -0.4]));
+        assert_eq!(simd_div(a, f16x4::splat(2.0)), f16x4::splat(5.0));
+        assert_eq!(simd_rem(a, b), f16x4::from_array([0.0, 0.0, 1.0, 2.0]));
+        assert_eq!(simd_fabs(b), f16x4::from_array([1.0, 2.0, 3.0, 4.0]));
+        assert_eq!(
+            simd_fmax(a, simd_mul(b, f16x4::splat(4.0))),
+            f16x4::from_array([10.0, 10.0, 12.0, 10.0])
+        );
+        assert_eq!(
+            simd_fmin(a, simd_mul(b, f16x4::splat(4.0))),
+            f16x4::from_array([4.0, 8.0, 10.0, -16.0])
+        );
+
+        assert_eq!(simd_fma(a, b, a), simd_add(simd_mul(a, b), a));
+        assert_eq!(simd_fma(b, b, a), simd_add(simd_mul(b, b), a));
+        assert_eq!(simd_fma(a, b, b), simd_add(simd_mul(a, b), b));
+        assert_eq!(
+            simd_fma(f16x4::splat(-3.2), b, f16x4::splat(f16::NEG_INFINITY)),
+            f16x4::splat(f16::NEG_INFINITY)
+        );
+
+        assert_eq!(simd_relaxed_fma(a, b, a), simd_add(simd_mul(a, b), a));
+        assert_eq!(simd_relaxed_fma(b, b, a), simd_add(simd_mul(b, b), a));
+        assert_eq!(simd_relaxed_fma(a, b, b), simd_add(simd_mul(a, b), b));
+        assert_eq!(
+            simd_relaxed_fma(f16x4::splat(-3.2), b, f16x4::splat(f16::NEG_INFINITY)),
+            f16x4::splat(f16::NEG_INFINITY)
+        );
+
+        assert_eq!(simd_fsqrt(simd_mul(a, a)), a);
+        assert_eq!(simd_fsqrt(simd_mul(b, b)), simd_fabs(b));
+
+        assert_eq!(simd_eq(a, simd_mul(f16x4::splat(5.0), b)), i32x4::from_array([0, !0, 0, 0]));
+        assert_eq!(simd_ne(a, simd_mul(f16x4::splat(5.0), b)), i32x4::from_array([!0, 0, !0, !0]));
+        assert_eq!(simd_le(a, simd_mul(f16x4::splat(5.0), b)), i32x4::from_array([0, !0, !0, 0]));
+        assert_eq!(simd_lt(a, simd_mul(f16x4::splat(5.0), b)), i32x4::from_array([0, 0, !0, 0]));
+        assert_eq!(simd_ge(a, simd_mul(f16x4::splat(5.0), b)), i32x4::from_array([!0, !0, 0, !0]));
+        assert_eq!(simd_gt(a, simd_mul(f16x4::splat(5.0), b)), i32x4::from_array([!0, 0, 0, !0]));
+
+        assert_eq!(simd_reduce_add_ordered(a, 0.0), 40.0f16);
+        assert_eq!(simd_reduce_add_ordered(b, 0.0), 2.0f16);
+        assert_eq!(simd_reduce_mul_ordered(a, 1.0), 10000.0f16);
+        assert_eq!(simd_reduce_mul_ordered(b, 1.0), -24.0f16);
+        assert_eq!(simd_reduce_max(a), 10.0f16);
+        assert_eq!(simd_reduce_max(b), 3.0f16);
+        assert_eq!(simd_reduce_min(a), 10.0f16);
+        assert_eq!(simd_reduce_min(b), -4.0f16);
+
+        assert_eq!(
+            simd_fmax(f16x2::from_array([0.0, f16::NAN]), f16x2::from_array([f16::NAN, 0.0])),
+            f16x2::from_array([0.0, 0.0])
+        );
+        assert_eq!(simd_reduce_max(f16x2::from_array([0.0, f16::NAN])), 0.0f16);
+        assert_eq!(simd_reduce_max(f16x2::from_array([f16::NAN, 0.0])), 0.0f16);
+        assert_eq!(
+            simd_fmin(f16x2::from_array([0.0, f16::NAN]), f16x2::from_array([f16::NAN, 0.0])),
+            f16x2::from_array([0.0, 0.0])
+        );
+        assert_eq!(simd_reduce_min(f16x2::from_array([0.0, f16::NAN])), 0.0f16);
+        assert_eq!(simd_reduce_min(f16x2::from_array([f16::NAN, 0.0])), 0.0f16);
+    }
+}
+
 fn simd_ops_f32() {
     let a = f32x4::splat(10.0);
     let b = f32x4::from_array([1.0, 2.0, 3.0, -4.0]);
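
A note on the simd_fma assertions added above: they compare the fused result against simd_add(simd_mul(...)), which only holds because the chosen operands keep both the product and the sum exactly representable; in general a fused multiply-add rounds once and can differ from the two-step computation. A scalar sketch of the same identity using stable f32::mul_add (illustrative only, not part of the commit):

// Scalar analogue of the fused-multiply-add checks in simd_ops_f16/simd_ops_f128.
fn main() {
    let (a, b) = (10.0f32, 2.0f32);
    // For operands whose product and sum are exactly representable,
    // the fused result equals the unfused one.
    assert_eq!(a.mul_add(b, a), a * b + a);
    // An infinite addend dominates, matching the NEG_INFINITY cases in the test.
    assert_eq!((-3.2f32).mul_add(b, f32::NEG_INFINITY), f32::NEG_INFINITY);
}
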
@@ -148,6 +273,87 @@ fn simd_ops_f64() {
     assert_eq!(f64x2::from_array([f64::NAN, 0.0]).reduce_min(), 0.0);
 }

+pub fn simd_ops_f128() {
+    use intrinsics::*;
+
+    // small hack to make type inference better
+    macro_rules! assert_eq {
+        ($a:expr, $b:expr $(,$t:tt)*) => {
+            ::std::assert_eq!($b, $a $(,$t)*)
+        }
+    }
+
+    let a = f128x4::splat(10.0);
+    let b = f128x4::from_array([1.0, 2.0, 3.0, -4.0]);
+
+    unsafe {
+        assert_eq!(simd_neg(b), f128x4::from_array([-1.0, -2.0, -3.0, 4.0]));
+        assert_eq!(simd_add(a, b), f128x4::from_array([11.0, 12.0, 13.0, 6.0]));
+        assert_eq!(simd_sub(a, b), f128x4::from_array([9.0, 8.0, 7.0, 14.0]));
+        assert_eq!(simd_mul(a, b), f128x4::from_array([10.0, 20.0, 30.0, -40.0]));
+        assert_eq!(simd_div(b, a), f128x4::from_array([0.1, 0.2, 0.3, -0.4]));
+        assert_eq!(simd_div(a, f128x4::splat(2.0)), f128x4::splat(5.0));
+        assert_eq!(simd_rem(a, b), f128x4::from_array([0.0, 0.0, 1.0, 2.0]));
+        assert_eq!(simd_fabs(b), f128x4::from_array([1.0, 2.0, 3.0, 4.0]));
+        assert_eq!(
+            simd_fmax(a, simd_mul(b, f128x4::splat(4.0))),
+            f128x4::from_array([10.0, 10.0, 12.0, 10.0])
+        );
+        assert_eq!(
+            simd_fmin(a, simd_mul(b, f128x4::splat(4.0))),
+            f128x4::from_array([4.0, 8.0, 10.0, -16.0])
+        );
+
+        assert_eq!(simd_fma(a, b, a), simd_add(simd_mul(a, b), a));
+        assert_eq!(simd_fma(b, b, a), simd_add(simd_mul(b, b), a));
+        assert_eq!(simd_fma(a, b, b), simd_add(simd_mul(a, b), b));
+        assert_eq!(
+            simd_fma(f128x4::splat(-3.2), b, f128x4::splat(f128::NEG_INFINITY)),
+            f128x4::splat(f128::NEG_INFINITY)
+        );
+
+        assert_eq!(simd_relaxed_fma(a, b, a), simd_add(simd_mul(a, b), a));
+        assert_eq!(simd_relaxed_fma(b, b, a), simd_add(simd_mul(b, b), a));
+        assert_eq!(simd_relaxed_fma(a, b, b), simd_add(simd_mul(a, b), b));
+        assert_eq!(
+            simd_relaxed_fma(f128x4::splat(-3.2), b, f128x4::splat(f128::NEG_INFINITY)),
+            f128x4::splat(f128::NEG_INFINITY)
+        );
+
+        assert_eq!(simd_fsqrt(simd_mul(a, a)), a);
+        assert_eq!(simd_fsqrt(simd_mul(b, b)), simd_fabs(b));
+
+        assert_eq!(simd_eq(a, simd_mul(f128x4::splat(5.0), b)), i32x4::from_array([0, !0, 0, 0]));
+        assert_eq!(simd_ne(a, simd_mul(f128x4::splat(5.0), b)), i32x4::from_array([!0, 0, !0, !0]));
+        assert_eq!(simd_le(a, simd_mul(f128x4::splat(5.0), b)), i32x4::from_array([0, !0, !0, 0]));
+        assert_eq!(simd_lt(a, simd_mul(f128x4::splat(5.0), b)), i32x4::from_array([0, 0, !0, 0]));
+        assert_eq!(simd_ge(a, simd_mul(f128x4::splat(5.0), b)), i32x4::from_array([!0, !0, 0, !0]));
+        assert_eq!(simd_gt(a, simd_mul(f128x4::splat(5.0), b)), i32x4::from_array([!0, 0, 0, !0]));
+
+        assert_eq!(simd_reduce_add_ordered(a, 0.0), 40.0f128);
+        assert_eq!(simd_reduce_add_ordered(b, 0.0), 2.0f128);
+        assert_eq!(simd_reduce_mul_ordered(a, 1.0), 10000.0f128);
+        assert_eq!(simd_reduce_mul_ordered(b, 1.0), -24.0f128);
+        assert_eq!(simd_reduce_max(a), 10.0f128);
+        assert_eq!(simd_reduce_max(b), 3.0f128);
+        assert_eq!(simd_reduce_min(a), 10.0f128);
+        assert_eq!(simd_reduce_min(b), -4.0f128);
+
+        assert_eq!(
+            simd_fmax(f128x2::from_array([0.0, f128::NAN]), f128x2::from_array([f128::NAN, 0.0])),
+            f128x2::from_array([0.0, 0.0])
+        );
+        assert_eq!(simd_reduce_max(f128x2::from_array([0.0, f128::NAN])), 0.0f128);
+        assert_eq!(simd_reduce_max(f128x2::from_array([f128::NAN, 0.0])), 0.0f128);
+        assert_eq!(
+            simd_fmin(f128x2::from_array([0.0, f128::NAN]), f128x2::from_array([f128::NAN, 0.0])),
+            f128x2::from_array([0.0, 0.0])
+        );
+        assert_eq!(simd_reduce_min(f128x2::from_array([0.0, f128::NAN])), 0.0f128);
+        assert_eq!(simd_reduce_min(f128x2::from_array([f128::NAN, 0.0])), 0.0f128);
+    }
+}
+
 fn simd_ops_i32() {
     let a = i32x4::splat(10);
     let b = i32x4::from_array([1, 2, 3, -4]);
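
The NaN cases at the end of simd_ops_f16 and simd_ops_f128 check that simd_fmax/simd_fmin and the min/max reductions ignore a NaN operand and return the other value, which is the same rule std documents for scalar f32::max and f32::min. A scalar sketch of that behaviour (illustrative only, not part of the commit):

// Scalar analogue of the NaN handling checked for simd_fmax/simd_fmin and the
// min/max reductions: a NaN operand is ignored, not propagated.
fn main() {
    assert_eq!(f32::NAN.max(0.0), 0.0);
    assert_eq!(0.0f32.max(f32::NAN), 0.0);
    assert_eq!(f32::NAN.min(0.0), 0.0);
    // A reduction written with fold + max likewise ignores the NaN element.
    let reduced = [0.0f32, f32::NAN].into_iter().fold(f32::NEG_INFINITY, f32::max);
    assert_eq!(reduced, 0.0);
}
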
@@ -563,6 +769,31 @@ fn simd_gather_scatter() {
 }

 fn simd_round() {
+    unsafe {
+        use intrinsics::*;
+
+        assert_eq!(
+            simd_ceil(f16x4::from_array([0.9, 1.001, 2.0, -4.5])),
+            f16x4::from_array([1.0, 2.0, 2.0, -4.0])
+        );
+        assert_eq!(
+            simd_floor(f16x4::from_array([0.9, 1.001, 2.0, -4.5])),
+            f16x4::from_array([0.0, 1.0, 2.0, -5.0])
+        );
+        assert_eq!(
+            simd_round(f16x4::from_array([0.9, 1.001, 2.0, -4.5])),
+            f16x4::from_array([1.0, 1.0, 2.0, -5.0])
+        );
+        assert_eq!(
+            simd_round_ties_even(f16x4::from_array([0.9, 1.001, 2.0, -4.5])),
+            f16x4::from_array([1.0, 1.0, 2.0, -4.0])
+        );
+        assert_eq!(
+            simd_trunc(f16x4::from_array([0.9, 1.001, 2.0, -4.5])),
+            f16x4::from_array([0.0, 1.0, 2.0, -4.0])
+        );
+    }
+
     assert_eq!(
         f32x4::from_array([0.9, 1.001, 2.0, -4.5]).ceil(),
         f32x4::from_array([1.0, 2.0, 2.0, -4.0])
@@ -604,6 +835,31 @@ fn simd_round() {
         f64x4::from_array([0.9, 1.001, 2.0, -4.5]).trunc(),
         f64x4::from_array([0.0, 1.0, 2.0, -4.0])
     );
+
+    unsafe {
+        use intrinsics::*;
+
+        assert_eq!(
+            simd_ceil(f128x4::from_array([0.9, 1.001, 2.0, -4.5])),
+            f128x4::from_array([1.0, 2.0, 2.0, -4.0])
+        );
+        assert_eq!(
+            simd_floor(f128x4::from_array([0.9, 1.001, 2.0, -4.5])),
+            f128x4::from_array([0.0, 1.0, 2.0, -5.0])
+        );
+        assert_eq!(
+            simd_round(f128x4::from_array([0.9, 1.001, 2.0, -4.5])),
+            f128x4::from_array([1.0, 1.0, 2.0, -5.0])
+        );
+        assert_eq!(
+            simd_round_ties_even(f128x4::from_array([0.9, 1.001, 2.0, -4.5])),
+            f128x4::from_array([1.0, 1.0, 2.0, -4.0])
+        );
+        assert_eq!(
+            simd_trunc(f128x4::from_array([0.9, 1.001, 2.0, -4.5])),
+            f128x4::from_array([0.0, 1.0, 2.0, -4.0])
+        );
+    }
 }

 fn simd_intrinsics() {
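
The two rounding hunks pin down the difference between simd_round (ties round away from zero) and simd_round_ties_even (ties round to even), which is why -4.5 becomes -5.0 under one and -4.0 under the other. A scalar sketch of the same modes (illustrative only, not part of the commit; f32::round_ties_even is available on stable Rust since 1.77):

// Scalar illustration of the rounding modes exercised by the simd_round,
// simd_round_ties_even, simd_trunc, simd_ceil and simd_floor assertions above.
fn main() {
    assert_eq!((-4.5f32).round(), -5.0);           // ties away from zero
    assert_eq!((-4.5f32).round_ties_even(), -4.0); // ties to nearest even
    assert_eq!((-4.5f32).trunc(), -4.0);           // trunc drops the fraction
    assert_eq!(0.9f32.trunc(), 0.0);
    assert_eq!(1.001f32.ceil(), 2.0);
    assert_eq!(1.001f32.floor(), 1.0);
}
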
