From ea8f414d2eb56455ea02d12508c337e5e8524655 Mon Sep 17 00:00:00 2001
From: nomaterials
Date: Thu, 26 Jun 2025 11:31:57 +0200
Subject: [PATCH 1/2] implement wasm boolean ops

---
 fearless_simd/src/generated/wasm.rs | 108 ++++++++++------------------
 fearless_simd_gen/src/mk_wasm.rs    |  22 ------
 fearless_simd_tests/tests/wasm.rs   |  39 ++++++++++
 3 files changed, 75 insertions(+), 94 deletions(-)

diff --git a/fearless_simd/src/generated/wasm.rs b/fearless_simd/src/generated/wasm.rs
index b123f647..6374f364 100644
--- a/fearless_simd/src/generated/wasm.rs
+++ b/fearless_simd/src/generated/wasm.rs
@@ -160,8 +160,7 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn not_i8x16(self, a: i8x16) -> i8x16 {
-        /// TODO: If v128 is used, we need to reinterpret it.
-        todo!()
+        v128_not(a.into()).simd_into(self)
     }
     #[inline(always)]
     fn add_i8x16(self, a: i8x16, b: i8x16) -> i8x16 {
@@ -180,18 +179,15 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn and_i8x16(self, a: i8x16, b: i8x16) -> i8x16 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_and(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn or_i8x16(self, a: i8x16, b: i8x16) -> i8x16 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_or(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn xor_i8x16(self, a: i8x16, b: i8x16) -> i8x16 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_xor(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn shr_i8x16(self, a: i8x16, shift: u32) -> i8x16 {
@@ -254,8 +250,7 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn not_u8x16(self, a: u8x16) -> u8x16 {
-        /// TODO: If v128 is used, we need to reinterpret it.
-        todo!()
+        v128_not(a.into()).simd_into(self)
     }
     #[inline(always)]
     fn add_u8x16(self, a: u8x16, b: u8x16) -> u8x16 {
@@ -274,18 +269,15 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn and_u8x16(self, a: u8x16, b: u8x16) -> u8x16 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_and(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn or_u8x16(self, a: u8x16, b: u8x16) -> u8x16 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_or(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn xor_u8x16(self, a: u8x16, b: u8x16) -> u8x16 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_xor(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn shr_u8x16(self, a: u8x16, shift: u32) -> u8x16 {
@@ -348,23 +340,19 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn not_mask8x16(self, a: mask8x16) -> mask8x16 {
-        /// TODO: If v128 is used, we need to reinterpret it.
-        todo!()
+        v128_not(a.into()).simd_into(self)
     }
     #[inline(always)]
     fn and_mask8x16(self, a: mask8x16, b: mask8x16) -> mask8x16 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_and(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn or_mask8x16(self, a: mask8x16, b: mask8x16) -> mask8x16 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_or(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn xor_mask8x16(self, a: mask8x16, b: mask8x16) -> mask8x16 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_xor(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn select_mask8x16(
@@ -392,8 +380,7 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn not_i16x8(self, a: i16x8) -> i16x8 {
-        /// TODO: If v128 is used, we need to reinterpret it.
-        todo!()
+        v128_not(a.into()).simd_into(self)
     }
     #[inline(always)]
     fn add_i16x8(self, a: i16x8, b: i16x8) -> i16x8 {
@@ -409,18 +396,15 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn and_i16x8(self, a: i16x8, b: i16x8) -> i16x8 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_and(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn or_i16x8(self, a: i16x8, b: i16x8) -> i16x8 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_or(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn xor_i16x8(self, a: i16x8, b: i16x8) -> i16x8 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_xor(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn shr_i16x8(self, a: i16x8, shift: u32) -> i16x8 {
@@ -483,8 +467,7 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn not_u16x8(self, a: u16x8) -> u16x8 {
-        /// TODO: If v128 is used, we need to reinterpret it.
-        todo!()
+        v128_not(a.into()).simd_into(self)
     }
     #[inline(always)]
     fn add_u16x8(self, a: u16x8, b: u16x8) -> u16x8 {
@@ -500,18 +483,15 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn and_u16x8(self, a: u16x8, b: u16x8) -> u16x8 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_and(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn or_u16x8(self, a: u16x8, b: u16x8) -> u16x8 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_or(a.into(), b.into()).simd_into(self)
    }
     #[inline(always)]
     fn xor_u16x8(self, a: u16x8, b: u16x8) -> u16x8 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_xor(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn shr_u16x8(self, a: u16x8, shift: u32) -> u16x8 {
@@ -574,23 +554,19 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn not_mask16x8(self, a: mask16x8) -> mask16x8 {
-        /// TODO: If v128 is used, we need to reinterpret it.
-        todo!()
+        v128_not(a.into()).simd_into(self)
     }
     #[inline(always)]
     fn and_mask16x8(self, a: mask16x8, b: mask16x8) -> mask16x8 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_and(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn or_mask16x8(self, a: mask16x8, b: mask16x8) -> mask16x8 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_or(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn xor_mask16x8(self, a: mask16x8, b: mask16x8) -> mask16x8 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_xor(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn select_mask16x8(
@@ -618,8 +594,7 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn not_i32x4(self, a: i32x4) -> i32x4 {
-        /// TODO: If v128 is used, we need to reinterpret it.
-        todo!()
+        v128_not(a.into()).simd_into(self)
     }
     #[inline(always)]
     fn add_i32x4(self, a: i32x4, b: i32x4) -> i32x4 {
@@ -635,18 +610,15 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn and_i32x4(self, a: i32x4, b: i32x4) -> i32x4 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_and(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn or_i32x4(self, a: i32x4, b: i32x4) -> i32x4 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_or(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn xor_i32x4(self, a: i32x4, b: i32x4) -> i32x4 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_xor(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn shr_i32x4(self, a: i32x4, shift: u32) -> i32x4 {
@@ -709,8 +681,7 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn not_u32x4(self, a: u32x4) -> u32x4 {
-        /// TODO: If v128 is used, we need to reinterpret it.
-        todo!()
+        v128_not(a.into()).simd_into(self)
     }
     #[inline(always)]
     fn add_u32x4(self, a: u32x4, b: u32x4) -> u32x4 {
@@ -726,18 +697,15 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn and_u32x4(self, a: u32x4, b: u32x4) -> u32x4 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_and(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn or_u32x4(self, a: u32x4, b: u32x4) -> u32x4 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_or(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn xor_u32x4(self, a: u32x4, b: u32x4) -> u32x4 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_xor(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn shr_u32x4(self, a: u32x4, shift: u32) -> u32x4 {
@@ -800,23 +768,19 @@ impl Simd for WasmSimd128 {
     }
     #[inline(always)]
     fn not_mask32x4(self, a: mask32x4) -> mask32x4 {
-        /// TODO: If v128 is used, we need to reinterpret it.
-        todo!()
+        v128_not(a.into()).simd_into(self)
     }
     #[inline(always)]
     fn and_mask32x4(self, a: mask32x4, b: mask32x4) -> mask32x4 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_and(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn or_mask32x4(self, a: mask32x4, b: mask32x4) -> mask32x4 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_or(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn xor_mask32x4(self, a: mask32x4, b: mask32x4) -> mask32x4 {
-        /// TODO: If v128 is used we need to reinterpret it accurately...
-        todo!()
+        v128_xor(a.into(), b.into()).simd_into(self)
     }
     #[inline(always)]
     fn select_mask32x4(
diff --git a/fearless_simd_gen/src/mk_wasm.rs b/fearless_simd_gen/src/mk_wasm.rs
index e0282430..0c917883 100644
--- a/fearless_simd_gen/src/mk_wasm.rs
+++ b/fearless_simd_gen/src/mk_wasm.rs
@@ -58,17 +58,6 @@ fn mk_simd_impl(level: Level) -> TokenStream {
                     }
                 }
             }
-            OpSig::Unary if method == "not" => {
-                let args = [quote! { a.into() }];
-                let expr = Wasm.expr(method, vec_ty, &args);
-                quote! {
-                    #[inline(always)]
-                    fn #method_ident(self, a: #ty) -> #ret_ty {
-                        /// TODO: If v128 is used, we need to reinterpret it.
-                        todo!()
-                    }
-                }
-            }
             OpSig::Unary => {
                 let args = [quote! { a.into() }];
                 let expr = if method == "fract" {
@@ -94,17 +83,6 @@ fn mk_simd_impl(level: Level) -> TokenStream {
                     }
                 }
             }
-            OpSig::Binary if method == "xor" || method == "or" || method == "and" => {
-                let args = [quote! { a.into() }, quote! { b.into() }];
-                let expr = Wasm.expr(method, vec_ty, &args);
-                quote! {
-                    #[inline(always)]
-                    fn #method_ident(self, a: #ty, b: #ty) -> #ret_ty {
-                        /// TODO: If v128 is used we need to reinterpret it accurately...
-                        todo!()
-                    }
-                }
-            }
             OpSig::Binary => {
                 let args = [quote! { a.into() }, quote! { b.into() }];
                 match method {
diff --git a/fearless_simd_tests/tests/wasm.rs b/fearless_simd_tests/tests/wasm.rs
index ad5225fc..dc7742e9 100644
--- a/fearless_simd_tests/tests/wasm.rs
+++ b/fearless_simd_tests/tests/wasm.rs
@@ -244,3 +244,42 @@ test_wasm_simd_parity! {
         }
     }
 }
+
+test_wasm_simd_parity! {
+    fn and_i8x16() {
+        |s| -> [i8; 16] {
+            let a = i8x16::from_slice(s, &[-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0]);
+            let b = i8x16::from_slice(s, &[85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85]);
+            (a & b).into()
+        }
+    }
+}
+
+test_wasm_simd_parity! {
+    fn or_i8x16() {
+        |s| -> [i8; 16] {
+            let a = i8x16::from_slice(s, &[0, 1, 2, 3, 4, 5, 6, 7, -1, -2, -3, -4, -5, -6, -7, -8]);
+            let b = i8x16::from_slice(s, &[1, 1, 1, 1, 2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0]);
+            (a | b).into()
+        }
+    }
+}
+
+test_wasm_simd_parity! {
+    fn xor_i8x16() {
+        |s| -> [i8; 16] {
+            let a = i8x16::from_slice(s, &[0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1, -1, 0, 0, 0, 0]);
+            let b = i8x16::from_slice(s, &[-1, -1, 0, 0, 5, 4, 7, 6, -1, 0, -1, 0, -1, 0, -1, 0]);
+            (a ^ b).into()
+        }
+    }
+}
+
+test_wasm_simd_parity! {
+    fn not_i8x16() {
+        |s| -> [i8; 16] {
+            let a = i8x16::from_slice(s, &[0, 1, 2, 3, 4, 5, 6, 7, -1, -2, -3, -4, -5, -6, -7, -8]);
+            i8x16::not(a).into()
+        }
+    }
+}

From 75ed0d258f1e05c3c0ca08eb4b9a7de0fb42e299 Mon Sep 17 00:00:00 2001
From: nomaterials
Date: Thu, 26 Jun 2025 15:21:43 +0200
Subject: [PATCH 2/2] review: implement additional tests covering `unsigned` & `mask` types

---
 fearless_simd_tests/tests/wasm.rs | 78 +++++++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)

diff --git a/fearless_simd_tests/tests/wasm.rs b/fearless_simd_tests/tests/wasm.rs
index dc7742e9..291ed779 100644
--- a/fearless_simd_tests/tests/wasm.rs
+++ b/fearless_simd_tests/tests/wasm.rs
@@ -283,3 +283,81 @@ test_wasm_simd_parity! {
         }
     }
 }
+
+test_wasm_simd_parity! {
+    fn and_u8x16() {
+        |s| -> [u8; 16] {
+            let a = u8x16::from_slice(s, &[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]);
+            let b = u8x16::from_slice(s, &[85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85]);
+            (a & b).into()
+        }
+    }
+}
+
+test_wasm_simd_parity! {
+    fn or_u8x16() {
+        |s| -> [u8; 16] {
+            let a = u8x16::from_slice(s, &[0, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 8]);
+            let b = u8x16::from_slice(s, &[1, 1, 1, 1, 2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0]);
+            (a | b).into()
+        }
+    }
+}
+
+test_wasm_simd_parity! {
+    fn xor_u8x16() {
+        |s| -> [u8; 16] {
+            let a = u8x16::from_slice(s, &[0, 1, 2, 3, 4, 5, 6, 7, 1, 1, 1, 1, 0, 0, 0, 0]);
+            let b = u8x16::from_slice(s, &[1, 1, 0, 0, 5, 4, 7, 6, 1, 0, 1, 0, 1, 0, 1, 0]);
+            (a ^ b).into()
+        }
+    }
+}
+
+test_wasm_simd_parity! {
+    fn not_u8x16() {
+        |s| -> [u8; 16] {
+            let a = u8x16::from_slice(s, &[0, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 8]);
+            u8x16::not(a).into()
+        }
+    }
+}
+
+test_wasm_simd_parity! {
+    fn and_mask8x16() {
+        |s| -> [i8; 16] {
+            let a = mask8x16::from_slice(s, &[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]);
+            let b = mask8x16::from_slice(s, &[85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85]);
+            (a & b).into()
+        }
+    }
+}
+
+test_wasm_simd_parity! {
+    fn or_mask8x16() {
+        |s| -> [i8; 16] {
+            let a = mask8x16::from_slice(s, &[0, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 8]);
+            let b = mask8x16::from_slice(s, &[1, 1, 1, 1, 2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0]);
+            (a | b).into()
+        }
+    }
+}
+
+test_wasm_simd_parity! {
+    fn xor_mask8x16() {
+        |s| -> [i8; 16] {
+            let a = mask8x16::from_slice(s, &[0, 1, 2, 3, 4, 5, 6, 7, 1, 1, 1, 1, 0, 0, 0, 0]);
+            let b = mask8x16::from_slice(s, &[1, 1, 0, 0, 5, 4, 7, 6, 1, 0, 1, 0, 1, 0, 1, 0]);
+            (a ^ b).into()
+        }
+    }
+}
+
+test_wasm_simd_parity! {
+    fn not_mask8x16() {
+        |s| -> [i8; 16] {
+            let a = mask8x16::from_slice(s, &[0, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 8]);
+            mask8x16::not(a).into()
+        }
+    }
+}
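
Note on the intrinsics used above: v128_not, v128_and, v128_or and v128_xor from core::arch::wasm32 all operate on the untyped v128, which is why the same expression serves the signed, unsigned and mask lane types; only the surrounding .into()/.simd_into(self) conversions differ per type. Below is a minimal standalone sketch of that behaviour, separate from fearless_simd and from these patches, assuming a wasm32 target built with the simd128 feature; the function name is illustrative only.

#[cfg(target_arch = "wasm32")]
fn v128_boolean_ops_sketch() {
    use core::arch::wasm32::{i8x16, i8x16_all_true, i8x16_eq, v128_and, v128_not};

    // Same lane patterns as the `and_i8x16` test above: -1 has all bits set, 0 has none.
    let a = i8x16(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
    let b = i8x16(85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85);

    // Lanewise AND: -1 & 85 == 85 and 0 & 85 == 0, so the lanes alternate 85, 0.
    let and_expected = i8x16(85, 0, 85, 0, 85, 0, 85, 0, 85, 0, 85, 0, 85, 0, 85, 0);
    assert!(i8x16_all_true(i8x16_eq(v128_and(a, b), and_expected)));

    // Bitwise NOT flips every bit: !(-1) == 0 and !0 == -1 in each lane.
    let not_expected = i8x16(0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1);
    assert!(i8x16_all_true(i8x16_eq(v128_not(a), not_expected)));
}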