From 43dabd1aeabe02c3404c76a8c62f29b55f1d1c7c Mon Sep 17 00:00:00 2001 From: Caleb Zulawski Date: Thu, 1 Oct 2020 22:50:15 -0400 Subject: [PATCH] Implement `core::ops` (#10) * Add vector-vector arithmetic ops * Add operators and integer conversions for masks * Add unary traits * Implement Index and IndexMut * Implement by-ref ops for masks * Document intrinsics * Implement format traits for masks * Add floating point ops tests * Add integer tests * Add mask tests --- crates/core_simd/src/intrinsics.rs | 39 ++ crates/core_simd/src/lib.rs | 4 +- crates/core_simd/src/masks.rs | 51 ++ crates/core_simd/src/ops.rs | 628 ++++++++++++++++++ crates/core_simd/tests/helpers/biteq.rs | 127 ++++ crates/core_simd/tests/helpers/lanewise.rs | 49 ++ crates/core_simd/tests/helpers/mod.rs | 4 + crates/core_simd/tests/ops.rs | 1 + crates/core_simd/tests/ops_impl/f32.rs | 6 + crates/core_simd/tests/ops_impl/f64.rs | 5 + .../core_simd/tests/ops_impl/float_macros.rs | 237 +++++++ crates/core_simd/tests/ops_impl/i128.rs | 4 + crates/core_simd/tests/ops_impl/i16.rs | 6 + crates/core_simd/tests/ops_impl/i32.rs | 6 + crates/core_simd/tests/ops_impl/i64.rs | 5 + crates/core_simd/tests/ops_impl/i8.rs | 6 + crates/core_simd/tests/ops_impl/int_macros.rs | 388 +++++++++++ crates/core_simd/tests/ops_impl/isize.rs | 5 + crates/core_simd/tests/ops_impl/mask128.rs | 4 + crates/core_simd/tests/ops_impl/mask16.rs | 6 + crates/core_simd/tests/ops_impl/mask32.rs | 6 + crates/core_simd/tests/ops_impl/mask64.rs | 5 + crates/core_simd/tests/ops_impl/mask8.rs | 6 + .../core_simd/tests/ops_impl/mask_macros.rs | 179 +++++ crates/core_simd/tests/ops_impl/masksize.rs | 5 + crates/core_simd/tests/ops_impl/mod.rs | 39 ++ crates/core_simd/tests/ops_impl/u128.rs | 4 + crates/core_simd/tests/ops_impl/u16.rs | 6 + crates/core_simd/tests/ops_impl/u32.rs | 6 + crates/core_simd/tests/ops_impl/u64.rs | 5 + crates/core_simd/tests/ops_impl/u8.rs | 6 + .../core_simd/tests/ops_impl/uint_macros.rs | 381 +++++++++++ crates/core_simd/tests/ops_impl/usize.rs | 5 + 33 files changed, 2233 insertions(+), 1 deletion(-) create mode 100644 crates/core_simd/src/intrinsics.rs create mode 100644 crates/core_simd/src/ops.rs create mode 100644 crates/core_simd/tests/helpers/biteq.rs create mode 100644 crates/core_simd/tests/helpers/lanewise.rs create mode 100644 crates/core_simd/tests/helpers/mod.rs create mode 100644 crates/core_simd/tests/ops.rs create mode 100644 crates/core_simd/tests/ops_impl/f32.rs create mode 100644 crates/core_simd/tests/ops_impl/f64.rs create mode 100644 crates/core_simd/tests/ops_impl/float_macros.rs create mode 100644 crates/core_simd/tests/ops_impl/i128.rs create mode 100644 crates/core_simd/tests/ops_impl/i16.rs create mode 100644 crates/core_simd/tests/ops_impl/i32.rs create mode 100644 crates/core_simd/tests/ops_impl/i64.rs create mode 100644 crates/core_simd/tests/ops_impl/i8.rs create mode 100644 crates/core_simd/tests/ops_impl/int_macros.rs create mode 100644 crates/core_simd/tests/ops_impl/isize.rs create mode 100644 crates/core_simd/tests/ops_impl/mask128.rs create mode 100644 crates/core_simd/tests/ops_impl/mask16.rs create mode 100644 crates/core_simd/tests/ops_impl/mask32.rs create mode 100644 crates/core_simd/tests/ops_impl/mask64.rs create mode 100644 crates/core_simd/tests/ops_impl/mask8.rs create mode 100644 crates/core_simd/tests/ops_impl/mask_macros.rs create mode 100644 crates/core_simd/tests/ops_impl/masksize.rs create mode 100644 crates/core_simd/tests/ops_impl/mod.rs create mode 100644 
crates/core_simd/tests/ops_impl/u128.rs create mode 100644 crates/core_simd/tests/ops_impl/u16.rs create mode 100644 crates/core_simd/tests/ops_impl/u32.rs create mode 100644 crates/core_simd/tests/ops_impl/u64.rs create mode 100644 crates/core_simd/tests/ops_impl/u8.rs create mode 100644 crates/core_simd/tests/ops_impl/uint_macros.rs create mode 100644 crates/core_simd/tests/ops_impl/usize.rs diff --git a/crates/core_simd/src/intrinsics.rs b/crates/core_simd/src/intrinsics.rs new file mode 100644 index 0000000000000..c2cef778560fc --- /dev/null +++ b/crates/core_simd/src/intrinsics.rs @@ -0,0 +1,39 @@ +//! This module contains the LLVM intrinsics bindings that provide the functionality for this +//! crate. +//! +//! The LLVM assembly language is documented here: https://llvm.org/docs/LangRef.html + +/// These intrinsics aren't linked directly from LLVM and are mostly undocumented; however, they are +/// simply lowered to the matching LLVM instructions by the compiler. The associated instruction +/// is documented alongside each intrinsic. +extern "platform-intrinsic" { + /// add/fadd + pub(crate) fn simd_add<T>(x: T, y: T) -> T; + + /// sub/fsub + pub(crate) fn simd_sub<T>(x: T, y: T) -> T; + + /// mul/fmul + pub(crate) fn simd_mul<T>(x: T, y: T) -> T; + + /// udiv/sdiv/fdiv + pub(crate) fn simd_div<T>(x: T, y: T) -> T; + + /// urem/srem/frem + pub(crate) fn simd_rem<T>(x: T, y: T) -> T; + + /// shl + pub(crate) fn simd_shl<T>(x: T, y: T) -> T; + + /// lshr/ashr + pub(crate) fn simd_shr<T>(x: T, y: T) -> T; + + /// and + pub(crate) fn simd_and<T>(x: T, y: T) -> T; + + /// or + pub(crate) fn simd_or<T>(x: T, y: T) -> T; + + /// xor + pub(crate) fn simd_xor<T>(x: T, y: T) -> T; +} diff --git a/crates/core_simd/src/lib.rs b/crates/core_simd/src/lib.rs index d88f5b1eac45d..d08ef400f9bbb 100644 --- a/crates/core_simd/src/lib.rs +++ b/crates/core_simd/src/lib.rs @@ -1,5 +1,5 @@ #![no_std] -#![feature(repr_simd)] +#![feature(repr_simd, platform_intrinsics)] #![warn(missing_docs)] //! Portable SIMD module. @@ -7,6 +7,8 @@ mod macros; mod fmt; +mod intrinsics; +mod ops; mod masks; pub use masks::*; diff --git a/crates/core_simd/src/masks.rs b/crates/core_simd/src/masks.rs index 1fc281a310d38..cba76b6a2a35d 100644 --- a/crates/core_simd/src/masks.rs +++ b/crates/core_simd/src/masks.rs @@ -1,3 +1,13 @@ +/// The error type returned when converting an integer to a mask fails. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct TryFromMaskError(()); + +impl core::fmt::Display for TryFromMaskError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "mask must have all bits set or unset") + } +} + macro_rules! define_mask { { $(#[$attr:meta])* struct $name:ident($type:ty); } => { $(#[$attr])* @@ -34,11 +44,52 @@ macro_rules!
define_mask { } } + impl core::convert::TryFrom<$type> for $name { + type Error = TryFromMaskError; + fn try_from(value: $type) -> Result<Self, Self::Error> { + if value == 0 || !value == 0 { + Ok(Self(value)) + } else { + Err(TryFromMaskError(())) + } + } + } + + impl core::convert::From<$name> for $type { + fn from(value: $name) -> Self { + value.0 + } + } + impl core::fmt::Debug for $name { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { self.test().fmt(f) } } + + impl core::fmt::Binary for $name { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + <$type as core::fmt::Binary>::fmt(&self.0, f) + } + } + + impl core::fmt::Octal for $name { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + <$type as core::fmt::Octal>::fmt(&self.0, f) + } + } + + impl core::fmt::LowerHex for $name { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + <$type as core::fmt::LowerHex>::fmt(&self.0, f) + } + } + + impl core::fmt::UpperHex for $name { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + <$type as core::fmt::UpperHex>::fmt(&self.0, f) + } + } } } diff --git a/crates/core_simd/src/ops.rs b/crates/core_simd/src/ops.rs new file mode 100644 index 0000000000000..5af10a4e1886a --- /dev/null +++ b/crates/core_simd/src/ops.rs @@ -0,0 +1,628 @@ +/// Checks if the right-hand side argument of a left- or right-shift would cause overflow. +fn invalid_shift_rhs<T>(rhs: T) -> bool +where + T: Default + PartialOrd + core::convert::TryFrom<usize>, + <T as core::convert::TryFrom<usize>>::Error: core::fmt::Debug, +{ + let bits_in_type = T::try_from(8 * core::mem::size_of::<T>()).unwrap(); + rhs < T::default() || rhs >= bits_in_type +} + +/// Automatically implements operators over references in addition to the provided operator. +macro_rules! impl_ref_ops { + // binary op + { + impl core::ops::$trait:ident<$rhs:ty> for $type:ty { + type Output = $output:ty; + + $(#[$attrs:meta])* + fn $fn:ident($self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) -> Self::Output $body:tt + } + } => { + impl core::ops::$trait<$rhs> for $type { + type Output = $output; + + $(#[$attrs])* + fn $fn($self_tok, $rhs_arg: $rhs_arg_ty) -> Self::Output $body + } + + impl core::ops::$trait<&'_ $rhs> for $type { + type Output = <$type as core::ops::$trait<$rhs>>::Output; + + $(#[$attrs])* + fn $fn($self_tok, $rhs_arg: &$rhs) -> Self::Output { + core::ops::$trait::$fn($self_tok, *$rhs_arg) + } + } + + impl core::ops::$trait<$rhs> for &'_ $type { + type Output = <$type as core::ops::$trait<$rhs>>::Output; + + $(#[$attrs])* + fn $fn($self_tok, $rhs_arg: $rhs) -> Self::Output { + core::ops::$trait::$fn(*$self_tok, $rhs_arg) + } + } + + impl core::ops::$trait<&'_ $rhs> for &'_ $type { + type Output = <$type as core::ops::$trait<$rhs>>::Output; + + $(#[$attrs])* + fn $fn($self_tok, $rhs_arg: &$rhs) -> Self::Output { + core::ops::$trait::$fn(*$self_tok, *$rhs_arg) + } + } + }; + + // binary assignment op + { + impl core::ops::$trait:ident<$rhs:ty> for $type:ty { + $(#[$attrs:meta])* + fn $fn:ident(&mut $self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) $body:tt + } + } => { + impl core::ops::$trait<$rhs> for $type { + $(#[$attrs])* + fn $fn(&mut $self_tok, $rhs_arg: $rhs_arg_ty) $body + } + + impl core::ops::$trait<&'_ $rhs> for $type { + $(#[$attrs])* + fn $fn(&mut $self_tok, $rhs_arg: &$rhs_arg_ty) { + core::ops::$trait::$fn($self_tok, *$rhs_arg) + } + } + }; + + // unary op + { + impl core::ops::$trait:ident for $type:ty { + type Output = $output:ty; + fn $fn:ident($self_tok:ident) -> Self::Output $body:tt + } + } => {
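+ // For a unary operator, emit the impl for the owned type exactly as written, then add a by-ref impl that dereferences and forwards, so `-&v` and `!&v` work without extra bounds.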
impl core::ops::$trait for $type { + type Output = $output; + fn $fn($self_tok) -> Self::Output $body + } + + impl core::ops::$trait for &'_ $type { + type Output = <$type as core::ops::$trait>::Output; + fn $fn($self_tok) -> Self::Output { + core::ops::$trait::$fn(*$self_tok) + } + } + } +} + +/// Implements op traits for masks +macro_rules! impl_mask_ops { + { $($mask:ty),* } => { + $( + impl_ref_ops! { + impl core::ops::BitAnd<$mask> for $mask { + type Output = Self; + fn bitand(self, rhs: Self) -> Self::Output { + Self(self.0 & rhs.0) + } + } + } + + impl_ref_ops! { + impl core::ops::BitAndAssign<$mask> for $mask { + fn bitand_assign(&mut self, rhs: Self) { + *self = *self & rhs; + } + } + } + + impl_ref_ops! { + impl core::ops::BitOr<$mask> for $mask { + type Output = Self; + fn bitor(self, rhs: Self) -> Self::Output { + Self(self.0 | rhs.0) + } + } + } + + impl_ref_ops! { + impl core::ops::BitOrAssign<$mask> for $mask { + fn bitor_assign(&mut self, rhs: Self) { + *self = *self | rhs; + } + } + } + + impl_ref_ops! { + impl core::ops::BitXor<$mask> for $mask { + type Output = Self; + fn bitxor(self, rhs: Self) -> Self::Output { + Self(self.0 ^ rhs.0) + } + } + } + + impl_ref_ops! { + impl core::ops::BitXorAssign<$mask> for $mask { + fn bitxor_assign(&mut self, rhs: Self) { + *self = *self ^ rhs; + } + } + } + + impl_ref_ops! { + impl core::ops::Not for $mask { + type Output = Self; + fn not(self) -> Self::Output { + Self(!self.0) + } + } + } + )* + } +} +impl_mask_ops! { crate::mask8, crate::mask16, crate::mask32, crate::mask64, crate::mask128, crate::masksize } + +/// Automatically implements operators over vectors and scalars for a particular vector. +macro_rules! impl_op { + { impl Add for $type:ty, $scalar:ty } => { + impl_op! { @binary $type, $scalar, Add::add, AddAssign::add_assign, simd_add } + }; + { impl Sub for $type:ty, $scalar:ty } => { + impl_op! { @binary $type, $scalar, Sub::sub, SubAssign::sub_assign, simd_sub } + }; + { impl Mul for $type:ty, $scalar:ty } => { + impl_op! { @binary $type, $scalar, Mul::mul, MulAssign::mul_assign, simd_mul } + }; + { impl Div for $type:ty, $scalar:ty } => { + impl_op! { @binary $type, $scalar, Div::div, DivAssign::div_assign, simd_div } + }; + { impl Rem for $type:ty, $scalar:ty } => { + impl_op! { @binary $type, $scalar, Rem::rem, RemAssign::rem_assign, simd_rem } + }; + { impl Shl for $type:ty, $scalar:ty } => { + impl_op! { @binary $type, $scalar, Shl::shl, ShlAssign::shl_assign, simd_shl } + }; + { impl Shr for $type:ty, $scalar:ty } => { + impl_op! { @binary $type, $scalar, Shr::shr, ShrAssign::shr_assign, simd_shr } + }; + { impl BitAnd for $type:ty, $scalar:ty } => { + impl_op! { @binary $type, $scalar, BitAnd::bitand, BitAndAssign::bitand_assign, simd_and } + }; + { impl BitOr for $type:ty, $scalar:ty } => { + impl_op! { @binary $type, $scalar, BitOr::bitor, BitOrAssign::bitor_assign, simd_or } + }; + { impl BitXor for $type:ty, $scalar:ty } => { + impl_op! { @binary $type, $scalar, BitXor::bitxor, BitXorAssign::bitxor_assign, simd_xor } + }; + + { impl Not for $type:ty, $scalar:ty } => { + impl_ref_ops! { + impl core::ops::Not for $type { + type Output = Self; + fn not(self) -> Self::Output { + self ^ <$type>::splat(!<$scalar>::default()) + } + } + } + }; + + { impl Neg for $type:ty, $scalar:ty } => { + impl_ref_ops! 
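+ // Negation is expressed through the subtraction intrinsic: `splat(-default()) - self` is `0 - self` for integers and `-0.0 - self` for floats (LLVM's canonical fneg idiom), so no dedicated negation intrinsic is needed.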
{ + impl core::ops::Neg for $type { + type Output = Self; + fn neg(self) -> Self::Output { + <$type>::splat(-<$scalar>::default()) - self + } + } + } + }; + + { impl Index for $type:ty, $scalar:ty } => { + impl<I> core::ops::Index<I> for $type + where + I: core::slice::SliceIndex<[$scalar]>, + { + type Output = I::Output; + fn index(&self, index: I) -> &Self::Output { + let slice: &[_] = self.as_ref(); + &slice[index] + } + } + + impl<I> core::ops::IndexMut<I> for $type + where + I: core::slice::SliceIndex<[$scalar]>, + { + fn index_mut(&mut self, index: I) -> &mut Self::Output { + let slice: &mut [_] = self.as_mut(); + &mut slice[index] + } + } + }; + + // generic binary op with assignment when output is `Self` + { @binary $type:ty, $scalar:ty, $trait:ident :: $trait_fn:ident, $assign_trait:ident :: $assign_trait_fn:ident, $intrinsic:ident } => { + impl_ref_ops! { + impl core::ops::$trait<$type> for $type { + type Output = $type; + + #[inline] + fn $trait_fn(self, rhs: $type) -> Self::Output { + unsafe { + crate::intrinsics::$intrinsic(self, rhs) + } + } + } + } + + impl_ref_ops! { + impl core::ops::$trait<$scalar> for $type { + type Output = $type; + + #[inline] + fn $trait_fn(self, rhs: $scalar) -> Self::Output { + core::ops::$trait::$trait_fn(self, <$type>::splat(rhs)) + } + } + } + + impl_ref_ops! { + impl core::ops::$trait<$type> for $scalar { + type Output = $type; + + #[inline] + fn $trait_fn(self, rhs: $type) -> Self::Output { + core::ops::$trait::$trait_fn(<$type>::splat(self), rhs) + } + } + } + + impl_ref_ops! { + impl core::ops::$assign_trait<$type> for $type { + #[inline] + fn $assign_trait_fn(&mut self, rhs: $type) { + unsafe { + *self = crate::intrinsics::$intrinsic(*self, rhs); + } + } + } + } + + impl_ref_ops! { + impl core::ops::$assign_trait<$scalar> for $type { + #[inline] + fn $assign_trait_fn(&mut self, rhs: $scalar) { + core::ops::$assign_trait::$assign_trait_fn(self, <$type>::splat(rhs)); + } + } + } + }; +} + +/// Implements floating-point operators for the provided types. +macro_rules! impl_float_ops { + { $($scalar:ty => $($vector:ty),*;)* } => { + $( // scalar + $( // vector + impl_op! { impl Add for $vector, $scalar } + impl_op! { impl Sub for $vector, $scalar } + impl_op! { impl Mul for $vector, $scalar } + impl_op! { impl Div for $vector, $scalar } + impl_op! { impl Rem for $vector, $scalar } + impl_op! { impl Neg for $vector, $scalar } + impl_op! { impl Index for $vector, $scalar } + )* + )* + }; +} + +/// Implements mask operators for the provided types. +macro_rules! impl_mask_ops { + { $($scalar:ty => $($vector:ty),*;)* } => { + $( // scalar + $( // vector + impl_op! { impl BitAnd for $vector, $scalar } + impl_op! { impl BitOr for $vector, $scalar } + impl_op! { impl BitXor for $vector, $scalar } + impl_op! { impl Not for $vector, $scalar } + impl_op! { impl Index for $vector, $scalar } + )* + )* + }; +} + +/// Implements unsigned integer operators for the provided types. +macro_rules! impl_unsigned_int_ops { + { $($scalar:ty => $($vector:ty),*;)* } => { + $( // scalar + $( // vector + impl_op! { impl Add for $vector, $scalar } + impl_op! { impl Sub for $vector, $scalar } + impl_op! { impl Mul for $vector, $scalar } + impl_op! { impl BitAnd for $vector, $scalar } + impl_op! { impl BitOr for $vector, $scalar } + impl_op! { impl BitXor for $vector, $scalar } + impl_op! { impl Not for $vector, $scalar } + impl_op! { impl Index for $vector, $scalar } + + // Integers panic on divide by 0 + impl_ref_ops!
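+ // A zero divisor is immediate undefined behavior for LLVM's udiv/sdiv, so unlike the float versions these impls check every divisor lane up front and panic the way scalar integer division does.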
{ + impl core::ops::Div<$vector> for $vector { + type Output = Self; + + #[inline] + fn div(self, rhs: $vector) -> Self::Output { + // TODO there is probably a better way of doing this + if AsRef::<[$scalar]>::as_ref(&rhs) + .iter() + .any(|x| *x == 0) + { + panic!("attempt to divide by zero"); + } + unsafe { crate::intrinsics::simd_div(self, rhs) } + } + } + } + + impl_ref_ops! { + impl core::ops::Div<$scalar> for $vector { + type Output = $vector; + + #[inline] + fn div(self, rhs: $scalar) -> Self::Output { + if rhs == 0 { + panic!("attempt to divide by zero"); + } + let rhs = Self::splat(rhs); + unsafe { crate::intrinsics::simd_div(self, rhs) } + } + } + } + + impl_ref_ops! { + impl core::ops::Div<$vector> for $scalar { + type Output = $vector; + + #[inline] + fn div(self, rhs: $vector) -> Self::Output { + <$vector>::splat(self) / rhs + } + } + } + + impl_ref_ops! { + impl core::ops::DivAssign<$vector> for $vector { + #[inline] + fn div_assign(&mut self, rhs: Self) { + *self = *self / rhs; + } + } + } + + impl_ref_ops! { + impl core::ops::DivAssign<$scalar> for $vector { + #[inline] + fn div_assign(&mut self, rhs: $scalar) { + *self = *self / rhs; + } + } + } + + // remainder panics on zero divisor + impl_ref_ops! { + impl core::ops::Rem<$vector> for $vector { + type Output = Self; + + #[inline] + fn rem(self, rhs: $vector) -> Self::Output { + // TODO there is probably a better way of doing this + if AsRef::<[$scalar]>::as_ref(&rhs) + .iter() + .any(|x| *x == 0) + { + panic!("attempt to calculate the remainder with a divisor of zero"); + } + unsafe { crate::intrinsics::simd_rem(self, rhs) } + } + } + } + + impl_ref_ops! { + impl core::ops::Rem<$scalar> for $vector { + type Output = $vector; + + #[inline] + fn rem(self, rhs: $scalar) -> Self::Output { + if rhs == 0 { + panic!("attempt to calculate the remainder with a divisor of zero"); + } + let rhs = Self::splat(rhs); + unsafe { crate::intrinsics::simd_rem(self, rhs) } + } + } + } + + impl_ref_ops! { + impl core::ops::Rem<$vector> for $scalar { + type Output = $vector; + + #[inline] + fn rem(self, rhs: $vector) -> Self::Output { + <$vector>::splat(self) % rhs + } + } + } + + impl_ref_ops! { + impl core::ops::RemAssign<$vector> for $vector { + #[inline] + fn rem_assign(&mut self, rhs: Self) { + *self = *self % rhs; + } + } + } + + impl_ref_ops! { + impl core::ops::RemAssign<$scalar> for $vector { + #[inline] + fn rem_assign(&mut self, rhs: $scalar) { + *self = *self % rhs; + } + } + } + + // shifts panic on overflow + impl_ref_ops! { + impl core::ops::Shl<$vector> for $vector { + type Output = Self; + + #[inline] + fn shl(self, rhs: $vector) -> Self::Output { + // TODO there is probably a better way of doing this + if AsRef::<[$scalar]>::as_ref(&rhs) + .iter() + .copied() + .any(invalid_shift_rhs) + { + panic!("attempt to shift left with overflow"); + } + unsafe { crate::intrinsics::simd_shl(self, rhs) } + } + } + } + + impl_ref_ops! { + impl core::ops::Shl<$scalar> for $vector { + type Output = $vector; + + #[inline] + fn shl(self, rhs: $scalar) -> Self::Output { + if invalid_shift_rhs(rhs) { + panic!("attempt to shift left with overflow"); + } + let rhs = Self::splat(rhs); + unsafe { crate::intrinsics::simd_shl(self, rhs) } + } + } + } + + + impl_ref_ops! { + impl core::ops::ShlAssign<$vector> for $vector { + #[inline] + fn shl_assign(&mut self, rhs: Self) { + *self = *self << rhs; + } + } + } + + impl_ref_ops! 
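+ // The assignment forms delegate to the checked shift above, so they inherit the same out-of-range panic.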
{ + impl core::ops::ShlAssign<$scalar> for $vector { + #[inline] + fn shl_assign(&mut self, rhs: $scalar) { + *self = *self << rhs; + } + } + } + + impl_ref_ops! { + impl core::ops::Shr<$vector> for $vector { + type Output = Self; + + #[inline] + fn shr(self, rhs: $vector) -> Self::Output { + // TODO there is probably a better way of doing this + if AsRef::<[$scalar]>::as_ref(&rhs) + .iter() + .copied() + .any(invalid_shift_rhs) + { + panic!("attempt to shift right with overflow"); + } + unsafe { crate::intrinsics::simd_shr(self, rhs) } + } + } + } + + impl_ref_ops! { + impl core::ops::Shr<$scalar> for $vector { + type Output = $vector; + + #[inline] + fn shr(self, rhs: $scalar) -> Self::Output { + if invalid_shift_rhs(rhs) { + panic!("attempt to shift right with overflow"); + } + let rhs = Self::splat(rhs); + unsafe { crate::intrinsics::simd_shr(self, rhs) } + } + } + } + + + impl_ref_ops! { + impl core::ops::ShrAssign<$vector> for $vector { + #[inline] + fn shr_assign(&mut self, rhs: Self) { + *self = *self >> rhs; + } + } + } + + impl_ref_ops! { + impl core::ops::ShrAssign<$scalar> for $vector { + #[inline] + fn shr_assign(&mut self, rhs: $scalar) { + *self = *self >> rhs; + } + } + } + )* + )* + }; +} + +/// Implements signed integer operators for the provided types. +macro_rules! impl_signed_int_ops { + { $($scalar:ty => $($vector:ty),*;)* } => { + impl_unsigned_int_ops! { $($scalar => $($vector),*;)* } + $( // scalar + $( // vector + impl_op! { impl Neg for $vector, $scalar } + )* + )* + }; +} + +impl_unsigned_int_ops! { + u8 => crate::u8x8, crate::u8x16, crate::u8x32, crate::u8x64; + u16 => crate::u16x4, crate::u16x8, crate::u16x16, crate::u16x32; + u32 => crate::u32x2, crate::u32x4, crate::u32x8, crate::u32x16; + u64 => crate::u64x2, crate::u64x4, crate::u64x8; + u128 => crate::u128x2, crate::u128x4; + usize => crate::usizex2, crate::usizex4, crate::usizex8; +} + +impl_signed_int_ops! { + i8 => crate::i8x8, crate::i8x16, crate::i8x32, crate::i8x64; + i16 => crate::i16x4, crate::i16x8, crate::i16x16, crate::i16x32; + i32 => crate::i32x2, crate::i32x4, crate::i32x8, crate::i32x16; + i64 => crate::i64x2, crate::i64x4, crate::i64x8; + i128 => crate::i128x2, crate::i128x4; + isize => crate::isizex2, crate::isizex4, crate::isizex8; +} + +impl_float_ops! { + f32 => crate::f32x2, crate::f32x4, crate::f32x8, crate::f32x16; + f64 => crate::f64x2, crate::f64x4, crate::f64x8; +} + +impl_mask_ops! { + crate::mask8 => crate::mask8x8, crate::mask8x16, crate::mask8x32, crate::mask8x64; + crate::mask16 => crate::mask16x4, crate::mask16x8, crate::mask16x16, crate::mask16x32; + crate::mask32 => crate::mask32x2, crate::mask32x4, crate::mask32x8, crate::mask32x16; + crate::mask64 => crate::mask64x2, crate::mask64x4, crate::mask64x8; + crate::mask128 => crate::mask128x2, crate::mask128x4; + crate::masksize => crate::masksizex2, crate::masksizex4, crate::masksizex8; +} diff --git a/crates/core_simd/tests/helpers/biteq.rs b/crates/core_simd/tests/helpers/biteq.rs new file mode 100644 index 0000000000000..f932eba907c34 --- /dev/null +++ b/crates/core_simd/tests/helpers/biteq.rs @@ -0,0 +1,127 @@ +pub(crate) trait BitEq { + fn biteq(&self, other: &Self) -> bool; + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result; +} +
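+// impl_biteq generates bitwise equality for the tests: integers and masks compare with `==`, floats compare via `to_bits` so NaN payloads and signed zeros must match exactly, and vectors compare lane by lane through their slice views.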
+macro_rules! impl_biteq { + { integer impl BitEq for $($type:ty,)* } => { + $( + impl BitEq for $type { + fn biteq(&self, other: &Self) -> bool { + self == other + } + + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{:?} ({:x})", self, self) + } + } + )* + }; + { float impl BitEq for $($type:ty,)* } => { + $( + impl BitEq for $type { + fn biteq(&self, other: &Self) -> bool { + self.to_bits() == other.to_bits() + } + + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{:?} ({:x})", self, self.to_bits()) + } + } + )* + }; + { vector impl BitEq for $($type:ty,)* } => { + $( + impl BitEq for $type { + fn biteq(&self, other: &Self) -> bool { + let a: &[_] = self.as_ref(); + let b: &[_] = other.as_ref(); + if a.len() == b.len() { + a.iter().zip(b.iter()).fold(true, |value, (left, right)| { + value && left.biteq(right) + }) + } else { + false + } + } + + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + #[repr(transparent)] + struct Wrapper<'a, T: BitEq>(&'a T); + + impl<T: BitEq> core::fmt::Debug for Wrapper<'_, T> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + self.0.fmt(f) + } + } + + let slice: &[_] = self.as_ref(); + + f.debug_list() + .entries(slice.iter().map(|x| Wrapper(x))) + .finish() + } + } + )* + }; +} + +impl_biteq! { + integer impl BitEq for + u8, u16, u32, u64, u128, usize, + i8, i16, i32, i64, i128, isize, + core_simd::mask8, core_simd::mask16, core_simd::mask32, core_simd::mask64, core_simd::mask128, core_simd::masksize, +} + +impl_biteq! { + float impl BitEq for f32, f64, +} + +impl_biteq! { + vector impl BitEq for + core_simd::u8x8, core_simd::u8x16, core_simd::u8x32, core_simd::u8x64, + core_simd::i8x8, core_simd::i8x16, core_simd::i8x32, core_simd::i8x64, + core_simd::u16x4, core_simd::u16x8, core_simd::u16x16, core_simd::u16x32, + core_simd::i16x4, core_simd::i16x8, core_simd::i16x16, core_simd::i16x32, + core_simd::u32x2, core_simd::u32x4, core_simd::u32x8, core_simd::u32x16, + core_simd::i32x2, core_simd::i32x4, core_simd::i32x8, core_simd::i32x16, + core_simd::u64x2, core_simd::u64x4, core_simd::u64x8, + core_simd::i64x2, core_simd::i64x4, core_simd::i64x8, + core_simd::u128x2, core_simd::u128x4, + core_simd::i128x2, core_simd::i128x4, + core_simd::usizex2, core_simd::usizex4, core_simd::usizex8, + core_simd::isizex2, core_simd::isizex4, core_simd::isizex8, + core_simd::f32x2, core_simd::f32x4, core_simd::f32x8, core_simd::f32x16, + core_simd::f64x2, core_simd::f64x4, core_simd::f64x8, + core_simd::mask8x8, core_simd::mask8x16, core_simd::mask8x32, core_simd::mask8x64, + core_simd::mask16x4, core_simd::mask16x8, core_simd::mask16x16, core_simd::mask16x32, + core_simd::mask32x2, core_simd::mask32x4, core_simd::mask32x8, core_simd::mask32x16, + core_simd::mask64x2, core_simd::mask64x4, core_simd::mask64x8, + core_simd::mask128x2, core_simd::mask128x4, + core_simd::masksizex2, core_simd::masksizex4, core_simd::masksizex8, +} + +pub(crate) struct BitEqWrapper<'a, T>(pub(crate) &'a T); + +impl<T: BitEq> PartialEq for BitEqWrapper<'_, T> { + fn eq(&self, other: &Self) -> bool { + self.0.biteq(other.0) + } +} + +impl<T: BitEq> core::fmt::Debug for BitEqWrapper<'_, T> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + self.0.fmt(f) + } +} +
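+// assert_biteq! wraps both operands in BitEqWrapper so the comparison goes through `biteq` while reusing assert_eq!'s failure reporting, which prints the bit-level Debug form of each side.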
+macro_rules! assert_biteq { + { $a:expr, $b:expr } => { + { + use helpers::biteq::BitEqWrapper; + let a = $a; + let b = $b; + assert_eq!(BitEqWrapper(&a), BitEqWrapper(&b)); + } + } +} diff --git a/crates/core_simd/tests/helpers/lanewise.rs b/crates/core_simd/tests/helpers/lanewise.rs new file mode 100644 index 0000000000000..6ab7803a96780 --- /dev/null +++ b/crates/core_simd/tests/helpers/lanewise.rs @@ -0,0 +1,49 @@ +pub fn apply_unary_lanewise<T: Copy, V: AsMut<[T]> + Default>(mut x: V, f: impl Fn(T) -> T) -> V { + for lane in x.as_mut() { + *lane = f(*lane) + } + x +} + +pub fn apply_binary_lanewise<T: Copy, V: AsRef<[T]> + AsMut<[T]> + Default>( + a: V, + b: V, + f: impl Fn(T, T) -> T, +) -> V { + let mut out = V::default(); + let out_slice = out.as_mut(); + let a_slice = a.as_ref(); + let b_slice = b.as_ref(); + for (o, (a, b)) in out_slice.iter_mut().zip(a_slice.iter().zip(b_slice.iter())) { + *o = f(*a, *b); + } + out +} + +pub fn apply_binary_scalar_rhs_lanewise<T: Copy, V: AsRef<[T]> + AsMut<[T]> + Default>( + a: V, + b: T, + f: impl Fn(T, T) -> T, +) -> V { + let mut out = V::default(); + let out_slice = out.as_mut(); + let a_slice = a.as_ref(); + for (o, a) in out_slice.iter_mut().zip(a_slice.iter()) { + *o = f(*a, b); + } + out +} + +pub fn apply_binary_scalar_lhs_lanewise<T: Copy, V: AsRef<[T]> + AsMut<[T]> + Default>( + a: T, + b: V, + f: impl Fn(T, T) -> T, +) -> V { + let mut out = V::default(); + let out_slice = out.as_mut(); + let b_slice = b.as_ref(); + for (o, b) in out_slice.iter_mut().zip(b_slice.iter()) { + *o = f(a, *b); + } + out +} diff --git a/crates/core_simd/tests/helpers/mod.rs b/crates/core_simd/tests/helpers/mod.rs new file mode 100644 index 0000000000000..b128f8251ca32 --- /dev/null +++ b/crates/core_simd/tests/helpers/mod.rs @@ -0,0 +1,4 @@ +#[macro_use] +pub mod biteq; + +pub mod lanewise; diff --git a/crates/core_simd/tests/ops.rs b/crates/core_simd/tests/ops.rs new file mode 100644 index 0000000000000..60aff06a76a65 --- /dev/null +++ b/crates/core_simd/tests/ops.rs @@ -0,0 +1 @@ +mod ops_impl; diff --git a/crates/core_simd/tests/ops_impl/f32.rs b/crates/core_simd/tests/ops_impl/f32.rs new file mode 100644 index 0000000000000..f87909b68cd39 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/f32.rs @@ -0,0 +1,6 @@ +use super::helpers; + +float_tests! { f32x2, f32 } +float_tests! { f32x4, f32 } +float_tests! { f32x8, f32 } +float_tests! { f32x16, f32 } diff --git a/crates/core_simd/tests/ops_impl/f64.rs b/crates/core_simd/tests/ops_impl/f64.rs new file mode 100644 index 0000000000000..19ae476bd0e1c --- /dev/null +++ b/crates/core_simd/tests/ops_impl/f64.rs @@ -0,0 +1,5 @@ +use super::helpers; + +float_tests! { f64x2, f64 } +float_tests! { f64x4, f64 } +float_tests! { f64x8, f64 } diff --git a/crates/core_simd/tests/ops_impl/float_macros.rs b/crates/core_simd/tests/ops_impl/float_macros.rs new file mode 100644 index 0000000000000..ddf3bbbe9369e --- /dev/null +++ b/crates/core_simd/tests/ops_impl/float_macros.rs @@ -0,0 +1,237 @@ +macro_rules!
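+// float_tests, defined next, instantiates one test module per vector type; every operator result is checked against a lane-by-lane application of the matching scalar op using the helpers above.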
float_tests { + { $vector:ident, $scalar:ident } => { + #[cfg(test)] + mod $vector { + use super::*; + use helpers::lanewise::*; + + // TODO impl this as an associated fn on vectors + fn from_slice(slice: &[$scalar]) -> core_simd::$vector { + let mut value = core_simd::$vector::default(); + let value_slice: &mut [_] = value.as_mut(); + value_slice.copy_from_slice(&slice[0..value_slice.len()]); + value + } + + const A: [$scalar; 16] = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]; + const B: [$scalar; 16] = [16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31.]; + + #[test] + fn add() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Add::add); + assert_biteq!(a + b, expected); + } + + #[test] + fn add_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Add::add); + a += b; + assert_biteq!(a, expected); + } + + #[test] + fn add_scalar_rhs() { + let a = from_slice(&A); + let b = 5.; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Add::add); + assert_biteq!(a + b, expected); + } + + #[test] + fn add_scalar_lhs() { + let a = 5.; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Add::add); + assert_biteq!(a + b, expected); + } + + #[test] + fn add_assign_scalar() { + let mut a = from_slice(&A); + let b = 5.; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Add::add); + a += b; + assert_biteq!(a, expected); + } + + #[test] + fn sub() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Sub::sub); + assert_biteq!(a - b, expected); + } + + #[test] + fn sub_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Sub::sub); + a -= b; + assert_biteq!(a, expected); + } + + #[test] + fn sub_scalar_rhs() { + let a = from_slice(&A); + let b = 5.; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Sub::sub); + assert_biteq!(a - b, expected); + } + + #[test] + fn sub_scalar_lhs() { + let a = 5.; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Sub::sub); + assert_biteq!(a - b, expected); + } + + #[test] + fn sub_assign_scalar() { + let mut a = from_slice(&A); + let b = 5.; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Sub::sub); + a -= b; + assert_biteq!(a, expected); + } + + #[test] + fn mul() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Mul::mul); + assert_biteq!(a * b, expected); + } + + #[test] + fn mul_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Mul::mul); + a *= b; + assert_biteq!(a, expected); + } + + #[test] + fn mul_scalar_rhs() { + let a = from_slice(&A); + let b = 5.; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Mul::mul); + assert_biteq!(a * b, expected); + } + + #[test] + fn mul_scalar_lhs() { + let a = 5.; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Mul::mul); + assert_biteq!(a * b, expected); + } + + #[test] + fn mul_assign_scalar() { + let mut a = from_slice(&A); + let b = 5.; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Mul::mul); + a *= b; + assert_biteq!(a, expected); + } + + #[test] + fn div() { 
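+ // Note: float division lowers straight to LLVM's fdiv, which is defined for every divisor (yielding inf or NaN), so unlike the integer vectors there is no divide-by-zero panic to guard against here.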
+ let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Div::div); + assert_biteq!(a / b, expected); + } + + #[test] + fn div_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Div::div); + a /= b; + assert_biteq!(a, expected); + } + + #[test] + fn div_scalar_rhs() { + let a = from_slice(&A); + let b = 5.; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Div::div); + assert_biteq!(a / b, expected); + } + + #[test] + fn div_scalar_lhs() { + let a = 5.; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Div::div); + assert_biteq!(a / b, expected); + } + + #[test] + fn div_assign_scalar() { + let mut a = from_slice(&A); + let b = 5.; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Div::div); + a /= b; + assert_biteq!(a, expected); + } + + #[test] + fn rem() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Rem::rem); + assert_biteq!(a % b, expected); + } + + #[test] + fn rem_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Rem::rem); + a %= b; + assert_biteq!(a, expected); + } + + #[test] + fn rem_scalar_rhs() { + let a = from_slice(&A); + let b = 5.; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Rem::rem); + assert_biteq!(a % b, expected); + } + + #[test] + fn rem_scalar_lhs() { + let a = 5.; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Rem::rem); + assert_biteq!(a % b, expected); + } + + #[test] + fn rem_assign_scalar() { + let mut a = from_slice(&A); + let b = 5.; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Rem::rem); + a %= b; + assert_biteq!(a, expected); + } + + #[test] + fn neg() { + let v = from_slice(&A); + let expected = apply_unary_lanewise(v, core::ops::Neg::neg); + assert_biteq!(-v, expected); + } + } + } +} diff --git a/crates/core_simd/tests/ops_impl/i128.rs b/crates/core_simd/tests/ops_impl/i128.rs new file mode 100644 index 0000000000000..8a0a279b8dce2 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/i128.rs @@ -0,0 +1,4 @@ +use super::helpers; + +int_tests! { i128x2, i128 } +int_tests! { i128x4, i128 } diff --git a/crates/core_simd/tests/ops_impl/i16.rs b/crates/core_simd/tests/ops_impl/i16.rs new file mode 100644 index 0000000000000..445436b77a893 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/i16.rs @@ -0,0 +1,6 @@ +use super::helpers; + +int_tests! { i16x4, i16 } +int_tests! { i16x8, i16 } +int_tests! { i16x16, i16 } +int_tests! { i16x32, i16 } diff --git a/crates/core_simd/tests/ops_impl/i32.rs b/crates/core_simd/tests/ops_impl/i32.rs new file mode 100644 index 0000000000000..f13ab833a3359 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/i32.rs @@ -0,0 +1,6 @@ +use super::helpers; + +int_tests! { i32x2, i32 } +int_tests! { i32x4, i32 } +int_tests! { i32x8, i32 } +int_tests! { i32x16, i32 } diff --git a/crates/core_simd/tests/ops_impl/i64.rs b/crates/core_simd/tests/ops_impl/i64.rs new file mode 100644 index 0000000000000..08479c4b994b3 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/i64.rs @@ -0,0 +1,5 @@ +use super::helpers; + +int_tests! { i64x2, i64 } +int_tests! { i64x4, i64 } +int_tests! 
{ i64x8, i64 } diff --git a/crates/core_simd/tests/ops_impl/i8.rs b/crates/core_simd/tests/ops_impl/i8.rs new file mode 100644 index 0000000000000..2a7db7906ac1e --- /dev/null +++ b/crates/core_simd/tests/ops_impl/i8.rs @@ -0,0 +1,6 @@ +use super::helpers; + +int_tests! { i8x8, i8 } +int_tests! { i8x16, i8 } +int_tests! { i8x32, i8 } +int_tests! { i8x64, i8 } diff --git a/crates/core_simd/tests/ops_impl/int_macros.rs b/crates/core_simd/tests/ops_impl/int_macros.rs new file mode 100644 index 0000000000000..4175541e892dc --- /dev/null +++ b/crates/core_simd/tests/ops_impl/int_macros.rs @@ -0,0 +1,388 @@ +macro_rules! int_tests { + { $vector:ident, $scalar:ident } => { + #[cfg(test)] + mod $vector { + use super::*; + use helpers::lanewise::*; + + // TODO impl this as an associated fn on vectors + fn from_slice(slice: &[$scalar]) -> core_simd::$vector { + let mut value = core_simd::$vector::default(); + let value_slice: &mut [_] = value.as_mut(); + value_slice.copy_from_slice(&slice[0..value_slice.len()]); + value + } + + const A: [$scalar; 64] = [ + 7, 7, 7, 7, -7, -7, -7, -7, + 6, 6, 6, 6, -6, -6, -6, -6, + 5, 5, 5, 5, -5, -5, -5, -5, + 4, 4, 4, 4, -4, -4, -4, -4, + 3, 3, 3, 3, -3, -3, -3, -3, + 2, 2, 2, 2, -2, -2, -2, -2, + 1, 1, 1, 1, -1, -1, -1, -1, + 0, 0, 0, 0, 0, 0, 0, 0, + ]; + const B: [$scalar; 64] = [ + 1, 2, 3, 4, 5, 6, 7, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + -1, -2, -3, -4, -5, -6, -7, -8, + -1, -2, -3, -4, -5, -6, -7, -8, + -1, -2, -3, -4, -5, -6, -7, -8, + -1, -2, -3, -4, -5, -6, -7, -8, + ]; + + #[test] + fn add() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Add::add); + assert_biteq!(a + b, expected); + } + + #[test] + fn add_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Add::add); + a += b; + assert_biteq!(a, expected); + } + + #[test] + fn add_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Add::add); + assert_biteq!(a + b, expected); + } + + #[test] + fn add_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Add::add); + assert_biteq!(a + b, expected); + } + + #[test] + fn add_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Add::add); + a += b; + assert_biteq!(a, expected); + } + + #[test] + fn sub() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Sub::sub); + assert_biteq!(a - b, expected); + } + + #[test] + fn sub_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Sub::sub); + a -= b; + assert_biteq!(a, expected); + } + + #[test] + fn sub_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Sub::sub); + assert_biteq!(a - b, expected); + } + + #[test] + fn sub_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Sub::sub); + assert_biteq!(a - b, expected); + } + + #[test] + fn sub_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Sub::sub); + a -= b; + assert_biteq!(a, expected); + } + + #[test] + fn mul() { + let a = 
from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Mul::mul); + assert_biteq!(a * b, expected); + } + + #[test] + fn mul_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Mul::mul); + a *= b; + assert_biteq!(a, expected); + } + + #[test] + fn mul_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Mul::mul); + assert_biteq!(a * b, expected); + } + + #[test] + fn mul_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Mul::mul); + assert_biteq!(a * b, expected); + } + + #[test] + fn mul_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Mul::mul); + a *= b; + assert_biteq!(a, expected); + } + + #[test] + fn div() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Div::div); + assert_biteq!(a / b, expected); + } + + #[test] + fn div_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Div::div); + a /= b; + assert_biteq!(a, expected); + } + + #[test] + fn div_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Div::div); + assert_biteq!(a / b, expected); + } + + #[test] + fn div_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Div::div); + assert_biteq!(a / b, expected); + } + + #[test] + fn div_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Div::div); + a /= b; + assert_biteq!(a, expected); + } + + #[test] + fn rem() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Rem::rem); + assert_biteq!(a % b, expected); + } + + #[test] + fn rem_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Rem::rem); + a %= b; + assert_biteq!(a, expected); + } + + #[test] + fn rem_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Rem::rem); + assert_biteq!(a % b, expected); + } + + #[test] + fn rem_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Rem::rem); + assert_biteq!(a % b, expected); + } + + #[test] + fn rem_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Rem::rem); + a %= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitand() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitAnd::bitand); + assert_biteq!(a & b, expected); + } + + #[test] + fn bitand_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitAnd::bitand); + a &= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitand_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitAnd::bitand); + assert_biteq!(a & b, expected); + } + + #[test] + fn bitand_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let 
expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::BitAnd::bitand); + assert_biteq!(a & b, expected); + } + + #[test] + fn bitand_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitAnd::bitand); + a &= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitor() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitOr::bitor); + assert_biteq!(a | b, expected); + } + + #[test] + fn bitor_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitOr::bitor); + a |= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitor_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitOr::bitor); + assert_biteq!(a | b, expected); + } + + #[test] + fn bitor_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::BitOr::bitor); + assert_biteq!(a | b, expected); + } + + #[test] + fn bitor_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitOr::bitor); + a |= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitxor() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitXor::bitxor); + assert_biteq!(a ^ b, expected); + } + + #[test] + fn bitxor_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitXor::bitxor); + a ^= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitxor_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitXor::bitxor); + assert_biteq!(a ^ b, expected); + } + + #[test] + fn bitxor_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::BitXor::bitxor); + assert_biteq!(a ^ b, expected); + } + + #[test] + fn bitxor_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitXor::bitxor); + a ^= b; + assert_biteq!(a, expected); + } + + #[test] + fn neg() { + let v = from_slice(&A); + let expected = apply_unary_lanewise(v, core::ops::Neg::neg); + assert_biteq!(-v, expected); + } + + #[test] + fn not() { + let v = from_slice(&A); + let expected = apply_unary_lanewise(v, core::ops::Not::not); + assert_biteq!(!v, expected); + } + } + } +} diff --git a/crates/core_simd/tests/ops_impl/isize.rs b/crates/core_simd/tests/ops_impl/isize.rs new file mode 100644 index 0000000000000..9943e9c357a45 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/isize.rs @@ -0,0 +1,5 @@ +use super::helpers; + +int_tests! { isizex2, isize } +int_tests! { isizex4, isize } +int_tests! { isizex8, isize } diff --git a/crates/core_simd/tests/ops_impl/mask128.rs b/crates/core_simd/tests/ops_impl/mask128.rs new file mode 100644 index 0000000000000..f0bcdb4d4df97 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/mask128.rs @@ -0,0 +1,4 @@ +use super::helpers; + +mask_tests! { mask128x2, mask128 } +mask_tests! 
{ mask128x4, mask128 } diff --git a/crates/core_simd/tests/ops_impl/mask16.rs b/crates/core_simd/tests/ops_impl/mask16.rs new file mode 100644 index 0000000000000..6f3f8e0ee02e7 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/mask16.rs @@ -0,0 +1,6 @@ +use super::helpers; + +mask_tests! { mask16x4, mask16 } +mask_tests! { mask16x8, mask16 } +mask_tests! { mask16x16, mask16 } +mask_tests! { mask16x32, mask16 } diff --git a/crates/core_simd/tests/ops_impl/mask32.rs b/crates/core_simd/tests/ops_impl/mask32.rs new file mode 100644 index 0000000000000..5c35885a2f5b7 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/mask32.rs @@ -0,0 +1,6 @@ +use super::helpers; + +mask_tests! { mask32x2, mask32 } +mask_tests! { mask32x4, mask32 } +mask_tests! { mask32x8, mask32 } +mask_tests! { mask32x16, mask32 } diff --git a/crates/core_simd/tests/ops_impl/mask64.rs b/crates/core_simd/tests/ops_impl/mask64.rs new file mode 100644 index 0000000000000..88d3211465c5a --- /dev/null +++ b/crates/core_simd/tests/ops_impl/mask64.rs @@ -0,0 +1,5 @@ +use super::helpers; + +mask_tests! { mask64x2, mask64 } +mask_tests! { mask64x4, mask64 } +mask_tests! { mask64x8, mask64 } diff --git a/crates/core_simd/tests/ops_impl/mask8.rs b/crates/core_simd/tests/ops_impl/mask8.rs new file mode 100644 index 0000000000000..fa4bcf09f367f --- /dev/null +++ b/crates/core_simd/tests/ops_impl/mask8.rs @@ -0,0 +1,6 @@ +use super::helpers; + +mask_tests! { mask8x8, mask8 } +mask_tests! { mask8x16, mask8 } +mask_tests! { mask8x32, mask8 } +mask_tests! { mask8x64, mask8 } diff --git a/crates/core_simd/tests/ops_impl/mask_macros.rs b/crates/core_simd/tests/ops_impl/mask_macros.rs new file mode 100644 index 0000000000000..9d6bc0cd692f2 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/mask_macros.rs @@ -0,0 +1,179 @@ +macro_rules! 
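+// mask_tests builds mask vectors from bool slices via the scalar mask's From<bool> impl, and the all-set/all-unset splat constants below exercise the mask-by-scalar operator impls.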
mask_tests { + { $vector:ident, $scalar:ident } => { + #[cfg(test)] + mod $vector { + use super::*; + use helpers::lanewise::*; + + fn from_slice(slice: &[bool]) -> core_simd::$vector { + let mut value = core_simd::$vector::default(); + let value_slice: &mut [_] = value.as_mut(); + for (m, b) in value_slice.iter_mut().zip(slice.iter()) { + *m = (*b).into(); + } + value + } + + const A: [bool; 64] = [ + false, true, false, true, false, false, true, true, + false, true, false, true, false, false, true, true, + false, true, false, true, false, false, true, true, + false, true, false, true, false, false, true, true, + false, true, false, true, false, false, true, true, + false, true, false, true, false, false, true, true, + false, true, false, true, false, false, true, true, + false, true, false, true, false, false, true, true, + ]; + const B: [bool; 64] = [ + false, false, true, true, false, true, false, true, + false, false, true, true, false, true, false, true, + false, false, true, true, false, true, false, true, + false, false, true, true, false, true, false, true, + false, false, true, true, false, true, false, true, + false, false, true, true, false, true, false, true, + false, false, true, true, false, true, false, true, + false, false, true, true, false, true, false, true, + ]; + + const SET_SCALAR: core_simd::$scalar = core_simd::$scalar::new(true); + const UNSET_SCALAR: core_simd::$scalar = core_simd::$scalar::new(false); + const SET_VECTOR: core_simd::$vector = core_simd::$vector::splat(SET_SCALAR); + const UNSET_VECTOR: core_simd::$vector = core_simd::$vector::splat(UNSET_SCALAR); + + #[test] + fn bitand() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitAnd::bitand); + assert_biteq!(a & b, expected); + } + + #[test] + fn bitand_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitAnd::bitand); + a &= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitand_scalar_rhs() { + let a = from_slice(&A); + let expected = a; + assert_biteq!(a & SET_SCALAR, expected); + assert_biteq!(a & UNSET_SCALAR, UNSET_VECTOR); + } + + #[test] + fn bitand_scalar_lhs() { + let a = from_slice(&A); + let expected = a; + assert_biteq!(SET_SCALAR & a, expected); + assert_biteq!(UNSET_SCALAR & a, UNSET_VECTOR); + } + + #[test] + fn bitand_assign_scalar() { + let mut a = from_slice(&A); + let expected = a; + a &= SET_SCALAR; + assert_biteq!(a, expected); + a &= UNSET_SCALAR; + assert_biteq!(a, UNSET_VECTOR); + } + + #[test] + fn bitor() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitOr::bitor); + assert_biteq!(a | b, expected); + } + + #[test] + fn bitor_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitOr::bitor); + a |= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitor_scalar_rhs() { + let a = from_slice(&A); + assert_biteq!(a | UNSET_SCALAR, a); + assert_biteq!(a | SET_SCALAR, SET_VECTOR); + } + + #[test] + fn bitor_scalar_lhs() { + let a = from_slice(&A); + assert_biteq!(UNSET_SCALAR | a, a); + assert_biteq!(SET_SCALAR | a, SET_VECTOR); + } + + #[test] + fn bitor_assign_scalar() { + let mut a = from_slice(&A); + let expected = a; + a |= UNSET_SCALAR; + assert_biteq!(a, expected); + a |= SET_SCALAR; + assert_biteq!(a, SET_VECTOR); + } + + #[test] + fn bitxor() { + let a = from_slice(&A); + let b = from_slice(&B); + 
let expected = apply_binary_lanewise(a, b, core::ops::BitXor::bitxor); + assert_biteq!(a ^ b, expected); + } + + #[test] + fn bitxor_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitXor::bitxor); + a ^= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitxor_scalar_rhs() { + let a = from_slice(&A); + let expected = apply_binary_scalar_rhs_lanewise(a, SET_SCALAR, core::ops::BitXor::bitxor); + assert_biteq!(a ^ UNSET_SCALAR, a); + assert_biteq!(a ^ SET_SCALAR, expected); + } + + #[test] + fn bitxor_scalar_lhs() { + let a = from_slice(&A); + let expected = apply_binary_scalar_lhs_lanewise(SET_SCALAR, a, core::ops::BitXor::bitxor); + assert_biteq!(UNSET_SCALAR ^ a, a); + assert_biteq!(SET_SCALAR ^ a, expected); + } + + #[test] + fn bitxor_assign_scalar() { + let mut a = from_slice(&A); + let expected_unset = a; + let expected_set = apply_binary_scalar_rhs_lanewise(a, SET_SCALAR, core::ops::BitXor::bitxor); + a ^= UNSET_SCALAR; + assert_biteq!(a, expected_unset); + a ^= SET_SCALAR; + assert_biteq!(a, expected_set); + } + + #[test] + fn not() { + let v = from_slice(&A); + let expected = apply_unary_lanewise(v, core::ops::Not::not); + assert_biteq!(!v, expected); + } + } + } +} diff --git a/crates/core_simd/tests/ops_impl/masksize.rs b/crates/core_simd/tests/ops_impl/masksize.rs new file mode 100644 index 0000000000000..76e333f3c154b --- /dev/null +++ b/crates/core_simd/tests/ops_impl/masksize.rs @@ -0,0 +1,5 @@ +use super::helpers; + +mask_tests! { masksizex2, masksize } +mask_tests! { masksizex4, masksize } +mask_tests! { masksizex8, masksize } diff --git a/crates/core_simd/tests/ops_impl/mod.rs b/crates/core_simd/tests/ops_impl/mod.rs new file mode 100644 index 0000000000000..814f2d04b59c3 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/mod.rs @@ -0,0 +1,39 @@ +#[macro_use] +#[path = "../helpers/mod.rs"] +mod helpers; + +#[macro_use] +mod float_macros; + +mod r#f32; +mod r#f64; + +#[macro_use] +mod int_macros; + +mod r#i8; +mod r#i16; +mod r#i32; +mod r#i64; +mod r#i128; +mod r#isize; + +#[macro_use] +mod uint_macros; + +mod r#u8; +mod r#u16; +mod r#u32; +mod r#u64; +mod r#u128; +mod r#usize; + +#[macro_use] +mod mask_macros; + +mod mask8; +mod mask16; +mod mask32; +mod mask64; +mod mask128; +mod masksize; diff --git a/crates/core_simd/tests/ops_impl/u128.rs b/crates/core_simd/tests/ops_impl/u128.rs new file mode 100644 index 0000000000000..cfd849640ffef --- /dev/null +++ b/crates/core_simd/tests/ops_impl/u128.rs @@ -0,0 +1,4 @@ +use super::helpers; + +uint_tests! { u128x2, u128 } +uint_tests! { u128x4, u128 } diff --git a/crates/core_simd/tests/ops_impl/u16.rs b/crates/core_simd/tests/ops_impl/u16.rs new file mode 100644 index 0000000000000..50af4dd48b382 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/u16.rs @@ -0,0 +1,6 @@ +use super::helpers; + +uint_tests! { u16x4, u16 } +uint_tests! { u16x8, u16 } +uint_tests! { u16x16, u16 } +uint_tests! { u16x32, u16 } diff --git a/crates/core_simd/tests/ops_impl/u32.rs b/crates/core_simd/tests/ops_impl/u32.rs new file mode 100644 index 0000000000000..8e7faa9d74038 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/u32.rs @@ -0,0 +1,6 @@ +use super::helpers; + +uint_tests! { u32x2, u32 } +uint_tests! { u32x4, u32 } +uint_tests! { u32x8, u32 } +uint_tests! 
{ u32x16, u32 } diff --git a/crates/core_simd/tests/ops_impl/u64.rs b/crates/core_simd/tests/ops_impl/u64.rs new file mode 100644 index 0000000000000..1a6385d37bae5 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/u64.rs @@ -0,0 +1,5 @@ +use super::helpers; + +uint_tests! { u64x2, u64 } +uint_tests! { u64x4, u64 } +uint_tests! { u64x8, u64 } diff --git a/crates/core_simd/tests/ops_impl/u8.rs b/crates/core_simd/tests/ops_impl/u8.rs new file mode 100644 index 0000000000000..31568b1eacbef --- /dev/null +++ b/crates/core_simd/tests/ops_impl/u8.rs @@ -0,0 +1,6 @@ +use super::helpers; + +uint_tests! { u8x8, u8 } +uint_tests! { u8x16, u8 } +uint_tests! { u8x32, u8 } +uint_tests! { u8x64, u8 } diff --git a/crates/core_simd/tests/ops_impl/uint_macros.rs b/crates/core_simd/tests/ops_impl/uint_macros.rs new file mode 100644 index 0000000000000..eb9ac34d7efd8 --- /dev/null +++ b/crates/core_simd/tests/ops_impl/uint_macros.rs @@ -0,0 +1,381 @@ +macro_rules! uint_tests { + { $vector:ident, $scalar:ident } => { + #[cfg(test)] + mod $vector { + use super::*; + use helpers::lanewise::*; + + // TODO impl this as an associated fn on vectors + fn from_slice(slice: &[$scalar]) -> core_simd::$vector { + let mut value = core_simd::$vector::default(); + let value_slice: &mut [_] = value.as_mut(); + value_slice.copy_from_slice(&slice[0..value_slice.len()]); + value + } + + const A: [$scalar; 64] = [ + 16, 16, 16, 16, 16, 16, 16, 16, + 14, 14, 14, 14, 14, 14, 14, 14, + 12, 12, 12, 12, 12, 12, 12, 12, + 10, 10, 10, 10, 10, 10, 10, 10, + 8, 8, 8, 8, 8, 8, 8, 8, + 6, 6, 6, 6, 6, 6, 7, 8, + 4, 4, 4, 4, 5, 6, 7, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + ]; + const B: [$scalar; 64] = [ + 1, 2, 3, 4, 1, 2, 3, 4, + 1, 2, 3, 4, 5, 6, 7, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + ]; + + #[test] + fn add() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Add::add); + assert_biteq!(a + b, expected); + } + + #[test] + fn add_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Add::add); + a += b; + assert_biteq!(a, expected); + } + + #[test] + fn add_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Add::add); + assert_biteq!(a + b, expected); + } + + #[test] + fn add_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Add::add); + assert_biteq!(a + b, expected); + } + + #[test] + fn add_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Add::add); + a += b; + assert_biteq!(a, expected); + } + + #[test] + fn sub() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Sub::sub); + assert_biteq!(a - b, expected); + } + + #[test] + fn sub_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Sub::sub); + a -= b; + assert_biteq!(a, expected); + } + + #[test] + fn sub_scalar_rhs() { + let a = from_slice(&A); + let b = 1; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Sub::sub); + assert_biteq!(a - b, expected); + } + + #[test] + fn sub_scalar_lhs() { + let a = 40; + let b = from_slice(&B); + let expected = 
apply_binary_scalar_lhs_lanewise(a, b, core::ops::Sub::sub); + assert_biteq!(a - b, expected); + } + + #[test] + fn sub_assign_scalar() { + let mut a = from_slice(&A); + let b = 1; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Sub::sub); + a -= b; + assert_biteq!(a, expected); + } + + #[test] + fn mul() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Mul::mul); + assert_biteq!(a * b, expected); + } + + #[test] + fn mul_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Mul::mul); + a *= b; + assert_biteq!(a, expected); + } + + #[test] + fn mul_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Mul::mul); + assert_biteq!(a * b, expected); + } + + #[test] + fn mul_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Mul::mul); + assert_biteq!(a * b, expected); + } + + #[test] + fn mul_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Mul::mul); + a *= b; + assert_biteq!(a, expected); + } + + #[test] + fn div() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Div::div); + assert_biteq!(a / b, expected); + } + + #[test] + fn div_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Div::div); + a /= b; + assert_biteq!(a, expected); + } + + #[test] + fn div_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Div::div); + assert_biteq!(a / b, expected); + } + + #[test] + fn div_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Div::div); + assert_biteq!(a / b, expected); + } + + #[test] + fn div_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Div::div); + a /= b; + assert_biteq!(a, expected); + } + + #[test] + fn rem() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Rem::rem); + assert_biteq!(a % b, expected); + } + + #[test] + fn rem_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::Rem::rem); + a %= b; + assert_biteq!(a, expected); + } + + #[test] + fn rem_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Rem::rem); + assert_biteq!(a % b, expected); + } + + #[test] + fn rem_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::Rem::rem); + assert_biteq!(a % b, expected); + } + + #[test] + fn rem_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::Rem::rem); + a %= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitand() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitAnd::bitand); + assert_biteq!(a & b, expected); + } + + #[test] + fn bitand_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, 
core::ops::BitAnd::bitand); + a &= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitand_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitAnd::bitand); + assert_biteq!(a & b, expected); + } + + #[test] + fn bitand_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::BitAnd::bitand); + assert_biteq!(a & b, expected); + } + + #[test] + fn bitand_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitAnd::bitand); + a &= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitor() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitOr::bitor); + assert_biteq!(a | b, expected); + } + + #[test] + fn bitor_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitOr::bitor); + a |= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitor_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitOr::bitor); + assert_biteq!(a | b, expected); + } + + #[test] + fn bitor_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::BitOr::bitor); + assert_biteq!(a | b, expected); + } + + #[test] + fn bitor_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitOr::bitor); + a |= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitxor() { + let a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitXor::bitxor); + assert_biteq!(a ^ b, expected); + } + + #[test] + fn bitxor_assign() { + let mut a = from_slice(&A); + let b = from_slice(&B); + let expected = apply_binary_lanewise(a, b, core::ops::BitXor::bitxor); + a ^= b; + assert_biteq!(a, expected); + } + + #[test] + fn bitxor_scalar_rhs() { + let a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitXor::bitxor); + assert_biteq!(a ^ b, expected); + } + + #[test] + fn bitxor_scalar_lhs() { + let a = 5; + let b = from_slice(&B); + let expected = apply_binary_scalar_lhs_lanewise(a, b, core::ops::BitXor::bitxor); + assert_biteq!(a ^ b, expected); + } + + #[test] + fn bitxor_assign_scalar() { + let mut a = from_slice(&A); + let b = 5; + let expected = apply_binary_scalar_rhs_lanewise(a, b, core::ops::BitXor::bitxor); + a ^= b; + assert_biteq!(a, expected); + } + + #[test] + fn not() { + let v = from_slice(&A); + let expected = apply_unary_lanewise(v, core::ops::Not::not); + assert_biteq!(!v, expected); + } + } + } +} diff --git a/crates/core_simd/tests/ops_impl/usize.rs b/crates/core_simd/tests/ops_impl/usize.rs new file mode 100644 index 0000000000000..13da57f15869b --- /dev/null +++ b/crates/core_simd/tests/ops_impl/usize.rs @@ -0,0 +1,5 @@ +use super::helpers; + +uint_tests! { usizex2, usize } +uint_tests! { usizex4, usize } +uint_tests! { usizex8, usize }
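A note on the helpers these tests lean on: apply_unary_lanewise, apply_binary_lanewise, and the scalar-lhs/rhs variants live in crates/core_simd/tests/helpers/lanewise.rs (added by this patch but not reproduced above). Each computes the expected vector by running the plain scalar operation one lane at a time, so every vector operator is checked against the scalar semantics of the same core::ops trait, and a miscompiled vector op cannot hide inside the test's own arithmetic. A minimal sketch of that shape, assuming only the AsRef<[T]>/AsMut<[T]> and Default impls the tests already use (illustrative, not the patch's verbatim code):

    fn apply_unary_lanewise<T: Copy, V: AsRef<[T]> + AsMut<[T]> + Default>(
        x: V,
        f: impl Fn(T) -> T,
    ) -> V {
        let mut out = V::default();
        // Apply the scalar op to each lane independently.
        for (o, x) in out.as_mut().iter_mut().zip(x.as_ref()) {
            *o = f(*x);
        }
        out
    }

    fn apply_binary_lanewise<T: Copy, V: AsRef<[T]> + AsMut<[T]> + Default>(
        a: V,
        b: V,
        f: impl Fn(T, T) -> T,
    ) -> V {
        let mut out = V::default();
        for ((o, a), b) in out.as_mut().iter_mut().zip(a.as_ref()).zip(b.as_ref()) {
            *o = f(*a, *b);
        }
        out
    }

    fn apply_binary_scalar_rhs_lanewise<T: Copy, V: AsRef<[T]> + AsMut<[T]> + Default>(
        a: V,
        b: T,
        f: impl Fn(T, T) -> T,
    ) -> V {
        let mut out = V::default();
        // The scalar is broadcast: the same rhs is reused for every lane.
        for (o, a) in out.as_mut().iter_mut().zip(a.as_ref()) {
            *o = f(*a, b);
        }
        out
    }

    fn apply_binary_scalar_lhs_lanewise<T: Copy, V: AsRef<[T]> + AsMut<[T]> + Default>(
        a: T,
        b: V,
        f: impl Fn(T, T) -> T,
    ) -> V {
        let mut out = V::default();
        for (o, b) in out.as_mut().iter_mut().zip(b.as_ref()) {
            *o = f(a, *b);
        }
        out
    }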
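The from_slice helper is duplicated into every generated test module; the TODO in uint_macros.rs (and its siblings) notes that it should eventually become an associated function on the vector types. Inside core_simd that could look roughly like the following (hypothetical method, not part of this patch), after which the test macros could drop their local copies:

    impl u32x4 {
        /// Builds a vector from the leading elements of `slice`.
        /// Panics if `slice` has fewer elements than the vector has lanes.
        pub fn from_slice(slice: &[u32]) -> Self {
            let mut value = Self::default();
            let value_slice: &mut [u32] = value.as_mut();
            value_slice.copy_from_slice(&slice[..value_slice.len()]);
            value
        }
    }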
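Finally, assert_biteq! comes from crates/core_simd/tests/helpers/biteq.rs (also part of this patch, not shown here). The point of comparing bit patterns rather than using PartialEq is the float tests: NaN != NaN under ==, yet a NaN result should still be checked exactly; for the unsigned and mask tests above it coincides with ordinary equality. A rough guess at the underlying trait (illustrative only):

    trait BitEq {
        fn biteq(&self, other: &Self) -> bool;
    }

    impl BitEq for u32 {
        fn biteq(&self, other: &Self) -> bool {
            self == other
        }
    }

    impl BitEq for f32 {
        fn biteq(&self, other: &Self) -> bool {
            // Compare raw bit patterns so NaNs with equal payloads match.
            self.to_bits() == other.to_bits()
        }
    }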