diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 077c47fba..e09862d47 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -177,6 +177,8 @@ jobs: steps: - uses: actions/checkout@v4 + with: + submodules: recursive - name: install stable toolchain uses: dtolnay/rust-toolchain@master @@ -190,7 +192,9 @@ jobs: save-if: ${{ github.event_name != 'merge_group' }} - name: run code generator - run: cargo run --bin fearless_simd_gen + run: | + cargo run --bin fearless_simd_gen --release + cargo run --bin fearless_simd_gen --release -- core-arch - name: check for uncommitted changes run: git diff --exit-code diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..9f0d2a7c5 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "fearless_simd_gen/stdarch"] + path = fearless_simd_gen/stdarch + url = https://github.com/rust-lang/stdarch/ diff --git a/Cargo.lock b/Cargo.lock index 8428edbb1..9dd8c545a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -128,6 +128,7 @@ dependencies = [ "clap", "proc-macro2", "quote", + "syn", ] [[package]] diff --git a/fearless_simd/src/core_arch/aarch64/mod.rs b/fearless_simd/src/core_arch/aarch64/mod.rs index 86f5f0d77..bf411c05b 100644 --- a/fearless_simd/src/core_arch/aarch64/mod.rs +++ b/fearless_simd/src/core_arch/aarch64/mod.rs @@ -1,4070 +1,8 @@ -// Copyright 2024 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -//! Access to intrinsics on aarch64. +// This file is autogenerated by fearless_simd_gen -// These implementations are cut and pasted from pulp. - -/// A token for Neon intrinsics on aarch64. -#[derive(Clone, Copy, Debug)] -pub struct Neon { - _private: (), -} - -impl Neon { - /// Create a SIMD token. - /// - /// # Safety - /// - /// The required CPU features must be available. 
- #[inline] - pub const unsafe fn new_unchecked() -> Self { - Self { _private: () } - } -} - -#[cfg(feature = "safe_wrappers")] -use {crate::impl_macros::delegate, core::arch::aarch64::*}; - -#[cfg(feature = "safe_wrappers")] -type p8 = u8; -#[cfg(feature = "safe_wrappers")] -type p16 = u16; -#[cfg(feature = "safe_wrappers")] -type p64 = u64; -#[cfg(feature = "safe_wrappers")] -type p128 = u128; - -#[cfg(feature = "safe_wrappers")] -#[expect( - clippy::missing_safety_doc, - reason = "TODO: https://github.com/linebender/fearless_simd/issues/40" -)] -impl Neon { - delegate! { core::arch::aarch64: - fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vorr_u16(a: uint16x4_t, b: 
uint16x4_t) -> uint16x4_t; - fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vabdq_u32(a: 
uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t; - fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t; - fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t; - fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t; - fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t; - fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t; - fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t; - fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t; - fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t; - fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t; - fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t; - fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t; - fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; - fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; - fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t; - fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t; - fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t; - fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t; - fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t; - fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t; - fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t; - fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t; - fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t; - fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) 
-> uint16x8_t; - fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vabs_f32(a: float32x2_t) -> float32x2_t; - fn vabsq_f32(a: float32x4_t) -> float32x4_t; - fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t; - fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t; - fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t; - fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t; - fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t; - fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t; - fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; - fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; - fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t; - fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t; - fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t; - fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t; - fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t; - fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t; - fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vclt_f32(a: float32x2_t, b: 
float32x2_t) -> uint32x2_t; - fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; - fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t; - fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t; - fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t; - fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t; - fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t; - fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t; - fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; - fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; - fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t; - fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t; - fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t; - fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t; - fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t; - fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t; - fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; - fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; - fn vcls_s8(a: int8x8_t) -> int8x8_t; - fn vclsq_s8(a: int8x16_t) -> int8x16_t; - fn vcls_s16(a: int16x4_t) -> int16x4_t; - fn vclsq_s16(a: int16x8_t) -> int16x8_t; - fn vcls_s32(a: int32x2_t) -> int32x2_t; - fn vclsq_s32(a: int32x4_t) -> int32x4_t; - fn vcls_u8(a: uint8x8_t) 
-> int8x8_t; - fn vclsq_u8(a: uint8x16_t) -> int8x16_t; - fn vcls_u16(a: uint16x4_t) -> int16x4_t; - fn vclsq_u16(a: uint16x8_t) -> int16x8_t; - fn vcls_u32(a: uint32x2_t) -> int32x2_t; - fn vclsq_u32(a: uint32x4_t) -> int32x4_t; - fn vclz_s8(a: int8x8_t) -> int8x8_t; - fn vclzq_s8(a: int8x16_t) -> int8x16_t; - fn vclz_s16(a: int16x4_t) -> int16x4_t; - fn vclzq_s16(a: int16x8_t) -> int16x8_t; - fn vclz_s32(a: int32x2_t) -> int32x2_t; - fn vclzq_s32(a: int32x4_t) -> int32x4_t; - fn vclz_u8(a: uint8x8_t) -> uint8x8_t; - fn vclzq_u8(a: uint8x16_t) -> uint8x16_t; - fn vclz_u16(a: uint16x4_t) -> uint16x4_t; - fn vclzq_u16(a: uint16x8_t) -> uint16x8_t; - fn vclz_u32(a: uint32x2_t) -> uint32x2_t; - fn vclzq_u32(a: uint32x4_t) -> uint32x4_t; - fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; - fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; - fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; - fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; - fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; - fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; - fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t; - fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t; - fn vcreate_s8(a: u64) -> int8x8_t; - fn vcreate_s16(a: u64) -> int16x4_t; - fn vcreate_s32(a: u64) -> int32x2_t; - fn vcreate_s64(a: u64) -> int64x1_t; - fn vcreate_u8(a: u64) -> uint8x8_t; - fn vcreate_u16(a: u64) -> uint16x4_t; - fn vcreate_u32(a: u64) -> uint32x2_t; - fn vcreate_u64(a: u64) -> uint64x1_t; - fn vcreate_p8(a: u64) -> poly8x8_t; - fn vcreate_p16(a: u64) -> poly16x4_t; - fn vcreate_f32(a: u64) -> float32x2_t; - fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t; - fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t; - fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t; - fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t; - fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t; - fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t; - fn vcvt_n_f32_u32(a: 
uint32x2_t) -> float32x2_t; - fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t; - fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t; - fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t; - fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t; - fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t; - fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t; - fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t; - fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t; - fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t; - fn vdup_lane_s8(a: int8x8_t) -> int8x8_t; - fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t; - fn vdup_lane_s16(a: int16x4_t) -> int16x4_t; - fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t; - fn vdup_lane_s32(a: int32x2_t) -> int32x2_t; - fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t; - fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t; - fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t; - fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t; - fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t; - fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t; - fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t; - fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t; - fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t; - fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t; - fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t; - fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t; - fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t; - fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t; - fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t; - fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t; - fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t; - fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t; - fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t; - fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t; - fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t; - fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t; - fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t; - fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t; - fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t; - fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t; - fn 
vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t; - fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t; - fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t; - fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t; - fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t; - fn vdup_lane_f32(a: float32x2_t) -> float32x2_t; - fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t; - fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t; - fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t; - fn vdup_lane_s64(a: int64x1_t) -> int64x1_t; - fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t; - fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t; - fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t; - fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vextq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t; - fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t; - fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> 
int16x4_t; - fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t; - fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t; - fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t; - fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t; - fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t; - fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; - fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t; - fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t; - fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t; - fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t; - fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t; - fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t; - fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t; - fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t; - fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t; - fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t; - fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t; - fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t; - fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t; - fn vmlaq_lane_s32(a: int32x4_t, 
b: int32x4_t, c: int32x2_t) -> int32x4_t; - fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - fn vmla_lane_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x4_t, - ) -> uint16x4_t; - fn vmla_laneq_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x8_t, - ) -> uint16x4_t; - fn vmlaq_lane_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x4_t, - ) -> uint16x8_t; - fn vmlaq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x8_t, - ) -> uint16x8_t; - fn vmla_lane_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x2_t, - ) -> uint32x2_t; - fn vmla_laneq_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x4_t, - ) -> uint32x2_t; - fn vmlaq_lane_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x2_t, - ) -> uint32x4_t; - fn vmlaq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, - ) -> uint32x4_t; - fn vmla_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - ) -> float32x2_t; - fn vmla_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, - ) -> float32x2_t; - fn vmlaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, - ) -> float32x4_t; - fn vmlaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - ) -> float32x4_t; - fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t; - fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t; - fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t; - fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t; - fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t; - fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t; - fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t; - fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t; - fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t; - fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t; - fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> 
int32x4_t; - fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t; - fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t; - fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t; - fn vmlal_lane_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x4_t, - ) -> uint32x4_t; - fn vmlal_laneq_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x8_t, - ) -> uint32x4_t; - fn vmlal_lane_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x2_t, - ) -> uint64x2_t; - fn vmlal_laneq_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x4_t, - ) -> uint64x2_t; - fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t; - fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t; - fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t; - fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t; - fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t; - fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; - fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t; - fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t; - fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t; - fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t; - fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t; - fn vmlsq_n_u16(a: uint16x8_t, b: 
uint16x8_t, c: u16) -> uint16x8_t; - fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t; - fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t; - fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t; - fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t; - fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t; - fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t; - fn vmlsq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t; - fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t; - fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - fn vmls_lane_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x4_t, - ) -> uint16x4_t; - fn vmls_laneq_u16( - a: uint16x4_t, - b: uint16x4_t, - c: uint16x8_t, - ) -> uint16x4_t; - fn vmlsq_lane_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x4_t, - ) -> uint16x8_t; - fn vmlsq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, - c: uint16x8_t, - ) -> uint16x8_t; - fn vmls_lane_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x2_t, - ) -> uint32x2_t; - fn vmls_laneq_u32( - a: uint32x2_t, - b: uint32x2_t, - c: uint32x4_t, - ) -> uint32x2_t; - fn vmlsq_lane_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x2_t, - ) -> uint32x4_t; - fn vmlsq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, - c: uint32x4_t, - ) -> uint32x4_t; - fn vmls_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - ) -> float32x2_t; - fn vmls_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, - ) -> float32x2_t; - fn vmlsq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, - ) -> float32x4_t; - fn vmlsq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - 
c: float32x4_t, - ) -> float32x4_t; - fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t; - fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t; - fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t; - fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t; - fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t; - fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t; - fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t; - fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t; - fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t; - fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t; - fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t; - fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t; - fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t; - fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t; - fn vmlsl_lane_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x4_t, - ) -> uint32x4_t; - fn vmlsl_laneq_u16( - a: uint32x4_t, - b: uint16x4_t, - c: uint16x8_t, - ) -> uint32x4_t; - fn vmlsl_lane_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x2_t, - ) -> uint64x2_t; - fn vmlsl_laneq_u32( - a: uint64x2_t, - b: uint32x2_t, - c: uint32x4_t, - ) -> uint64x2_t; - fn vneg_s8(a: int8x8_t) -> int8x8_t; - fn vnegq_s8(a: int8x16_t) -> int8x16_t; - fn vneg_s16(a: int16x4_t) -> int16x4_t; - fn vnegq_s16(a: int16x8_t) -> int16x8_t; - fn vneg_s32(a: int32x2_t) -> int32x2_t; - fn vnegq_s32(a: int32x4_t) -> int32x4_t; - fn vneg_f32(a: float32x2_t) -> float32x2_t; - fn vnegq_f32(a: float32x4_t) -> float32x4_t; - fn vqneg_s8(a: int8x8_t) -> int8x8_t; - fn vqnegq_s8(a: int8x16_t) -> int8x16_t; - fn vqneg_s16(a: int16x4_t) -> int16x4_t; - fn vqnegq_s16(a: int16x8_t) -> int16x8_t; - fn vqneg_s32(a: int32x2_t) -> int32x2_t; - fn vqnegq_s32(a: 
int32x4_t) -> int32x4_t; - fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vrhaddq_u32(a: uint32x4_t, b: 
uint32x4_t) -> uint32x4_t; - fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vrndn_f32(a: float32x2_t) -> float32x2_t; - fn vrndnq_f32(a: float32x4_t) -> float32x4_t; - fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t; - unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t; - unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t; - unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t; - unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t; - unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t; - unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t; - unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t; - unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t; - unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t; - unsafe fn vld1_s32_x3(a: *const i32) -> 
int32x2x3_t; - unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t; - unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t; - unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t; - unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t; - unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t; - unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t; - unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t; - unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t; - unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t; - unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t; - unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t; - unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t; - unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t; - unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t; - unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t; - unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t; - unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t; - unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t; - unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t; - unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t; - unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t; - unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t; - unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t; - unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t; - unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t; - unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t; - unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t; - unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t; - unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t; - unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t; - unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t; - unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t; - unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t; - unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t; - unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t; - unsafe fn vld1q_u32_x4(a: *const u32) -> 
uint32x4x4_t; - unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t; - unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t; - unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t; - unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t; - unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t; - unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t; - unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t; - unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t; - unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t; - unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t; - unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t; - unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t; - unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t; - unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t; - unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t; - unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t; - unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t; - unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t; - unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t; - unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t; - unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t; - unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t; - unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t; - unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t; - unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t; - unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t; - unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t; - unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t; - unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t; - unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t; - unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t; - unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t; - unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t; - unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t; - unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t; - unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t; - unsafe fn vld2_u64(a: *const 
u64) -> uint64x1x2_t; - unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t; - unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t; - unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t; - unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t; - unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t; - unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t; - unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t; - unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t; - unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t; - unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t; - unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t; - unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t; - unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t; - unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t; - unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t; - unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t; - unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t; - unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t; - unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t; - unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t; - unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t; - unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t; - unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8x8x2_t; - unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> int16x4x2_t; - unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> int32x2x2_t; - unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> int16x8x2_t; - unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> int32x4x2_t; - unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t; - unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t; - unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t; - unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t; - unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t; - 
unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t; - unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t; - unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t; - unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> float32x2x2_t; - unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - -> float32x4x2_t; - unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t; - unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t; - unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t; - unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t; - unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t; - unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t; - unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t; - unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t; - unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t; - unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t; - unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t; - unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t; - unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t; - unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t; - unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t; - unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t; - unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t; - unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t; - unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t; - unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t; - unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t; - unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t; - unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t; - unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t; - unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t; - unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t; - unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t; - unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t; - unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t; - unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t; - unsafe fn 
vld3q_dup_u8(a: *const u8) -> uint8x16x3_t; - unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t; - unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t; - unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t; - unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t; - unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t; - unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t; - unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t; - unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t; - unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t; - unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8x8x3_t; - unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> int16x4x3_t; - unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> int32x2x3_t; - unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> int16x8x3_t; - unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> int32x4x3_t; - unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t; - unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t; - unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t; - unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t; - unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t; - unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t; - unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t; - unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t; - unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> float32x2x3_t; - unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - -> float32x4x3_t; - unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t; - unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t; - unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t; - unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t; - unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t; - unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t; - unsafe fn 
vld4_s64(a: *const i64) -> int64x1x4_t; - unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t; - unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t; - unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t; - unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t; - unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t; - unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t; - unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t; - unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t; - unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t; - unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t; - unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t; - unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t; - unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t; - unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t; - unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t; - unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t; - unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t; - unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t; - unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t; - unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t; - unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t; - unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t; - unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t; - unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t; - unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t; - unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t; - unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t; - unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t; - unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t; - unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t; - unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t; - unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t; - unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t; - unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8x8x4_t; - unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> int16x4x4_t; - 
unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> int32x2x4_t; - unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> int16x8x4_t; - unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> int32x4x4_t; - unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t; - unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t; - unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t; - unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t; - unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t; - unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t; - unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t; - unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t; - unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> float32x2x4_t; - unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - -> float32x4x4_t; - unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t); - unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t); - unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t); - unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t); - unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t); - unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t); - unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t); - unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t); - unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t); - unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t); - unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t); - unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t); - unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t); - unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t); - unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t); - unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t); - unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t); - unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t); - unsafe fn vst1q_lane_p8(a: *mut p8, b: 
poly8x16_t); - unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t); - unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t); - unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t); - unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t); - unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t); - unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t); - unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t); - unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t); - unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t); - unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t); - unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t); - unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t); - unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t); - unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t); - unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t); - unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t); - unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t); - unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t); - unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t); - unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t); - unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t); - unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t); - unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t); - unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t); - unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t); - unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t); - unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t); - unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t); - unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t); - unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t); - unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t); - unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t); - unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t); - unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t); - unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t); - unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t); - unsafe fn vst1_u8_x3(a: 
*mut u8, b: uint8x8x3_t); - unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t); - unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t); - unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t); - unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t); - unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t); - unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t); - unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t); - unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t); - unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t); - unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t); - unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t); - unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t); - unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t); - unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t); - unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t); - unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t); - unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t); - unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t); - unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t); - unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t); - unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t); - unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t); - unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t); - unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t); - unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t); - unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t); - unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t); - unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t); - unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t); - unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t); - unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t); - unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t); - unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t); - unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t); - unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t); - unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t); - 
unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t); - unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t); - unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t); - unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t); - unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t); - unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t); - unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t); - unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t); - unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t); - unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t); - unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t); - unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t); - unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t); - unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t); - unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t); - unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t); - unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t); - unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t); - unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t); - unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t); - unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t); - unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t); - unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t); - unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t); - unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t); - unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t); - unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t); - unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t); - unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t); - unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t); - unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t); - unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t); - unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t); - unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t); - unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t); - unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t); - unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t); - unsafe fn 
vst3q_s32(a: *mut i32, b: int32x4x3_t); - unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t); - unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t); - unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t); - unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t); - unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t); - unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t); - unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t); - unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t); - unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t); - unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t); - unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t); - unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t); - unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t); - unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t); - unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t); - unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t); - unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t); - unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t); - unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t); - unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t); - unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t); - unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t); - unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t); - unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t); - unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t); - unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t); - unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t); - unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t); - unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t); - unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t); - unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t); - unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t); - unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t); - unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t); - unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t); - unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t); - unsafe fn vst4_u8(a: *mut 
u8, b: uint8x8x4_t); - unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t); - unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t); - unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t); - unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t); - unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t); - unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t); - unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t); - unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t); - unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t); - unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t); - unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t); - unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t); - unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t); - unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t); - unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t); - unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t); - unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t); - unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t); - unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t); - unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t); - unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t); - unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t); - unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t); - unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t); - unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t); - unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t); - unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t); - fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> 
uint16x4_t; - fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t; - fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t; - fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t; - fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t; - fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t; - fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t; - fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t; - fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t; - fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t; - fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t; - fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t; - fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t; - fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t; - fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t; - fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t; - fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t; - fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t; - fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t; - fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn 
vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t; - fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t; - fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t; - fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t; - fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t; - fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t; - fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t; - fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t; - fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t; - fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t; - fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t; - fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t; - fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t; - fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t; - fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t; - fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t; - fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t; - fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t; - fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t; - fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t; - fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> 
float32x4_t; - fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t; - fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t; - fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t; - fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t; - fn vaddq_p128(a: p128, b: p128) -> p128; - fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; - fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; - fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; - fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t; - fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t; - fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t; - fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t; - fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t; - fn vsubhn_high_s64(a: int32x2_t, b: 
int64x2_t, c: int64x2_t) -> int32x4_t; - fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t; - fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t; - fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t; - fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t; - fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t; - fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t; - fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t; - fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t; - fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t; - fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t; - fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t; - fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t; - fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t; - fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> 
int32x4_t; - fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t; - fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t; - fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - fn vqdmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - fn vqdmlal_s16(a: 
int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t; - fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t; - fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t; - fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t; - fn vqdmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t; - fn vqdmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t; - fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t; - fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t; - fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t; - fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t; - fn vqdmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t; - fn vqdmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t; - fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t; - fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t; - fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t; - fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t; - fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t; - fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t; - fn vqmovn_s16(a: int16x8_t) -> int8x8_t; - fn vqmovn_s32(a: int32x4_t) -> int16x4_t; - fn vqmovn_s64(a: int64x2_t) -> int32x2_t; - fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t; - fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t; - fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t; - fn vqmovun_s16(a: int16x8_t) -> uint8x8_t; - fn vqmovun_s32(a: int32x4_t) -> uint16x4_t; - fn vqmovun_s64(a: int64x2_t) -> uint32x2_t; - fn 
vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t; - fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t; - fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t; - fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t; - fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t; - fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t; - fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t; - fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t; - fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; - fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; - fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; - fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; - fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; - fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; - fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; - fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; - fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t; - fn 
vqrshrn_n_s32(a: int32x4_t) -> int16x4_t; - fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t; - fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t; - fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t; - fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t; - fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t; - fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t; - fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t; - fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; - fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; - fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; - fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; - fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; - fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; - fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; - fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; - fn vqshl_n_s8(a: int8x8_t) -> int8x8_t; - fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t; - fn vqshl_n_s16(a: int16x4_t) -> int16x4_t; - fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t; - fn vqshl_n_s32(a: int32x2_t) -> int32x2_t; - fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t; - fn vqshl_n_s64(a: int64x1_t) -> int64x1_t; - fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t; - fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t; - fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t; - fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t; - fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t; - fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t; - fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t; - fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t; - fn 
vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t; - fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t; - fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t; - fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t; - fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t; - fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t; - fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t; - fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t; - fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t; - fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t; - fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t; - fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t; - fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t; - fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t; - fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t; - fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t; - fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t; - fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t; - fn vrsqrte_f32(a: float32x2_t) -> float32x2_t; - fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t; - fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t; - fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t; - fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vrecpe_f32(a: float32x2_t) -> float32x2_t; - fn vrecpeq_f32(a: float32x4_t) -> float32x4_t; - fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t; - fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t; - fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t; - fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t; - fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t; - fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t; - fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t; - fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t; - fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t; - fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t; - fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t; - fn 
vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t; - fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t; - fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t; - fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t; - fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t; - fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t; - fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t; - fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t; - fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t; - fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t; - fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t; - fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t; - fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t; - fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t; - fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t; - fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t; - fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t; - fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t; - fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t; - fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t; - fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t; - fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t; - fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t; - fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t; - fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t; - fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t; - fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t; - fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t; - fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t; - fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t; - fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t; - fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t; - fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t; - fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t; - fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t; - fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t; - fn vreinterpretq_s32_u64(a: 
uint64x2_t) -> int32x4_t; - fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t; - fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t; - fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t; - fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t; - fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t; - fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t; - fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t; - fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t; - fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t; - fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t; - fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t; - fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t; - fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t; - fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t; - fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t; - fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t; - fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t; - fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t; - fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t; - fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t; - fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t; - fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t; - fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t; - fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t; - fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t; - fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t; - fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t; - fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t; - fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t; - fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t; - fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t; - fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t; - fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t; - fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t; - fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t; - fn vreinterpretq_s16_u8(a: uint8x16_t) -> 
int16x8_t; - fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t; - fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t; - fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t; - fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t; - fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t; - fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t; - fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t; - fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t; - fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t; - fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t; - fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t; - fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t; - fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t; - fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t; - fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t; - fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t; - fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t; - fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t; - fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t; - fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t; - fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t; - fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t; - fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t; - fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t; - fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t; - fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t; - fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t; - fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t; - fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t; - fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t; - fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t; - fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t; - fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t; - fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t; - fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t; - fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t; - fn 
vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t; - fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t; - fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t; - fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t; - fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t; - fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t; - fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t; - fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t; - fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t; - fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t; - fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t; - fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t; - fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t; - fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t; - fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t; - fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t; - fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t; - fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t; - fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t; - fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t; - fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t; - fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t; - fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t; - fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t; - fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t; - fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t; - fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t; - fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t; - fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t; - fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t; - fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t; - fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t; - fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t; - fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t; - fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t; - fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t; - fn 
vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t; - fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t; - fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t; - fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t; - fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t; - fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t; - fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t; - fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t; - fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t; - fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t; - fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t; - fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t; - fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t; - fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t; - fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t; - fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t; - fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t; - fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t; - fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t; - fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t; - fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t; - fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t; - fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t; - fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t; - fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t; - fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t; - fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t; - fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t; - fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t; - fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t; - fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t; - fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t; - fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t; - fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t; - fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t; - fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t; - fn vreinterpret_u16_f32(a: 
float32x2_t) -> uint16x4_t; - fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t; - fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t; - fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t; - fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t; - fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t; - fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t; - fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t; - fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t; - fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t; - fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t; - fn vreinterpretq_p128_f32(a: float32x4_t) -> p128; - fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t; - fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t; - fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t; - fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t; - fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t; - fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t; - fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t; - fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t; - fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t; - fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t; - fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t; - fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t; - fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t; - fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t; - fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t; - fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t; - fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t; - fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t; - fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t; - fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t; - fn vreinterpretq_f32_p128(a: p128) -> float32x4_t; - fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> 
int16x4_t; - fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; - fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; - fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; - fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; - fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; - fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; - fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; - fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; - fn vrshr_n_s8(a: int8x8_t) -> int8x8_t; - fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t; - fn vrshr_n_s16(a: int16x4_t) -> int16x4_t; - fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t; - fn vrshr_n_s32(a: int32x2_t) -> int32x2_t; - fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t; - fn vrshr_n_s64(a: int64x1_t) -> int64x1_t; - fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t; - fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t; - fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t; - fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t; - fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t; - fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t; - fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t; - fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t; - fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t; - fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t; - fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t; - fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t; - fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t; - fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t; - fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t; - fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vrsraq_n_s16(a: int16x8_t, b: 
int16x8_t) -> int16x8_t; - fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; - fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; - fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; - fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t; - fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t; - fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t; - fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t; - fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t; - fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t; - fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t; - fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t; - fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t; - fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t; - fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_t; - fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t; - fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t; - fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t; - fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t; - fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t; - fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t; - fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t; - fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> 
uint16x8_t; - fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t; - fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t; - fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t; - fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t; - fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t; - fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t; - fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; - fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; - fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; - fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; - fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; - fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; - fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; - fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; - fn vshl_n_s8(a: int8x8_t) -> int8x8_t; - fn vshlq_n_s8(a: int8x16_t) -> int8x16_t; - fn vshl_n_s16(a: int16x4_t) -> int16x4_t; - fn vshlq_n_s16(a: int16x8_t) -> int16x8_t; - fn vshl_n_s32(a: int32x2_t) -> int32x2_t; - fn vshlq_n_s32(a: int32x4_t) -> int32x4_t; - fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t; - fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t; - fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t; - fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t; - fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t; - fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t; - fn vshl_n_s64(a: int64x1_t) -> int64x1_t; - fn vshlq_n_s64(a: int64x2_t) -> int64x2_t; - fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t; - fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t; - fn 
vshll_n_s8(a: int8x8_t) -> int16x8_t; - fn vshll_n_s16(a: int16x4_t) -> int32x4_t; - fn vshll_n_s32(a: int32x2_t) -> int64x2_t; - fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t; - fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t; - fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t; - fn vshr_n_s8(a: int8x8_t) -> int8x8_t; - fn vshrq_n_s8(a: int8x16_t) -> int8x16_t; - fn vshr_n_s16(a: int16x4_t) -> int16x4_t; - fn vshrq_n_s16(a: int16x8_t) -> int16x8_t; - fn vshr_n_s32(a: int32x2_t) -> int32x2_t; - fn vshrq_n_s32(a: int32x4_t) -> int32x4_t; - fn vshr_n_s64(a: int64x1_t) -> int64x1_t; - fn vshrq_n_s64(a: int64x2_t) -> int64x2_t; - fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t; - fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t; - fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t; - fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t; - fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t; - fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t; - fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t; - fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t; - fn vshrn_n_s16(a: int16x8_t) -> int8x8_t; - fn vshrn_n_s32(a: int32x4_t) -> int16x4_t; - fn vshrn_n_s64(a: int64x2_t) -> int32x2_t; - fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t; - fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t; - fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t; - fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vsra_n_u32(a: uint32x2_t, b: 
uint32x2_t) -> uint32x2_t; - fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t; - fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t; - fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t; - fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t; - fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t; - fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t; - fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t; - fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t; - fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t; - fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t; - fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t; - fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t; - fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t; - fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t; - fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t; - fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t; - fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t; - fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t; - fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t; - fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t; - fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t; - fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t; - fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t; - fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t; - fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t; - fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t; - fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t; - fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t; - fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t; - fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t; - 
fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t; - fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t; - fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t; - fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t; - fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t; - fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t; - fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t; - fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t; - fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t; - fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t; - fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t; - fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t; - fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t; - fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t; - fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t; - fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t; - fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t; - fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t; - fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t; - fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t; - fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t; - fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t; - fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t; - fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t; - fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t; - fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t; - fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t; - fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t; - fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t; - fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t; - fn vqabs_s8(a: int8x8_t) -> int8x8_t; - fn vqabsq_s8(a: int8x16_t) -> int8x16_t; - fn vqabs_s16(a: int16x4_t) 
-> int16x4_t; - fn vqabsq_s16(a: int16x8_t) -> int16x8_t; - fn vqabs_s32(a: int32x2_t) -> int32x2_t; - fn vqabsq_s32(a: int32x4_t) -> int32x4_t; - - unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> int8x8_t; - unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> int8x16_t; - unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> int16x4_t; - unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) -> int16x8_t; - unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> int32x2_t; - unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) -> int32x4_t; - unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> int64x1_t; - unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) -> int64x2_t; - unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> uint8x8_t; - unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> uint8x16_t; - unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) -> uint16x4_t; - unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) -> uint16x8_t; - unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) -> uint32x2_t; - unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) -> uint32x4_t; - unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) -> uint64x1_t; - unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) -> uint64x2_t; - unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> poly8x8_t; - unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> poly8x16_t; - unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) -> poly16x4_t; - unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) -> poly16x8_t; - unsafe fn vld1_lane_f32(ptr: *const f32, src: float32x2_t) -> float32x2_t; - unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t) - -> float32x4_t; - unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t; - unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t; - unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t; - unsafe fn vld1q_dup_s16(ptr: *const i16) -> 
int16x8_t; - unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t; - unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t; - unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t; - unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t; - unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t; - unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t; - unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t; - unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t; - unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t; - unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t; - unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t; - unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t; - unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t; - unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t; - unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t; - unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t; - unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t; - unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t; - fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t; - fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t; - fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t; - fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t; - fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t; - fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; - fn vabs_s8(a: int8x8_t) -> int8x8_t; - fn vabs_s16(a: int16x4_t) -> int16x4_t; - fn vabs_s32(a: 
int32x2_t) -> int32x2_t; - fn vabsq_s8(a: int8x16_t) -> int8x16_t; - fn vabsq_s16(a: int16x8_t) -> int16x8_t; - fn vabsq_s32(a: int32x4_t) -> int32x4_t; - fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t; - fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t; - fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t; - fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t; - fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t; - fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t; - fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t; - fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t; - fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t; - fn vaddl_high_u8(a: uint8x16_t, b: 
uint8x16_t) -> uint16x8_t; - fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t; - fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t; - fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t; - fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t; - fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t; - fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t; - fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t; - fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t; - fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t; - fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t; - fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t; - fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t; - fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t; - fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t; - fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; - fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; - fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; - fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t; - fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t; - fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t; - fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t; - fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t; - fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t; - fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t; - fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t; - fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t; - fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t; - fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t; - fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t; - fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t; - fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> 
uint16x4_t; - fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t; - fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t; - fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t; - fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t; - fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t; - fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t; - fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t; - fn vpaddl_s8(a: int8x8_t) -> int16x4_t; - fn vpaddl_s16(a: int16x4_t) -> int32x2_t; - fn vpaddl_s32(a: int32x2_t) -> int64x1_t; - fn vpaddlq_s8(a: int8x16_t) -> int16x8_t; - fn vpaddlq_s16(a: int16x8_t) -> int32x4_t; - fn vpaddlq_s32(a: int32x4_t) -> int64x2_t; - fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t; - fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t; - fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t; - fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t; - fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t; - fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t; - fn vmovn_s16(a: int16x8_t) -> int8x8_t; - fn vmovn_s32(a: int32x4_t) -> int16x4_t; - fn vmovn_s64(a: int64x2_t) -> int32x2_t; - fn vmovn_u16(a: uint16x8_t) -> uint8x8_t; - fn vmovn_u32(a: uint32x4_t) -> uint16x4_t; - fn vmovn_u64(a: uint64x2_t) -> uint32x2_t; - fn vmovl_s8(a: int8x8_t) -> int16x8_t; - fn vmovl_s16(a: int16x4_t) -> int32x4_t; - fn vmovl_s32(a: int32x2_t) -> int64x2_t; - fn vmovl_u8(a: uint8x8_t) -> uint16x8_t; - fn vmovl_u16(a: uint16x4_t) -> uint32x4_t; - fn vmovl_u32(a: uint32x2_t) -> uint64x2_t; - fn vmvn_s8(a: int8x8_t) -> int8x8_t; - fn vmvnq_s8(a: int8x16_t) -> int8x16_t; - fn vmvn_s16(a: int16x4_t) -> int16x4_t; - fn vmvnq_s16(a: int16x8_t) -> int16x8_t; - fn vmvn_s32(a: int32x2_t) -> int32x2_t; - fn vmvnq_s32(a: int32x4_t) -> int32x4_t; - fn vmvn_u8(a: uint8x8_t) -> uint8x8_t; - fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t; - fn vmvn_u16(a: uint16x4_t) -> uint16x4_t; - fn 
vmvnq_u16(a: uint16x8_t) -> uint16x8_t; - fn vmvn_u32(a: uint32x2_t) -> uint32x2_t; - fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t; - fn vmvn_p8(a: poly8x8_t) -> poly8x8_t; - fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t; - fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t; - fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t; - fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t; - fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t; - fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t; - fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t; - fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t; - fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t; - fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t; - fn 
vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t; - fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t; - fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t; - fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t; - fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t; - fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t; - fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t; - fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t; - fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - 
fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vgetq_lane_u64(v: uint64x2_t) -> u64; - fn vget_lane_u64(v: uint64x1_t) -> u64; - fn vget_lane_u16(v: uint16x4_t) -> u16; - fn vget_lane_s16(v: int16x4_t) -> i16; - fn vget_lane_p16(v: poly16x4_t) -> p16; - fn vget_lane_u32(v: uint32x2_t) -> u32; - fn vget_lane_s32(v: int32x2_t) -> i32; - fn vget_lane_f32(v: float32x2_t) -> f32; - fn vgetq_lane_f32(v: float32x4_t) -> f32; - fn vget_lane_p64(v: poly64x1_t) -> p64; - fn vgetq_lane_p64(v: poly64x2_t) -> p64; - fn vget_lane_s64(v: int64x1_t) -> i64; - fn vgetq_lane_s64(v: int64x2_t) -> i64; - fn vgetq_lane_u16(v: uint16x8_t) -> u16; - fn vgetq_lane_u32(v: uint32x4_t) -> u32; - fn vgetq_lane_s16(v: int16x8_t) -> i16; - fn vgetq_lane_p16(v: poly16x8_t) -> p16; - fn vgetq_lane_s32(v: int32x4_t) -> i32; - fn vget_lane_u8(v: uint8x8_t) -> u8; - fn vget_lane_s8(v: int8x8_t) -> i8; - fn vget_lane_p8(v: poly8x8_t) -> p8; - fn vgetq_lane_u8(v: uint8x16_t) -> u8; - fn vgetq_lane_s8(v: int8x16_t) -> i8; - fn vgetq_lane_p8(v: poly8x16_t) -> p8; - fn vget_high_s8(a: int8x16_t) -> int8x8_t; - fn vget_high_s16(a: int16x8_t) -> int16x4_t; - fn vget_high_s32(a: int32x4_t) -> int32x2_t; - fn vget_high_s64(a: int64x2_t) -> int64x1_t; - fn vget_high_u8(a: uint8x16_t) -> uint8x8_t; - fn vget_high_u16(a: uint16x8_t) -> uint16x4_t; - fn vget_high_u32(a: uint32x4_t) -> uint32x2_t; - fn vget_high_u64(a: uint64x2_t) -> uint64x1_t; - fn vget_high_p8(a: poly8x16_t) -> poly8x8_t; - fn vget_high_p16(a: poly16x8_t) -> poly16x4_t; - fn vget_high_f32(a: float32x4_t) -> float32x2_t; - fn 
vget_low_s8(a: int8x16_t) -> int8x8_t; - fn vget_low_s16(a: int16x8_t) -> int16x4_t; - fn vget_low_s32(a: int32x4_t) -> int32x2_t; - fn vget_low_s64(a: int64x2_t) -> int64x1_t; - fn vget_low_u8(a: uint8x16_t) -> uint8x8_t; - fn vget_low_u16(a: uint16x8_t) -> uint16x4_t; - fn vget_low_u32(a: uint32x4_t) -> uint32x2_t; - fn vget_low_u64(a: uint64x2_t) -> uint64x1_t; - fn vget_low_p8(a: poly8x16_t) -> poly8x8_t; - fn vget_low_p16(a: poly16x8_t) -> poly16x4_t; - fn vget_low_f32(a: float32x4_t) -> float32x2_t; - fn vdupq_n_s8(value: i8) -> int8x16_t; - fn vdupq_n_s16(value: i16) -> int16x8_t; - fn vdupq_n_s32(value: i32) -> int32x4_t; - fn vdupq_n_s64(value: i64) -> int64x2_t; - fn vdupq_n_u8(value: u8) -> uint8x16_t; - fn vdupq_n_u16(value: u16) -> uint16x8_t; - fn vdupq_n_u32(value: u32) -> uint32x4_t; - fn vdupq_n_u64(value: u64) -> uint64x2_t; - fn vdupq_n_p8(value: p8) -> poly8x16_t; - fn vdupq_n_p16(value: p16) -> poly16x8_t; - fn vdupq_n_f32(value: f32) -> float32x4_t; - fn vdup_n_s8(value: i8) -> int8x8_t; - fn vdup_n_s16(value: i16) -> int16x4_t; - fn vdup_n_s32(value: i32) -> int32x2_t; - fn vdup_n_s64(value: i64) -> int64x1_t; - fn vdup_n_u8(value: u8) -> uint8x8_t; - fn vdup_n_u16(value: u16) -> uint16x4_t; - fn vdup_n_u32(value: u32) -> uint32x2_t; - fn vdup_n_u64(value: u64) -> uint64x1_t; - fn vdup_n_p8(value: p8) -> poly8x8_t; - fn vdup_n_p16(value: p16) -> poly16x4_t; - fn vdup_n_f32(value: f32) -> float32x2_t; - unsafe fn vldrq_p128(a: *const p128) -> p128; - unsafe fn vstrq_p128(a: *mut p128, b: p128); - fn vmov_n_s8(value: i8) -> int8x8_t; - fn vmov_n_s16(value: i16) -> int16x4_t; - fn vmov_n_s32(value: i32) -> int32x2_t; - fn vmov_n_s64(value: i64) -> int64x1_t; - fn vmov_n_u8(value: u8) -> uint8x8_t; - fn vmov_n_u16(value: u16) -> uint16x4_t; - fn vmov_n_u32(value: u32) -> uint32x2_t; - fn vmov_n_u64(value: u64) -> uint64x1_t; - fn vmov_n_p8(value: p8) -> poly8x8_t; - fn vmov_n_p16(value: p16) -> poly16x4_t; - fn vmov_n_f32(value: f32) -> 
float32x2_t; - fn vmovq_n_s8(value: i8) -> int8x16_t; - fn vmovq_n_s16(value: i16) -> int16x8_t; - fn vmovq_n_s32(value: i32) -> int32x4_t; - fn vmovq_n_s64(value: i64) -> int64x2_t; - fn vmovq_n_u8(value: u8) -> uint8x16_t; - fn vmovq_n_u16(value: u16) -> uint16x8_t; - fn vmovq_n_u32(value: u32) -> uint32x4_t; - fn vmovq_n_u64(value: u64) -> uint64x2_t; - fn vmovq_n_p8(value: p8) -> poly8x16_t; - fn vmovq_n_p16(value: p16) -> poly16x8_t; - fn vmovq_n_f32(value: f32) -> float32x4_t; - fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t; - fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t; - fn vcnt_s8(a: int8x8_t) -> int8x8_t; - fn vcntq_s8(a: int8x16_t) -> int8x16_t; - fn vcnt_u8(a: uint8x8_t) -> uint8x8_t; - fn vcntq_u8(a: uint8x16_t) -> uint8x16_t; - fn vcnt_p8(a: poly8x8_t) -> poly8x8_t; - fn vcntq_p8(a: poly8x16_t) -> poly8x16_t; - fn vrev16_s8(a: int8x8_t) -> int8x8_t; - fn vrev16q_s8(a: int8x16_t) -> int8x16_t; - fn vrev16_u8(a: uint8x8_t) -> uint8x8_t; - fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t; - fn vrev16_p8(a: poly8x8_t) -> poly8x8_t; - fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t; - fn vrev32_s8(a: int8x8_t) -> int8x8_t; - fn vrev32q_s8(a: int8x16_t) -> int8x16_t; - fn vrev32_u8(a: uint8x8_t) -> uint8x8_t; - fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t; - fn vrev32_s16(a: int16x4_t) -> int16x4_t; - fn vrev32q_s16(a: int16x8_t) -> int16x8_t; - fn vrev32_p16(a: poly16x4_t) -> poly16x4_t; - fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t; - fn vrev32_u16(a: uint16x4_t) -> uint16x4_t; - fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t; - fn vrev32_p8(a: poly8x8_t) -> poly8x8_t; - fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t; - fn vrev64_s8(a: int8x8_t) -> int8x8_t; - fn vrev64q_s8(a: int8x16_t) -> int8x16_t; - fn vrev64_s16(a: int16x4_t) -> int16x4_t; - fn vrev64q_s16(a: int16x8_t) -> int16x8_t; - fn vrev64_s32(a: int32x2_t) -> int32x2_t; - fn vrev64q_s32(a: int32x4_t) -> int32x4_t; - fn vrev64_u8(a: uint8x8_t) -> uint8x8_t; - fn vrev64q_u8(a: uint8x16_t) -> 
uint8x16_t; - fn vrev64_u16(a: uint16x4_t) -> uint16x4_t; - fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t; - fn vrev64_u32(a: uint32x2_t) -> uint32x2_t; - fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t; - fn vrev64_f32(a: float32x2_t) -> float32x2_t; - fn vrev64q_f32(a: float32x4_t) -> float32x4_t; - fn vrev64_p8(a: poly8x8_t) -> poly8x8_t; - fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t; - fn vrev64_p16(a: poly16x4_t) -> poly16x4_t; - fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t; - fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t; - fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t; - fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t; - fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t; - fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t; - fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t; - fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t; - fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t; - fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t; - fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t; - fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t; - fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t; - fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t; - fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t; - fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t; - fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t; - fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t; - fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t; - fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t; - fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t; - fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t; - fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t; - fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t; - fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t; - - fn 
vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vabds_f32(a: f32, b: f32) -> f32; - fn vabdd_f64(a: f64, b: f64) -> f64; - fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t; - fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t; - fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t; - fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t; - fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t; - fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t; - fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t; - fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t; - fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t; - fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t; - fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t; - fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t; - fn vceqd_s64(a: i64, b: i64) -> u64; - fn vceqd_u64(a: u64, b: u64) -> u64; - fn vceqs_f32(a: f32, b: f32) -> u32; - fn vceqd_f64(a: f64, b: f64) -> u64; - fn vceqz_s8(a: int8x8_t) -> uint8x8_t; - fn vceqzq_s8(a: int8x16_t) -> uint8x16_t; - fn vceqz_s16(a: int16x4_t) -> uint16x4_t; - fn vceqzq_s16(a: int16x8_t) -> uint16x8_t; - fn vceqz_s32(a: int32x2_t) -> uint32x2_t; - fn vceqzq_s32(a: int32x4_t) -> uint32x4_t; - fn vceqz_s64(a: int64x1_t) -> uint64x1_t; - fn vceqzq_s64(a: int64x2_t) -> uint64x2_t; - fn vceqz_p8(a: poly8x8_t) -> uint8x8_t; - fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t; - fn vceqz_p64(a: poly64x1_t) -> uint64x1_t; - fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t; - fn vceqz_u8(a: uint8x8_t) -> uint8x8_t; - fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t; - fn vceqz_u16(a: uint16x4_t) -> uint16x4_t; - fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t; - fn vceqz_u32(a: uint32x2_t) -> uint32x2_t; - fn vceqzq_u32(a: uint32x4_t) -> 
uint32x4_t; - fn vceqz_u64(a: uint64x1_t) -> uint64x1_t; - fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t; - fn vceqz_f32(a: float32x2_t) -> uint32x2_t; - fn vceqzq_f32(a: float32x4_t) -> uint32x4_t; - fn vceqz_f64(a: float64x1_t) -> uint64x1_t; - fn vceqzq_f64(a: float64x2_t) -> uint64x2_t; - fn vceqzd_s64(a: i64) -> u64; - fn vceqzd_u64(a: u64) -> u64; - fn vceqzs_f32(a: f32) -> u32; - fn vceqzd_f64(a: f64) -> u64; - fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t; - fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t; - fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t; - fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t; - fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vtstd_s64(a: i64, b: i64) -> u64; - fn vtstd_u64(a: u64, b: u64) -> u64; - fn vuqadds_s32(a: i32, b: u32) -> i32; - fn vuqaddd_s64(a: i64, b: u64) -> i64; - fn vuqaddb_s8(a: i8, b: u8) -> i8; - fn vuqaddh_s16(a: i16, b: u16) -> i16; - fn vabs_f64(a: float64x1_t) -> float64x1_t; - fn vabsq_f64(a: float64x2_t) -> float64x2_t; - fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t; - fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t; - fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t; - fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t; - fn vcgtd_s64(a: i64, b: i64) -> u64; - fn vcgtd_u64(a: u64, b: u64) -> u64; - fn vcgts_f32(a: f32, b: f32) -> u32; - fn vcgtd_f64(a: f64, b: f64) -> u64; - fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t; - fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t; - fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t; - fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t; - fn vcltd_s64(a: i64, b: 
i64) -> u64; - fn vcltd_u64(a: u64, b: u64) -> u64; - fn vclts_f32(a: f32, b: f32) -> u32; - fn vcltd_f64(a: f64, b: f64) -> u64; - fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t; - fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t; - fn vcged_s64(a: i64, b: i64) -> u64; - fn vcged_u64(a: u64, b: u64) -> u64; - fn vcges_f32(a: f32, b: f32) -> u32; - fn vcged_f64(a: f64, b: f64) -> u64; - fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t; - fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t; - fn vcled_s64(a: i64, b: i64) -> u64; - fn vcled_u64(a: u64, b: u64) -> u64; - fn vcles_f32(a: f32, b: f32) -> u32; - fn vcled_f64(a: f64, b: f64) -> u64; - fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t; - fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t; - fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t; - fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t; - fn vcgez_s8(a: int8x8_t) -> uint8x8_t; - fn vcgezq_s8(a: int8x16_t) -> uint8x16_t; - fn vcgez_s16(a: int16x4_t) -> uint16x4_t; - fn vcgezq_s16(a: int16x8_t) -> uint16x8_t; - fn vcgez_s32(a: int32x2_t) -> uint32x2_t; - fn vcgezq_s32(a: int32x4_t) -> uint32x4_t; - fn vcgez_s64(a: int64x1_t) -> uint64x1_t; - fn vcgezq_s64(a: int64x2_t) -> uint64x2_t; - fn vcgez_f32(a: float32x2_t) -> uint32x2_t; - fn vcgezq_f32(a: float32x4_t) -> uint32x4_t; - fn vcgez_f64(a: float64x1_t) -> uint64x1_t; - fn vcgezq_f64(a: float64x2_t) -> uint64x2_t; - fn vcgezd_s64(a: i64) -> u64; - fn vcgezs_f32(a: f32) -> u32; - fn vcgezd_f64(a: f64) -> u64; - fn vcgtz_s8(a: int8x8_t) -> uint8x8_t; - fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t; - fn vcgtz_s16(a: int16x4_t) -> uint16x4_t; - fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t; - fn vcgtz_s32(a: int32x2_t) -> 
uint32x2_t; - fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t; - fn vcgtz_s64(a: int64x1_t) -> uint64x1_t; - fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t; - fn vcgtz_f32(a: float32x2_t) -> uint32x2_t; - fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t; - fn vcgtz_f64(a: float64x1_t) -> uint64x1_t; - fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t; - fn vcgtzd_s64(a: i64) -> u64; - fn vcgtzs_f32(a: f32) -> u32; - fn vcgtzd_f64(a: f64) -> u64; - fn vclez_s8(a: int8x8_t) -> uint8x8_t; - fn vclezq_s8(a: int8x16_t) -> uint8x16_t; - fn vclez_s16(a: int16x4_t) -> uint16x4_t; - fn vclezq_s16(a: int16x8_t) -> uint16x8_t; - fn vclez_s32(a: int32x2_t) -> uint32x2_t; - fn vclezq_s32(a: int32x4_t) -> uint32x4_t; - fn vclez_s64(a: int64x1_t) -> uint64x1_t; - fn vclezq_s64(a: int64x2_t) -> uint64x2_t; - fn vclez_f32(a: float32x2_t) -> uint32x2_t; - fn vclezq_f32(a: float32x4_t) -> uint32x4_t; - fn vclez_f64(a: float64x1_t) -> uint64x1_t; - fn vclezq_f64(a: float64x2_t) -> uint64x2_t; - fn vclezd_s64(a: i64) -> u64; - fn vclezs_f32(a: f32) -> u32; - fn vclezd_f64(a: f64) -> u64; - fn vcltz_s8(a: int8x8_t) -> uint8x8_t; - fn vcltzq_s8(a: int8x16_t) -> uint8x16_t; - fn vcltz_s16(a: int16x4_t) -> uint16x4_t; - fn vcltzq_s16(a: int16x8_t) -> uint16x8_t; - fn vcltz_s32(a: int32x2_t) -> uint32x2_t; - fn vcltzq_s32(a: int32x4_t) -> uint32x4_t; - fn vcltz_s64(a: int64x1_t) -> uint64x1_t; - fn vcltzq_s64(a: int64x2_t) -> uint64x2_t; - fn vcltz_f32(a: float32x2_t) -> uint32x2_t; - fn vcltzq_f32(a: float32x4_t) -> uint32x4_t; - fn vcltz_f64(a: float64x1_t) -> uint64x1_t; - fn vcltzq_f64(a: float64x2_t) -> uint64x2_t; - fn vcltzd_s64(a: i64) -> u64; - fn vcltzs_f32(a: f32) -> u32; - fn vcltzd_f64(a: f64) -> u64; - fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t; - fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t; - fn vcagts_f32(a: f32, b: f32) -> u32; - fn vcagtd_f64(a: f64, b: f64) -> u64; - fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t; - fn vcageq_f64(a: float64x2_t, b: 
float64x2_t) -> uint64x2_t; - fn vcages_f32(a: f32, b: f32) -> u32; - fn vcaged_f64(a: f64, b: f64) -> u64; - fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t; - fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t; - fn vcalts_f32(a: f32, b: f32) -> u32; - fn vcaltd_f64(a: f64, b: f64) -> u64; - fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t; - fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t; - fn vcales_f32(a: f32, b: f32) -> u32; - fn vcaled_f64(a: f64, b: f64) -> u64; - fn vcopy_lane_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vcopyq_laneq_s8( - a: int8x16_t, - b: int8x16_t, - ) -> int8x16_t; - fn vcopy_lane_s16( - a: int16x4_t, - b: int16x4_t, - ) -> int16x4_t; - fn vcopyq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - ) -> int16x8_t; - fn vcopy_lane_s32( - a: int32x2_t, - b: int32x2_t, - ) -> int32x2_t; - fn vcopyq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - ) -> int32x4_t; - fn vcopyq_laneq_s64( - a: int64x2_t, - b: int64x2_t, - ) -> int64x2_t; - fn vcopy_lane_u8( - a: uint8x8_t, - b: uint8x8_t, - ) -> uint8x8_t; - fn vcopyq_laneq_u8( - a: uint8x16_t, - b: uint8x16_t, - ) -> uint8x16_t; - fn vcopy_lane_u16( - a: uint16x4_t, - b: uint16x4_t, - ) -> uint16x4_t; - fn vcopyq_laneq_u16( - a: uint16x8_t, - b: uint16x8_t, - ) -> uint16x8_t; - fn vcopy_lane_u32( - a: uint32x2_t, - b: uint32x2_t, - ) -> uint32x2_t; - fn vcopyq_laneq_u32( - a: uint32x4_t, - b: uint32x4_t, - ) -> uint32x4_t; - fn vcopyq_laneq_u64( - a: uint64x2_t, - b: uint64x2_t, - ) -> uint64x2_t; - fn vcopy_lane_p8( - a: poly8x8_t, - b: poly8x8_t, - ) -> poly8x8_t; - fn vcopyq_laneq_p8( - a: poly8x16_t, - b: poly8x16_t, - ) -> poly8x16_t; - fn vcopy_lane_p16( - a: poly16x4_t, - b: poly16x4_t, - ) -> poly16x4_t; - fn vcopyq_laneq_p16( - a: poly16x8_t, - b: poly16x8_t, - ) -> poly16x8_t; - fn vcopyq_laneq_p64( - a: poly64x2_t, - b: poly64x2_t, - ) -> poly64x2_t; - fn vcopy_lane_f32( - a: float32x2_t, - b: float32x2_t, - ) -> float32x2_t; - fn 
vcopyq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - ) -> float32x4_t; - fn vcopyq_laneq_f64( - a: float64x2_t, - b: float64x2_t, - ) -> float64x2_t; - fn vcopy_laneq_s8( - a: int8x8_t, - b: int8x16_t, - ) -> int8x8_t; - fn vcopy_laneq_s16( - a: int16x4_t, - b: int16x8_t, - ) -> int16x4_t; - fn vcopy_laneq_s32( - a: int32x2_t, - b: int32x4_t, - ) -> int32x2_t; - fn vcopy_laneq_u8( - a: uint8x8_t, - b: uint8x16_t, - ) -> uint8x8_t; - fn vcopy_laneq_u16( - a: uint16x4_t, - b: uint16x8_t, - ) -> uint16x4_t; - fn vcopy_laneq_u32( - a: uint32x2_t, - b: uint32x4_t, - ) -> uint32x2_t; - fn vcopy_laneq_p8( - a: poly8x8_t, - b: poly8x16_t, - ) -> poly8x8_t; - fn vcopy_laneq_p16( - a: poly16x4_t, - b: poly16x8_t, - ) -> poly16x4_t; - fn vcopy_laneq_f32( - a: float32x2_t, - b: float32x4_t, - ) -> float32x2_t; - fn vcopyq_lane_s8( - a: int8x16_t, - b: int8x8_t, - ) -> int8x16_t; - fn vcopyq_lane_s16( - a: int16x8_t, - b: int16x4_t, - ) -> int16x8_t; - fn vcopyq_lane_s32( - a: int32x4_t, - b: int32x2_t, - ) -> int32x4_t; - fn vcopyq_lane_u8( - a: uint8x16_t, - b: uint8x8_t, - ) -> uint8x16_t; - fn vcopyq_lane_u16( - a: uint16x8_t, - b: uint16x4_t, - ) -> uint16x8_t; - fn vcopyq_lane_u32( - a: uint32x4_t, - b: uint32x2_t, - ) -> uint32x4_t; - fn vcopyq_lane_p8( - a: poly8x16_t, - b: poly8x8_t, - ) -> poly8x16_t; - fn vcopyq_lane_p16( - a: poly16x8_t, - b: poly16x4_t, - ) -> poly16x8_t; - fn vcopyq_lane_s64( - a: int64x2_t, - b: int64x1_t, - ) -> int64x2_t; - fn vcopyq_lane_u64( - a: uint64x2_t, - b: uint64x1_t, - ) -> uint64x2_t; - fn vcopyq_lane_p64( - a: poly64x2_t, - b: poly64x1_t, - ) -> poly64x2_t; - fn vcopyq_lane_f32( - a: float32x4_t, - b: float32x2_t, - ) -> float32x4_t; - fn vcopyq_lane_f64( - a: float64x2_t, - b: float64x1_t, - ) -> float64x2_t; - fn vcreate_f64(a: u64) -> float64x1_t; - fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t; - fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t; - fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t; - fn vcvtq_f64_u64(a: uint64x2_t) -> 
float64x2_t; - fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t; - fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t; - fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t; - fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t; - fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t; - fn vcvtxd_f32_f64(a: f64) -> f32; - fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t; - fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t; - fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t; - fn vcvts_n_f32_s32(a: i32) -> f32; - fn vcvtd_n_f64_s64(a: i64) -> f64; - fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t; - fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t; - fn vcvts_n_f32_u32(a: u32) -> f32; - fn vcvtd_n_f64_u64(a: u64) -> f64; - fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t; - fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t; - fn vcvts_n_s32_f32(a: f32) -> i32; - fn vcvtd_n_s64_f64(a: f64) -> i64; - fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t; - fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t; - fn vcvts_n_u32_f32(a: f32) -> u32; - fn vcvtd_n_u64_f64(a: f64) -> u64; - fn vcvts_f32_s32(a: i32) -> f32; - fn vcvtd_f64_s64(a: i64) -> f64; - fn vcvts_f32_u32(a: u32) -> f32; - fn vcvtd_f64_u64(a: u64) -> f64; - fn vcvts_s32_f32(a: f32) -> i32; - fn vcvtd_s64_f64(a: f64) -> i64; - fn vcvts_u32_f32(a: f32) -> u32; - fn vcvtd_u64_f64(a: f64) -> u64; - fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t; - fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t; - fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t; - fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t; - fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t; - fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t; - fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t; - fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t; - fn vcvtas_s32_f32(a: f32) -> i32; - fn vcvtad_s64_f64(a: f64) -> i64; - fn vcvtas_u32_f32(a: f32) -> u32; - fn vcvtad_u64_f64(a: f64) -> u64; - fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t; - fn vcvtnq_s32_f32(a: 
float32x4_t) -> int32x4_t; - fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t; - fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t; - fn vcvtns_s32_f32(a: f32) -> i32; - fn vcvtnd_s64_f64(a: f64) -> i64; - fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t; - fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t; - fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t; - fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t; - fn vcvtms_s32_f32(a: f32) -> i32; - fn vcvtmd_s64_f64(a: f64) -> i64; - fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t; - fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t; - fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t; - fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t; - fn vcvtps_s32_f32(a: f32) -> i32; - fn vcvtpd_s64_f64(a: f64) -> i64; - fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t; - fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t; - fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t; - fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t; - fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t; - fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t; - fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t; - fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t; - fn vcvtns_u32_f32(a: f32) -> u32; - fn vcvtnd_u64_f64(a: f64) -> u64; - fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t; - fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t; - fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t; - fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t; - fn vcvtms_u32_f32(a: f32) -> u32; - fn vcvtmd_u64_f64(a: f64) -> u64; - fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t; - fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t; - fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t; - fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t; - fn vcvtps_u32_f32(a: f32) -> u32; - fn vcvtpd_u64_f64(a: f64) -> u64; - fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t; - fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t; - fn vdupq_laneq_f64(a: float64x2_t) -> float64x2_t; - fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t; - fn vdup_lane_p64(a: 
poly64x1_t) -> poly64x1_t; - fn vdup_lane_f64(a: float64x1_t) -> float64x1_t; - fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t; - fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t; - fn vdupb_lane_s8(a: int8x8_t) -> i8; - fn vdupb_laneq_s8(a: int8x16_t) -> i8; - fn vduph_lane_s16(a: int16x4_t) -> i16; - fn vduph_laneq_s16(a: int16x8_t) -> i16; - fn vdups_lane_s32(a: int32x2_t) -> i32; - fn vdups_laneq_s32(a: int32x4_t) -> i32; - fn vdupd_lane_s64(a: int64x1_t) -> i64; - fn vdupd_laneq_s64(a: int64x2_t) -> i64; - fn vdupb_lane_u8(a: uint8x8_t) -> u8; - fn vdupb_laneq_u8(a: uint8x16_t) -> u8; - fn vduph_lane_u16(a: uint16x4_t) -> u16; - fn vduph_laneq_u16(a: uint16x8_t) -> u16; - fn vdups_lane_u32(a: uint32x2_t) -> u32; - fn vdups_laneq_u32(a: uint32x4_t) -> u32; - fn vdupd_lane_u64(a: uint64x1_t) -> u64; - fn vdupd_laneq_u64(a: uint64x2_t) -> u64; - fn vdupb_lane_p8(a: poly8x8_t) -> p8; - fn vdupb_laneq_p8(a: poly8x16_t) -> p8; - fn vduph_lane_p16(a: poly16x4_t) -> p16; - fn vduph_laneq_p16(a: poly16x8_t) -> p16; - fn vdups_lane_f32(a: float32x2_t) -> f32; - fn vdups_laneq_f32(a: float32x4_t) -> f32; - fn vdupd_lane_f64(a: float64x1_t) -> f64; - fn vdupd_laneq_f64(a: float64x2_t) -> f64; - fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t; - fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t; - fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t; - fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t; - fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t; - fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t; - fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t; - fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t; - fn vmlal_high_n_s16(a: int32x4_t, b: 
int16x8_t, c: i16) -> int32x4_t; - fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t; - fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t; - fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t; - fn vmlal_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, - ) -> int32x4_t; - fn vmlal_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, - ) -> int32x4_t; - fn vmlal_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, - ) -> int64x2_t; - fn vmlal_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, - ) -> int64x2_t; - fn vmlal_high_lane_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x4_t, - ) -> uint32x4_t; - fn vmlal_high_laneq_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x8_t, - ) -> uint32x4_t; - fn vmlal_high_lane_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x2_t, - ) -> uint64x2_t; - fn vmlal_high_laneq_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x4_t, - ) -> uint64x2_t; - fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t; - fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t; - fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t; - fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t; - fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t; - fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t; - fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t; - fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t; - fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t; - fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t; - fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t; - fn vmlsl_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: 
int16x4_t, - ) -> int32x4_t; - fn vmlsl_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, - ) -> int32x4_t; - fn vmlsl_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, - ) -> int64x2_t; - fn vmlsl_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, - ) -> int64x2_t; - fn vmlsl_high_lane_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x4_t, - ) -> uint32x4_t; - fn vmlsl_high_laneq_u16( - a: uint32x4_t, - b: uint16x8_t, - c: uint16x8_t, - ) -> uint32x4_t; - fn vmlsl_high_lane_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x2_t, - ) -> uint64x2_t; - fn vmlsl_high_laneq_u32( - a: uint64x2_t, - b: uint32x4_t, - c: uint32x4_t, - ) -> uint64x2_t; - fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t; - fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t; - fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t; - fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t; - fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t; - fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t; - fn vneg_s64(a: int64x1_t) -> int64x1_t; - fn vnegq_s64(a: int64x2_t) -> int64x2_t; - fn vnegd_s64(a: i64) -> i64; - fn vneg_f64(a: float64x1_t) -> float64x1_t; - fn vnegq_f64(a: float64x2_t) -> float64x2_t; - fn vqneg_s64(a: int64x1_t) -> int64x1_t; - fn vqnegq_s64(a: int64x2_t) -> int64x2_t; - fn vqnegb_s8(a: i8) -> i8; - fn vqnegh_s16(a: i16) -> i16; - fn vqnegs_s32(a: i32) -> i32; - fn vqnegd_s64(a: i64) -> i64; - fn vqsubb_s8(a: i8, b: i8) -> i8; - fn vqsubh_s16(a: i16, b: i16) -> i16; - fn vqsubb_u8(a: u8, b: u8) -> u8; - fn vqsubh_u16(a: u16, b: u16) -> u16; - fn vqsubs_u32(a: u32, b: u32) -> u32; - fn vqsubd_u64(a: u64, b: u64) -> u64; - fn vqsubs_s32(a: i32, b: i32) -> i32; - fn vqsubd_s64(a: i64, b: i64) -> i64; - fn vrbit_s8(a: int8x8_t) -> int8x8_t; - fn vrbitq_s8(a: int8x16_t) -> int8x16_t; - fn vrbit_u8(a: uint8x8_t) -> uint8x8_t; - fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t; - fn vrbit_p8(a: 
poly8x8_t) -> poly8x8_t; - fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t; - fn vrndx_f32(a: float32x2_t) -> float32x2_t; - fn vrndxq_f32(a: float32x4_t) -> float32x4_t; - fn vrndx_f64(a: float64x1_t) -> float64x1_t; - fn vrndxq_f64(a: float64x2_t) -> float64x2_t; - fn vrnda_f32(a: float32x2_t) -> float32x2_t; - fn vrndaq_f32(a: float32x4_t) -> float32x4_t; - fn vrnda_f64(a: float64x1_t) -> float64x1_t; - fn vrndaq_f64(a: float64x2_t) -> float64x2_t; - fn vrndn_f64(a: float64x1_t) -> float64x1_t; - fn vrndnq_f64(a: float64x2_t) -> float64x2_t; - fn vrndns_f32(a: f32) -> f32; - fn vrndm_f32(a: float32x2_t) -> float32x2_t; - fn vrndmq_f32(a: float32x4_t) -> float32x4_t; - fn vrndm_f64(a: float64x1_t) -> float64x1_t; - fn vrndmq_f64(a: float64x2_t) -> float64x2_t; - fn vrndp_f32(a: float32x2_t) -> float32x2_t; - fn vrndpq_f32(a: float32x4_t) -> float32x4_t; - fn vrndp_f64(a: float64x1_t) -> float64x1_t; - fn vrndpq_f64(a: float64x2_t) -> float64x2_t; - fn vrnd_f32(a: float32x2_t) -> float32x2_t; - fn vrndq_f32(a: float32x4_t) -> float32x4_t; - fn vrnd_f64(a: float64x1_t) -> float64x1_t; - fn vrndq_f64(a: float64x2_t) -> float64x2_t; - fn vrndi_f32(a: float32x2_t) -> float32x2_t; - fn vrndiq_f32(a: float32x4_t) -> float32x4_t; - fn vrndi_f64(a: float64x1_t) -> float64x1_t; - fn vrndiq_f64(a: float64x2_t) -> float64x2_t; - fn vqaddb_s8(a: i8, b: i8) -> i8; - fn vqaddh_s16(a: i16, b: i16) -> i16; - fn vqaddb_u8(a: u8, b: u8) -> u8; - fn vqaddh_u16(a: u16, b: u16) -> u16; - fn vqadds_u32(a: u32, b: u32) -> u32; - fn vqaddd_u64(a: u64, b: u64) -> u64; - fn vqadds_s32(a: i32, b: i32) -> i32; - fn vqaddd_s64(a: i64, b: i64) -> i64; - unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t; - unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t; - unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t; - unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t; - unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t; - unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t; - unsafe 
fn vld2q_s64(a: *const i64) -> int64x2x2_t; - unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t; - unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t; - unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t; - unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t; - unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t; - unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t; - unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t; - unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> int8x16x2_t; - unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> int64x1x2_t; - unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> int64x2x2_t; - unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t; - unsafe fn vld2_lane_u64(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t; - unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t; - unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t; - unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> float64x1x2_t; - unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) - -> float64x2x2_t; - unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t; - unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t; - unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t; - unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t; - unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t; - unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t; - unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t; - unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t; - unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t; - unsafe fn vld3_lane_s64(a: *const i64, b: int64x1x3_t) -> int64x1x3_t; - unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> int64x2x3_t; - unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t; - unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t; - unsafe fn vld3_lane_u64(a: *const u64, b: uint64x1x3_t) -> 
uint64x1x3_t; - unsafe fn vld3q_lane_u64(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t; - unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> float64x1x3_t; - unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) - -> float64x2x3_t; - unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t; - unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t; - unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t; - unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t; - unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t; - unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t; - unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t; - unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t; - unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t; - unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> int64x1x4_t; - unsafe fn vld4q_lane_s64(a: *const i64, b: int64x2x4_t) -> int64x2x4_t; - unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t; - unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t; - unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t; - unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t; - unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> float64x1x4_t; - unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) - -> float64x2x4_t; - unsafe fn vst1_lane_f64(a: *mut f64, b: float64x1_t); - unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t); - unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t); - unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t); - unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t); - unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t); - unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t); - unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t); - unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t); - unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t); - unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t); - unsafe fn 
vst2q_f64(a: *mut f64, b: float64x2x2_t); - unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t); - unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t); - unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t); - unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t); - unsafe fn vst2_lane_u64(a: *mut u64, b: uint64x1x2_t); - unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t); - unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t); - unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t); - unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t); - unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t); - unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t); - unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t); - unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t); - unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t); - unsafe fn vst3_lane_s64(a: *mut i64, b: int64x1x3_t); - unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t); - unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t); - unsafe fn vst3_lane_u64(a: *mut u64, b: uint64x1x3_t); - unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t); - unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t); - unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t); - unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t); - unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t); - unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t); - unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t); - unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t); - unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t); - unsafe fn vst4_lane_s64(a: *mut i64, b: int64x1x4_t); - unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t); - unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t); - unsafe fn vst4_lane_u64(a: *mut u64, b: uint64x1x4_t); - unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t); - unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t); - unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t); - unsafe fn vst4q_lane_f64(a: *mut f64, b: 
float64x2x4_t); - fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t; - fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t; - fn vmul_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t; - fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t; - fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32; - fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32; - fn vmuld_lane_f64(a: f64, b: float64x1_t) -> f64; - fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64; - fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t; - fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t; - fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t; - fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t; - fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t; - fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t; - fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t; - fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t; - fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t; - fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t; - fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t; - fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t; - fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t; - fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t; - fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t; - fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t; - fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t; - fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t; - fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t; - fn vmulx_f32(a: float32x2_t, 
b: float32x2_t) -> float32x2_t; - fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vmulx_lane_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) -> float64x1_t; - fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) -> float32x2_t; - fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) -> float32x4_t; - fn vmulxq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) -> float64x2_t; - fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vmulxs_f32(a: f32, b: f32) -> f32; - fn vmulxd_f64(a: f64, b: f64) -> f64; - fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> f32; - fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32; - fn vmulxd_lane_f64(a: f64, b: float64x1_t) -> f64; - fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64; - fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t; - fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t; - fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t; - fn vfma_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - ) -> float32x2_t; - fn vfma_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, - ) -> float32x2_t; - fn vfmaq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, - ) -> float32x4_t; - fn vfmaq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - ) -> float32x4_t; - fn vfma_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, - ) -> float64x1_t; - fn vfma_laneq_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x2_t, - ) -> float64x1_t; - fn vfmaq_lane_f64( - a: float64x2_t, - b: float64x2_t, - 
c: float64x1_t, - ) -> float64x2_t; - fn vfmaq_laneq_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - ) -> float64x2_t; - fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32; - fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32; - fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64; - fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64; - fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t; - fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t; - fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t; - fn vfms_lane_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x2_t, - ) -> float32x2_t; - fn vfms_laneq_f32( - a: float32x2_t, - b: float32x2_t, - c: float32x4_t, - ) -> float32x2_t; - fn vfmsq_lane_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x2_t, - ) -> float32x4_t; - fn vfmsq_laneq_f32( - a: float32x4_t, - b: float32x4_t, - c: float32x4_t, - ) -> float32x4_t; - fn vfms_lane_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x1_t, - ) -> float64x1_t; - fn vfms_laneq_f64( - a: float64x1_t, - b: float64x1_t, - c: float64x2_t, - ) -> float64x1_t; - fn vfmsq_lane_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x1_t, - ) -> float64x2_t; - fn vfmsq_laneq_f64( - a: float64x2_t, - b: float64x2_t, - c: float64x2_t, - ) -> float64x2_t; - fn vfmss_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32; - fn vfmss_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32; - fn vfmsd_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64; - fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64; - fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn 
vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vsubd_s64(a: i64, b: i64) -> i64; - fn vsubd_u64(a: u64, b: u64) -> u64; - fn vaddv_f32(a: float32x2_t) -> f32; - fn vaddvq_f32(a: float32x4_t) -> f32; - fn vaddvq_f64(a: float64x2_t) -> f64; - fn vaddlv_s16(a: int16x4_t) -> i32; - fn vaddlvq_s16(a: int16x8_t) -> i32; - fn vaddlv_s32(a: int32x2_t) -> i64; - fn vaddlvq_s32(a: int32x4_t) -> i64; - fn vaddlv_u16(a: uint16x4_t) -> u32; - fn vaddlvq_u16(a: uint16x8_t) -> u32; - fn vaddlv_u32(a: uint32x2_t) -> u64; - fn vaddlvq_u32(a: uint32x4_t) -> u64; - fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t; - fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t; - fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t; - fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t; - fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t; - fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t; - fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t; - fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t; - fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t; - fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t; - fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t; - fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t; - fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vmaxnmv_f32(a: float32x2_t) -> f32; - fn vmaxnmvq_f64(a: float64x2_t) -> f64; - fn vmaxnmvq_f32(a: float32x4_t) -> f32; - fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vpmaxnms_f32(a: float32x2_t) -> f32; - fn vpmaxnmqd_f64(a: float64x2_t) -> f64; - fn 
vpmaxs_f32(a: float32x2_t) -> f32; - fn vpmaxqd_f64(a: float64x2_t) -> f64; - fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vminnmv_f32(a: float32x2_t) -> f32; - fn vminnmvq_f64(a: float64x2_t) -> f64; - fn vminnmvq_f32(a: float32x4_t) -> f32; - fn vmovl_high_s8(a: int8x16_t) -> int16x8_t; - fn vmovl_high_s16(a: int16x8_t) -> int32x4_t; - fn vmovl_high_s32(a: int32x4_t) -> int64x2_t; - fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t; - fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t; - fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t; - fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vpadds_f32(a: float32x2_t) -> f32; - fn vpaddd_f64(a: float64x2_t) -> f64; - fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vpminnms_f32(a: float32x2_t) -> f32; - fn vpminnmqd_f64(a: float64x2_t) -> f64; - fn vpmins_f32(a: float32x2_t) -> f32; - fn vpminqd_f64(a: float64x2_t) -> f64; - fn vqdmullh_s16(a: i16, b: i16) -> i32; - fn vqdmulls_s32(a: i32, b: i32) -> i64; - fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t; - fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t; - fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t; - fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t; - fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t; - fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t; - fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32; - fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32; - fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64; - fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64; - fn 
vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x4_t; - fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x2_t; - fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t; - fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t; - fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t; - fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t; - fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t; - fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t; - fn vqdmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t; - fn vqdmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t; - fn vqdmlal_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, - ) -> int32x4_t; - fn vqdmlal_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, - ) -> int32x4_t; - fn vqdmlal_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, - ) -> int64x2_t; - fn vqdmlal_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, - ) -> int64x2_t; - fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32; - fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64; - fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32; - fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32; - fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64; - fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64; - fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t; - fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t; - fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t; - fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t; - fn vqdmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t; - fn vqdmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t; - fn vqdmlsl_high_lane_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x4_t, - 
) -> int32x4_t; - fn vqdmlsl_high_laneq_s16( - a: int32x4_t, - b: int16x8_t, - c: int16x8_t, - ) -> int32x4_t; - fn vqdmlsl_high_lane_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x2_t, - ) -> int64x2_t; - fn vqdmlsl_high_laneq_s32( - a: int64x2_t, - b: int32x4_t, - c: int32x4_t, - ) -> int64x2_t; - fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32; - fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64; - fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32; - fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32; - fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64; - fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64; - fn vqdmulhh_s16(a: i16, b: i16) -> i16; - fn vqdmulhs_s32(a: i32, b: i32) -> i32; - fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16; - fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16; - fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32; - fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32; - fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t; - fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t; - fn vqmovnh_s16(a: i16) -> i8; - fn vqmovns_s32(a: i32) -> i16; - fn vqmovnh_u16(a: u16) -> u8; - fn vqmovns_u32(a: u32) -> u16; - fn vqmovnd_s64(a: i64) -> i32; - fn vqmovnd_u64(a: u64) -> u32; - fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t; - fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t; - fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t; - fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t; - fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t; - fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t; - fn vqmovunh_s16(a: i16) -> u8; - fn vqmovuns_s32(a: i32) -> u16; - fn vqmovund_s64(a: i64) -> u32; - fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t; - fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) 
-> uint16x8_t; - fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t; - fn vqrdmulhh_s16(a: i16, b: i16) -> i16; - fn vqrdmulhs_s32(a: i32, b: i32) -> i32; - fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16; - fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16; - fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32; - fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32; - fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16; - fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32; - fn vqrdmlah_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - ) -> int16x4_t; - fn vqrdmlah_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, - ) -> int16x4_t; - fn vqrdmlahq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, - ) -> int16x8_t; - fn vqrdmlahq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - ) -> int16x8_t; - fn vqrdmlah_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - ) -> int32x2_t; - fn vqrdmlah_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, - ) -> int32x2_t; - fn vqrdmlahq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, - ) -> int32x4_t; - fn vqrdmlahq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - ) -> int32x4_t; - fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16; - fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16; - fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32; - fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32; - fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t; - fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t; - fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t; - fn 
vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t; - fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16; - fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32; - fn vqrdmlsh_lane_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x4_t, - ) -> int16x4_t; - fn vqrdmlsh_laneq_s16( - a: int16x4_t, - b: int16x4_t, - c: int16x8_t, - ) -> int16x4_t; - fn vqrdmlshq_lane_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x4_t, - ) -> int16x8_t; - fn vqrdmlshq_laneq_s16( - a: int16x8_t, - b: int16x8_t, - c: int16x8_t, - ) -> int16x8_t; - fn vqrdmlsh_lane_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x2_t, - ) -> int32x2_t; - fn vqrdmlsh_laneq_s32( - a: int32x2_t, - b: int32x2_t, - c: int32x4_t, - ) -> int32x2_t; - fn vqrdmlshq_lane_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x2_t, - ) -> int32x4_t; - fn vqrdmlshq_laneq_s32( - a: int32x4_t, - b: int32x4_t, - c: int32x4_t, - ) -> int32x4_t; - fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16; - fn vqrdmlshh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16; - fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32; - fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32; - fn vqrshls_s32(a: i32, b: i32) -> i32; - fn vqrshld_s64(a: i64, b: i64) -> i64; - fn vqrshlb_s8(a: i8, b: i8) -> i8; - fn vqrshlh_s16(a: i16, b: i16) -> i16; - fn vqrshls_u32(a: u32, b: i32) -> u32; - fn vqrshld_u64(a: u64, b: i64) -> u64; - fn vqrshlb_u8(a: u8, b: i8) -> u8; - fn vqrshlh_u16(a: u16, b: i16) -> u16; - fn vqrshrnh_n_s16(a: i16) -> i8; - fn vqrshrns_n_s32(a: i32) -> i16; - fn vqrshrnd_n_s64(a: i64) -> i32; - fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t; - fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t; - fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t; - fn vqrshrnh_n_u16(a: u16) -> u8; - fn vqrshrns_n_u32(a: u32) -> u16; - fn vqrshrnd_n_u64(a: u64) -> u32; - fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t; - fn vqrshrn_high_n_u32(a: uint16x4_t, b: 
uint32x4_t) -> uint16x8_t; - fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t; - fn vqrshrunh_n_s16(a: i16) -> u8; - fn vqrshruns_n_s32(a: i32) -> u16; - fn vqrshrund_n_s64(a: i64) -> u32; - fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t; - fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t; - fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t; - fn vqshld_s64(a: i64, b: i64) -> i64; - fn vqshlb_s8(a: i8, b: i8) -> i8; - fn vqshlh_s16(a: i16, b: i16) -> i16; - fn vqshls_s32(a: i32, b: i32) -> i32; - fn vqshld_u64(a: u64, b: i64) -> u64; - fn vqshlb_u8(a: u8, b: i8) -> u8; - fn vqshlh_u16(a: u16, b: i16) -> u16; - fn vqshls_u32(a: u32, b: i32) -> u32; - fn vqshlb_n_s8(a: i8) -> i8; - fn vqshlh_n_s16(a: i16) -> i16; - fn vqshls_n_s32(a: i32) -> i32; - fn vqshld_n_s64(a: i64) -> i64; - fn vqshlb_n_u8(a: u8) -> u8; - fn vqshlh_n_u16(a: u16) -> u16; - fn vqshls_n_u32(a: u32) -> u32; - fn vqshld_n_u64(a: u64) -> u64; - fn vqshlub_n_s8(a: i8) -> u8; - fn vqshluh_n_s16(a: i16) -> u16; - fn vqshlus_n_s32(a: i32) -> u32; - fn vqshlud_n_s64(a: i64) -> u64; - fn vqshrnd_n_s64(a: i64) -> i32; - fn vqshrnh_n_s16(a: i16) -> i8; - fn vqshrns_n_s32(a: i32) -> i16; - fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t; - fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t; - fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t; - fn vqshrnd_n_u64(a: u64) -> u32; - fn vqshrnh_n_u16(a: u16) -> u8; - fn vqshrns_n_u32(a: u32) -> u16; - fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t; - fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t; - fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t; - fn vqshrunh_n_s16(a: i16) -> u8; - fn vqshruns_n_s32(a: i32) -> u16; - fn vqshrund_n_s64(a: i64) -> u32; - fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t; - fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t; - fn 
vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t; - fn vsqaddb_u8(a: u8, b: i8) -> u8; - fn vsqaddh_u16(a: u16, b: i16) -> u16; - fn vsqadds_u32(a: u32, b: i32) -> u32; - fn vsqaddd_u64(a: u64, b: i64) -> u64; - fn vsqrt_f32(a: float32x2_t) -> float32x2_t; - fn vsqrtq_f32(a: float32x4_t) -> float32x4_t; - fn vsqrt_f64(a: float64x1_t) -> float64x1_t; - fn vsqrtq_f64(a: float64x2_t) -> float64x2_t; - fn vrsqrte_f64(a: float64x1_t) -> float64x1_t; - fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t; - fn vrsqrtes_f32(a: f32) -> f32; - fn vrsqrted_f64(a: f64) -> f64; - fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vrsqrtss_f32(a: f32, b: f32) -> f32; - fn vrsqrtsd_f64(a: f64, b: f64) -> f64; - fn vrecpe_f64(a: float64x1_t) -> float64x1_t; - fn vrecpeq_f64(a: float64x2_t) -> float64x2_t; - fn vrecpes_f32(a: f32) -> f32; - fn vrecped_f64(a: f64) -> f64; - fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vrecpss_f32(a: f32, b: f32) -> f32; - fn vrecpsd_f64(a: f64, b: f64) -> f64; - fn vrecpxs_f32(a: f32) -> f32; - fn vrecpxd_f64(a: f64) -> f64; - fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t; - fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t; - fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t; - fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t; - fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t; - fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t; - fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t; - fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t; - fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t; - fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t; - fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t; - fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t; - fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t; - fn vreinterpretq_s16_f64(a: 
float64x2_t) -> int16x8_t; - fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t; - fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t; - fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t; - fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t; - fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t; - fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t; - fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t; - fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t; - fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t; - fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t; - fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t; - fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t; - fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t; - fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t; - fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t; - fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t; - fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t; - fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t; - fn vreinterpretq_p128_f64(a: float64x2_t) -> p128; - fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t; - fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t; - fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t; - fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t; - fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t; - fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t; - fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t; - fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t; - fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t; - fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t; - fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t; - fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t; - fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t; - fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t; - fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t; - fn vreinterpretq_f64_u64(a: uint64x2_t) 
-> float64x2_t; - fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t; - fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t; - fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t; - fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t; - fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t; - fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t; - fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t; - fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t; - fn vreinterpretq_f64_p128(a: p128) -> float64x2_t; - fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t; - fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t; - fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t; - fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t; - fn vrshld_s64(a: i64, b: i64) -> i64; - fn vrshld_u64(a: u64, b: i64) -> u64; - fn vrshrd_n_s64(a: i64) -> i64; - fn vrshrd_n_u64(a: u64) -> u64; - fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t; - fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t; - fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t; - fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t; - fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t; - fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t; - fn vrsrad_n_s64(a: i64, b: i64) -> i64; - fn vrsrad_n_u64(a: u64, b: u64) -> u64; - fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t; - fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t; - fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t; - fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t; - fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t; - fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t; - fn vset_lane_f64(a: f64, b: float64x1_t) -> float64x1_t; - fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64x2_t; - fn 
vshld_s64(a: i64, b: i64) -> i64; - fn vshld_u64(a: u64, b: i64) -> u64; - fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t; - fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t; - fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t; - fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t; - fn vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t; - fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t; - fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t; - fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t; - fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t; - fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t; - fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t; - fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t; - fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t; - fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t; - fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t; - fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> 
float32x2_t; - fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t; - fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t; - fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t; - fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vzip1q_u16(a: uint16x8_t, b: 
uint16x8_t) -> uint16x8_t; - fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t; - fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t; - fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t; - fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t; - fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t; - fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t; - fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> 
float64x2_t; - fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t; - fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t; - fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t; - fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - fn vuzp2q_p8(a: poly8x16_t, b: 
poly8x16_t) -> poly8x16_t; - fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t; - fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t; - fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t; - fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t; - fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t; - fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t; - fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t; - fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t; - fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t; - fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t; - fn vqabs_s64(a: int64x1_t) -> int64x1_t; - fn vqabsq_s64(a: int64x2_t) -> int64x2_t; - fn vqabsb_s8(a: i8) -> i8; - fn vqabsh_s16(a: i16) -> i16; - fn vqabss_s32(a: i32) -> i32; - fn vqabsd_s64(a: i64) -> i64; - fn vslid_n_s64(a: i64, b: i64) -> i64; - fn vslid_n_u64(a: u64, b: u64) -> u64; - fn vsrid_n_s64(a: i64, b: i64) -> i64; - fn vsrid_n_u64(a: u64, b: u64) -> u64; - - fn vcopy_lane_s64(_a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vcopy_lane_u64( - _a: uint64x1_t, - b: uint64x1_t, - ) -> uint64x1_t; - fn vcopy_lane_p64( - _a: poly64x1_t, - b: poly64x1_t, - ) -> poly64x1_t; - fn vcopy_lane_f64( - _a: float64x1_t, - b: float64x1_t, - ) -> float64x1_t; - fn vcopy_laneq_s64( - _a: int64x1_t, - b: int64x2_t, - ) -> int64x1_t; - fn vcopy_laneq_u64( - _a: uint64x1_t, - b: uint64x2_t, - ) -> uint64x1_t; - fn vcopy_laneq_p64( - _a: poly64x1_t, - b: poly64x2_t, - ) -> poly64x1_t; - 
fn vcopy_laneq_f64( - _a: float64x1_t, - b: float64x2_t, - ) -> float64x1_t; - unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t; - unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t; - unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t; - unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t; - unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t; - unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t; - unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t; - unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t; - unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t; - unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t; - unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t; - unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t; - unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t; - unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t; - unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t; - unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t; - unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t; - unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t; - unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t; - unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t; - unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t; - unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t; - unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t; - unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t; - unsafe fn vld1_dup_f64(ptr: *const f64) -> float64x1_t; - unsafe fn vld1q_dup_f64(ptr: *const f64) -> float64x2_t; - unsafe fn vld1_lane_f64(ptr: *const f64, src: float64x1_t) -> float64x1_t; - unsafe fn vld1q_lane_f64(ptr: *const f64, src: float64x2_t) - -> float64x2_t; - unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t); - unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t); - unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t); - unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t); - unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t); - unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t); - unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t); - unsafe fn vst1q_s64(ptr: 
*mut i64, a: int64x2_t); - unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t); - unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t); - unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t); - unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t); - unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t); - unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t); - unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t); - unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t); - unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t); - unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t); - unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t); - unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t); - unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t); - unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t); - unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t); - unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t); - fn vabsd_s64(a: i64) -> i64; - fn vabs_s64(a: int64x1_t) -> int64x1_t; - fn vabsq_s64(a: int64x2_t) -> int64x2_t; - fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t; - fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_t; - fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t; - fn vbslq_p64(a: poly64x2_t, b: poly64x2_t, c: poly64x2_t) -> poly64x2_t; - fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t; - fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t; - fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t; - fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t; - fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t; - fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t; - fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t; - fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t; - fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t; - fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t; - fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t; - fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t; - fn 
vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t; - fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t; - fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t; - fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t; - fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vpaddd_s64(a: int64x2_t) -> i64; - fn vpaddd_u64(a: uint64x2_t) -> u64; - fn vaddv_s16(a: int16x4_t) -> i16; - fn vaddv_s32(a: int32x2_t) -> i32; - fn vaddv_s8(a: int8x8_t) -> i8; - fn vaddv_u16(a: uint16x4_t) -> u16; - fn vaddv_u32(a: uint32x2_t) -> u32; - fn vaddv_u8(a: uint8x8_t) -> u8; - fn vaddvq_s16(a: int16x8_t) -> i16; - fn vaddvq_s32(a: int32x4_t) -> i32; - fn vaddvq_s8(a: int8x16_t) -> i8; - fn vaddvq_u16(a: uint16x8_t) -> u16; - fn vaddvq_u32(a: uint32x4_t) -> u32; - fn vaddvq_u8(a: uint8x16_t) -> u8; - fn vaddvq_s64(a: int64x2_t) -> i64; - fn vaddvq_u64(a: uint64x2_t) -> u64; - fn vaddlv_s8(a: int8x8_t) -> i16; - fn vaddlvq_s8(a: int8x16_t) -> i16; - fn vaddlv_u8(a: uint8x8_t) -> u16; - fn vaddlvq_u8(a: uint8x16_t) -> u16; - fn vadd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t; - fn vaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vaddd_s64(a: i64, b: i64) -> i64; - fn vaddd_u64(a: u64, b: u64) -> u64; - fn vmaxv_s8(a: int8x8_t) -> i8; - fn vmaxvq_s8(a: int8x16_t) -> i8; - fn vmaxv_s16(a: int16x4_t) -> i16; - fn vmaxvq_s16(a: int16x8_t) -> i16; - fn vmaxv_s32(a: int32x2_t) -> i32; - fn vmaxvq_s32(a: int32x4_t) -> i32; 
- fn vmaxv_u8(a: uint8x8_t) -> u8; - fn vmaxvq_u8(a: uint8x16_t) -> u8; - fn vmaxv_u16(a: uint16x4_t) -> u16; - fn vmaxvq_u16(a: uint16x8_t) -> u16; - fn vmaxv_u32(a: uint32x2_t) -> u32; - fn vmaxvq_u32(a: uint32x4_t) -> u32; - fn vmaxv_f32(a: float32x2_t) -> f32; - fn vmaxvq_f32(a: float32x4_t) -> f32; - fn vmaxvq_f64(a: float64x2_t) -> f64; - fn vminv_s8(a: int8x8_t) -> i8; - fn vminvq_s8(a: int8x16_t) -> i8; - fn vminv_s16(a: int16x4_t) -> i16; - fn vminvq_s16(a: int16x8_t) -> i16; - fn vminv_s32(a: int32x2_t) -> i32; - fn vminvq_s32(a: int32x4_t) -> i32; - fn vminv_u8(a: uint8x8_t) -> u8; - fn vminvq_u8(a: uint8x16_t) -> u8; - fn vminv_u16(a: uint16x4_t) -> u16; - fn vminvq_u16(a: uint16x8_t) -> u16; - fn vminv_u32(a: uint32x2_t) -> u32; - fn vminvq_u32(a: uint32x4_t) -> u32; - fn vminv_f32(a: float32x2_t) -> f32; - fn vminvq_f32(a: float32x4_t) -> f32; - fn vminvq_f64(a: float64x2_t) -> f64; - fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t; - fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t; - fn vext_p64(a: poly64x1_t, _b: poly64x1_t) -> poly64x1_t; - fn vext_f64(a: float64x1_t, _b: float64x1_t) -> float64x1_t; 
- fn vdup_n_p64(value: p64) -> poly64x1_t; - fn vdup_n_f64(value: f64) -> float64x1_t; - fn vdupq_n_p64(value: p64) -> poly64x2_t; - fn vdupq_n_f64(value: f64) -> float64x2_t; - fn vmov_n_p64(value: p64) -> poly64x1_t; - fn vmov_n_f64(value: f64) -> float64x1_t; - fn vmovq_n_p64(value: p64) -> poly64x2_t; - fn vmovq_n_f64(value: f64) -> float64x2_t; - fn vget_high_f64(a: float64x2_t) -> float64x1_t; - fn vget_high_p64(a: poly64x2_t) -> poly64x1_t; - fn vget_low_f64(a: float64x2_t) -> float64x1_t; - fn vget_low_p64(a: poly64x2_t) -> poly64x1_t; - fn vget_lane_f64(v: float64x1_t) -> f64; - fn vgetq_lane_f64(v: float64x2_t) -> f64; - fn vcombine_f64(low: float64x1_t, high: float64x1_t) -> float64x2_t; - fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t; - fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t; - fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t; - fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t; - fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t; - fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t; - fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t; - fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t; - fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t; - fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t; - fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t; - fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t; - fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t; - fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t; - fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t; - fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t; - fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t; - fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t; - fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: 
uint8x8_t) -> poly8x8_t; - fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t; - fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t; - fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t; - fn vqtbl1_s8(t: int8x16_t, idx: uint8x8_t) -> int8x8_t; - fn vqtbl1q_s8(t: int8x16_t, idx: uint8x16_t) -> int8x16_t; - fn vqtbl1_u8(t: uint8x16_t, idx: uint8x8_t) -> uint8x8_t; - fn vqtbl1q_u8(t: uint8x16_t, idx: uint8x16_t) -> uint8x16_t; - fn vqtbl1_p8(t: poly8x16_t, idx: uint8x8_t) -> poly8x8_t; - fn vqtbl1q_p8(t: poly8x16_t, idx: uint8x16_t) -> poly8x16_t; - fn vqtbx1_s8(a: int8x8_t, t: int8x16_t, idx: uint8x8_t) -> int8x8_t; - fn vqtbx1q_s8(a: int8x16_t, t: int8x16_t, idx: uint8x16_t) -> int8x16_t; - fn vqtbx1_u8(a: uint8x8_t, t: uint8x16_t, idx: uint8x8_t) -> uint8x8_t; - fn vqtbx1q_u8(a: uint8x16_t, t: uint8x16_t, idx: uint8x16_t) -> uint8x16_t; - fn vqtbx1_p8(a: poly8x8_t, t: poly8x16_t, idx: uint8x8_t) -> poly8x8_t; - fn vqtbx1q_p8(a: poly8x16_t, t: poly8x16_t, idx: uint8x16_t) -> poly8x16_t; - fn vqtbl2_s8(t: int8x16x2_t, idx: uint8x8_t) -> int8x8_t; - fn vqtbl2q_s8(t: int8x16x2_t, idx: uint8x16_t) -> int8x16_t; - fn vqtbl2_u8(t: uint8x16x2_t, idx: uint8x8_t) -> uint8x8_t; - fn vqtbl2q_u8(t: uint8x16x2_t, idx: uint8x16_t) -> uint8x16_t; - fn vqtbl2_p8(t: poly8x16x2_t, idx: uint8x8_t) -> poly8x8_t; - fn vqtbl2q_p8(t: poly8x16x2_t, idx: uint8x16_t) -> poly8x16_t; - fn vqtbx2_s8(a: int8x8_t, t: int8x16x2_t, idx: uint8x8_t) -> int8x8_t; - fn vqtbx2q_s8(a: int8x16_t, t: int8x16x2_t, idx: uint8x16_t) -> int8x16_t; - fn vqtbx2_u8(a: uint8x8_t, t: uint8x16x2_t, idx: uint8x8_t) -> uint8x8_t; - fn vqtbx2q_u8(a: uint8x16_t, t: uint8x16x2_t, idx: uint8x16_t) -> uint8x16_t; - fn vqtbx2_p8(a: poly8x8_t, t: poly8x16x2_t, idx: uint8x8_t) -> poly8x8_t; - fn vqtbx2q_p8(a: poly8x16_t, t: poly8x16x2_t, idx: uint8x16_t) -> poly8x16_t; - fn vqtbl3_s8(t: int8x16x3_t, idx: uint8x8_t) -> int8x8_t; - fn vqtbl3q_s8(t: int8x16x3_t, idx: uint8x16_t) 
-> int8x16_t; - fn vqtbl3_u8(t: uint8x16x3_t, idx: uint8x8_t) -> uint8x8_t; - fn vqtbl3q_u8(t: uint8x16x3_t, idx: uint8x16_t) -> uint8x16_t; - fn vqtbl3_p8(t: poly8x16x3_t, idx: uint8x8_t) -> poly8x8_t; - fn vqtbl3q_p8(t: poly8x16x3_t, idx: uint8x16_t) -> poly8x16_t; - fn vqtbx3_s8(a: int8x8_t, t: int8x16x3_t, idx: uint8x8_t) -> int8x8_t; - fn vqtbx3q_s8(a: int8x16_t, t: int8x16x3_t, idx: uint8x16_t) -> int8x16_t; - fn vqtbx3_u8(a: uint8x8_t, t: uint8x16x3_t, idx: uint8x8_t) -> uint8x8_t; - fn vqtbx3q_u8(a: uint8x16_t, t: uint8x16x3_t, idx: uint8x16_t) -> uint8x16_t; - fn vqtbx3_p8(a: poly8x8_t, t: poly8x16x3_t, idx: uint8x8_t) -> poly8x8_t; - fn vqtbx3q_p8(a: poly8x16_t, t: poly8x16x3_t, idx: uint8x16_t) -> poly8x16_t; - fn vqtbl4_s8(t: int8x16x4_t, idx: uint8x8_t) -> int8x8_t; - fn vqtbl4q_s8(t: int8x16x4_t, idx: uint8x16_t) -> int8x16_t; - fn vqtbl4_u8(t: uint8x16x4_t, idx: uint8x8_t) -> uint8x8_t; - fn vqtbl4q_u8(t: uint8x16x4_t, idx: uint8x16_t) -> uint8x16_t; - fn vqtbl4_p8(t: poly8x16x4_t, idx: uint8x8_t) -> poly8x8_t; - fn vqtbl4q_p8(t: poly8x16x4_t, idx: uint8x16_t) -> poly8x16_t; - fn vqtbx4_s8(a: int8x8_t, t: int8x16x4_t, idx: uint8x8_t) -> int8x8_t; - fn vqtbx4q_s8(a: int8x16_t, t: int8x16x4_t, idx: uint8x16_t) -> int8x16_t; - fn vqtbx4_u8(a: uint8x8_t, t: uint8x16x4_t, idx: uint8x8_t) -> uint8x8_t; - fn vqtbx4q_u8(a: uint8x16_t, t: uint8x16x4_t, idx: uint8x16_t) -> uint8x16_t; - fn vqtbx4_p8(a: poly8x8_t, t: poly8x16x4_t, idx: uint8x8_t) -> poly8x8_t; - fn vqtbx4q_p8(a: poly8x16_t, t: poly8x16x4_t, idx: uint8x16_t) -> poly8x16_t; - fn vshld_n_s64(a: i64) -> i64; - fn vshld_n_u64(a: u64) -> u64; - fn vshrd_n_s64(a: i64) -> i64; - fn vshrd_n_u64(a: u64) -> u64; - fn vsrad_n_s64(a: i64, b: i64) -> i64; - fn vsrad_n_u64(a: u64, b: u64) -> u64; - fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) 
-> int16x8_t; - fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t; - fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t; - fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t; - fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t; - fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t; - fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t; - fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t; - fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t; - fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t; - fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t; - fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t; - fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t; - fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t; - fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t; - fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t; - fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t; - fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t; - fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t; - fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t; - fn 
vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t; - fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t; - fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t; - } -} +#![doc = "Access to intrinsics on `aarch64`."] +mod neon; +pub use neon::Neon; diff --git a/fearless_simd/src/core_arch/aarch64/neon.rs b/fearless_simd/src/core_arch/aarch64/neon.rs new file mode 100644 index 000000000..56dc15be9 --- /dev/null +++ b/fearless_simd/src/core_arch/aarch64/neon.rs @@ -0,0 +1,18453 @@ +// Copyright 2026 the Fearless_SIMD Authors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// This file is autogenerated by fearless_simd_gen + +use arch::*; +use core::arch::aarch64 as arch; +type p8 = u8; +type p16 = u16; +type p64 = u64; +type p128 = u128; +#[doc = "A token for `Neon` intrinsics on `aarch64`."] +#[derive(Clone, Copy, Debug)] +pub struct Neon { + _private: (), +} +#[allow( + clippy::missing_safety_doc, + reason = "The underlying functions have their own safety docs" +)] +impl Neon { + #[doc = r" Create a SIMD token."] + #[doc = r""] + #[doc = r" # Safety"] + #[doc = r""] + #[doc = r" The required CPU features must be available."] + #[inline] + pub const unsafe fn new_unchecked() -> Self { + Self { _private: () } + } + #[doc = "See [`arch::vabal_high_s8`]."] + #[inline(always)] + pub fn vabal_high_s8(self, a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { + unsafe { vabal_high_s8(a, b, c) } + } + #[doc = "See [`arch::vabal_high_s16`]."] + #[inline(always)] + pub fn vabal_high_s16(self, a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + unsafe { vabal_high_s16(a, b, c) } + } + #[doc = "See [`arch::vabal_high_s32`]."] + #[inline(always)] + pub fn vabal_high_s32(self, a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + unsafe { vabal_high_s32(a, b, c) } + } + #[doc = "See [`arch::vabal_high_u8`]."] + #[inline(always)] + pub fn vabal_high_u8(self, a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { + unsafe { vabal_high_u8(a, 
b, c) } + } + #[doc = "See [`arch::vabal_high_u16`]."] + #[inline(always)] + pub fn vabal_high_u16(self, a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { + unsafe { vabal_high_u16(a, b, c) } + } + #[doc = "See [`arch::vabal_high_u32`]."] + #[inline(always)] + pub fn vabal_high_u32(self, a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { + unsafe { vabal_high_u32(a, b, c) } + } + #[doc = "See [`arch::vabd_f64`]."] + #[inline(always)] + pub fn vabd_f64(self, a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vabd_f64(a, b) } + } + #[doc = "See [`arch::vabdq_f64`]."] + #[inline(always)] + pub fn vabdq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vabdq_f64(a, b) } + } + #[doc = "See [`arch::vabdd_f64`]."] + #[inline(always)] + pub fn vabdd_f64(self, a: f64, b: f64) -> f64 { + unsafe { vabdd_f64(a, b) } + } + #[doc = "See [`arch::vabds_f32`]."] + #[inline(always)] + pub fn vabds_f32(self, a: f32, b: f32) -> f32 { + unsafe { vabds_f32(a, b) } + } + #[doc = "See [`arch::vabdl_high_s16`]."] + #[inline(always)] + pub fn vabdl_high_s16(self, a: int16x8_t, b: int16x8_t) -> int32x4_t { + unsafe { vabdl_high_s16(a, b) } + } + #[doc = "See [`arch::vabdl_high_s32`]."] + #[inline(always)] + pub fn vabdl_high_s32(self, a: int32x4_t, b: int32x4_t) -> int64x2_t { + unsafe { vabdl_high_s32(a, b) } + } + #[doc = "See [`arch::vabdl_high_s8`]."] + #[inline(always)] + pub fn vabdl_high_s8(self, a: int8x16_t, b: int8x16_t) -> int16x8_t { + unsafe { vabdl_high_s8(a, b) } + } + #[doc = "See [`arch::vabdl_high_u8`]."] + #[inline(always)] + pub fn vabdl_high_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + unsafe { vabdl_high_u8(a, b) } + } + #[doc = "See [`arch::vabdl_high_u16`]."] + #[inline(always)] + pub fn vabdl_high_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + unsafe { vabdl_high_u16(a, b) } + } + #[doc = "See [`arch::vabdl_high_u32`]."] + #[inline(always)] + pub fn vabdl_high_u32(self, a: uint32x4_t, b: 
uint32x4_t) -> uint64x2_t { + unsafe { vabdl_high_u32(a, b) } + } + #[doc = "See [`arch::vabs_f64`]."] + #[inline(always)] + pub fn vabs_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vabs_f64(a) } + } + #[doc = "See [`arch::vabsq_f64`]."] + #[inline(always)] + pub fn vabsq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vabsq_f64(a) } + } + #[doc = "See [`arch::vabs_s64`]."] + #[inline(always)] + pub fn vabs_s64(self, a: int64x1_t) -> int64x1_t { + unsafe { vabs_s64(a) } + } + #[doc = "See [`arch::vabsq_s64`]."] + #[inline(always)] + pub fn vabsq_s64(self, a: int64x2_t) -> int64x2_t { + unsafe { vabsq_s64(a) } + } + #[doc = "See [`arch::vabsd_s64`]."] + #[inline(always)] + pub fn vabsd_s64(self, a: i64) -> i64 { + unsafe { vabsd_s64(a) } + } + #[doc = "See [`arch::vaddd_s64`]."] + #[inline(always)] + pub fn vaddd_s64(self, a: i64, b: i64) -> i64 { + unsafe { vaddd_s64(a, b) } + } + #[doc = "See [`arch::vaddd_u64`]."] + #[inline(always)] + pub fn vaddd_u64(self, a: u64, b: u64) -> u64 { + unsafe { vaddd_u64(a, b) } + } + #[doc = "See [`arch::vaddlv_s16`]."] + #[inline(always)] + pub fn vaddlv_s16(self, a: int16x4_t) -> i32 { + unsafe { vaddlv_s16(a) } + } + #[doc = "See [`arch::vaddlvq_s16`]."] + #[inline(always)] + pub fn vaddlvq_s16(self, a: int16x8_t) -> i32 { + unsafe { vaddlvq_s16(a) } + } + #[doc = "See [`arch::vaddlvq_s32`]."] + #[inline(always)] + pub fn vaddlvq_s32(self, a: int32x4_t) -> i64 { + unsafe { vaddlvq_s32(a) } + } + #[doc = "See [`arch::vaddlv_s32`]."] + #[inline(always)] + pub fn vaddlv_s32(self, a: int32x2_t) -> i64 { + unsafe { vaddlv_s32(a) } + } + #[doc = "See [`arch::vaddlv_s8`]."] + #[inline(always)] + pub fn vaddlv_s8(self, a: int8x8_t) -> i16 { + unsafe { vaddlv_s8(a) } + } + #[doc = "See [`arch::vaddlvq_s8`]."] + #[inline(always)] + pub fn vaddlvq_s8(self, a: int8x16_t) -> i16 { + unsafe { vaddlvq_s8(a) } + } + #[doc = "See [`arch::vaddlv_u16`]."] + #[inline(always)] + pub fn vaddlv_u16(self, a: uint16x4_t) -> u32 { + 
unsafe { vaddlv_u16(a) } + } + #[doc = "See [`arch::vaddlvq_u16`]."] + #[inline(always)] + pub fn vaddlvq_u16(self, a: uint16x8_t) -> u32 { + unsafe { vaddlvq_u16(a) } + } + #[doc = "See [`arch::vaddlvq_u32`]."] + #[inline(always)] + pub fn vaddlvq_u32(self, a: uint32x4_t) -> u64 { + unsafe { vaddlvq_u32(a) } + } + #[doc = "See [`arch::vaddlv_u32`]."] + #[inline(always)] + pub fn vaddlv_u32(self, a: uint32x2_t) -> u64 { + unsafe { vaddlv_u32(a) } + } + #[doc = "See [`arch::vaddlv_u8`]."] + #[inline(always)] + pub fn vaddlv_u8(self, a: uint8x8_t) -> u16 { + unsafe { vaddlv_u8(a) } + } + #[doc = "See [`arch::vaddlvq_u8`]."] + #[inline(always)] + pub fn vaddlvq_u8(self, a: uint8x16_t) -> u16 { + unsafe { vaddlvq_u8(a) } + } + #[doc = "See [`arch::vaddv_f32`]."] + #[inline(always)] + pub fn vaddv_f32(self, a: float32x2_t) -> f32 { + unsafe { vaddv_f32(a) } + } + #[doc = "See [`arch::vaddvq_f32`]."] + #[inline(always)] + pub fn vaddvq_f32(self, a: float32x4_t) -> f32 { + unsafe { vaddvq_f32(a) } + } + #[doc = "See [`arch::vaddvq_f64`]."] + #[inline(always)] + pub fn vaddvq_f64(self, a: float64x2_t) -> f64 { + unsafe { vaddvq_f64(a) } + } + #[doc = "See [`arch::vaddv_s32`]."] + #[inline(always)] + pub fn vaddv_s32(self, a: int32x2_t) -> i32 { + unsafe { vaddv_s32(a) } + } + #[doc = "See [`arch::vaddv_s8`]."] + #[inline(always)] + pub fn vaddv_s8(self, a: int8x8_t) -> i8 { + unsafe { vaddv_s8(a) } + } + #[doc = "See [`arch::vaddvq_s8`]."] + #[inline(always)] + pub fn vaddvq_s8(self, a: int8x16_t) -> i8 { + unsafe { vaddvq_s8(a) } + } + #[doc = "See [`arch::vaddv_s16`]."] + #[inline(always)] + pub fn vaddv_s16(self, a: int16x4_t) -> i16 { + unsafe { vaddv_s16(a) } + } + #[doc = "See [`arch::vaddvq_s16`]."] + #[inline(always)] + pub fn vaddvq_s16(self, a: int16x8_t) -> i16 { + unsafe { vaddvq_s16(a) } + } + #[doc = "See [`arch::vaddvq_s32`]."] + #[inline(always)] + pub fn vaddvq_s32(self, a: int32x4_t) -> i32 { + unsafe { vaddvq_s32(a) } + } + #[doc = "See 
[`arch::vaddv_u32`]."] + #[inline(always)] + pub fn vaddv_u32(self, a: uint32x2_t) -> u32 { + unsafe { vaddv_u32(a) } + } + #[doc = "See [`arch::vaddv_u8`]."] + #[inline(always)] + pub fn vaddv_u8(self, a: uint8x8_t) -> u8 { + unsafe { vaddv_u8(a) } + } + #[doc = "See [`arch::vaddvq_u8`]."] + #[inline(always)] + pub fn vaddvq_u8(self, a: uint8x16_t) -> u8 { + unsafe { vaddvq_u8(a) } + } + #[doc = "See [`arch::vaddv_u16`]."] + #[inline(always)] + pub fn vaddv_u16(self, a: uint16x4_t) -> u16 { + unsafe { vaddv_u16(a) } + } + #[doc = "See [`arch::vaddvq_u16`]."] + #[inline(always)] + pub fn vaddvq_u16(self, a: uint16x8_t) -> u16 { + unsafe { vaddvq_u16(a) } + } + #[doc = "See [`arch::vaddvq_u32`]."] + #[inline(always)] + pub fn vaddvq_u32(self, a: uint32x4_t) -> u32 { + unsafe { vaddvq_u32(a) } + } + #[doc = "See [`arch::vaddvq_s64`]."] + #[inline(always)] + pub fn vaddvq_s64(self, a: int64x2_t) -> i64 { + unsafe { vaddvq_s64(a) } + } + #[doc = "See [`arch::vaddvq_u64`]."] + #[inline(always)] + pub fn vaddvq_u64(self, a: uint64x2_t) -> u64 { + unsafe { vaddvq_u64(a) } + } + #[doc = "See [`arch::vbcaxq_s8`]."] + #[inline(always)] + pub fn vbcaxq_s8(self, a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + unsafe { vbcaxq_s8(a, b, c) } + } + #[doc = "See [`arch::vbcaxq_s16`]."] + #[inline(always)] + pub fn vbcaxq_s16(self, a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + unsafe { vbcaxq_s16(a, b, c) } + } + #[doc = "See [`arch::vbcaxq_s32`]."] + #[inline(always)] + pub fn vbcaxq_s32(self, a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + unsafe { vbcaxq_s32(a, b, c) } + } + #[doc = "See [`arch::vbcaxq_s64`]."] + #[inline(always)] + pub fn vbcaxq_s64(self, a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + unsafe { vbcaxq_s64(a, b, c) } + } + #[doc = "See [`arch::vbcaxq_u8`]."] + #[inline(always)] + pub fn vbcaxq_u8(self, a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { vbcaxq_u8(a, b, c) } + } + #[doc = "See 
[`arch::vbcaxq_u16`]."] + #[inline(always)] + pub fn vbcaxq_u16(self, a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + unsafe { vbcaxq_u16(a, b, c) } + } + #[doc = "See [`arch::vbcaxq_u32`]."] + #[inline(always)] + pub fn vbcaxq_u32(self, a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe { vbcaxq_u32(a, b, c) } + } + #[doc = "See [`arch::vbcaxq_u64`]."] + #[inline(always)] + pub fn vbcaxq_u64(self, a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + unsafe { vbcaxq_u64(a, b, c) } + } + #[doc = "See [`arch::vcage_f64`]."] + #[inline(always)] + pub fn vcage_f64(self, a: float64x1_t, b: float64x1_t) -> uint64x1_t { + unsafe { vcage_f64(a, b) } + } + #[doc = "See [`arch::vcageq_f64`]."] + #[inline(always)] + pub fn vcageq_f64(self, a: float64x2_t, b: float64x2_t) -> uint64x2_t { + unsafe { vcageq_f64(a, b) } + } + #[doc = "See [`arch::vcaged_f64`]."] + #[inline(always)] + pub fn vcaged_f64(self, a: f64, b: f64) -> u64 { + unsafe { vcaged_f64(a, b) } + } + #[doc = "See [`arch::vcages_f32`]."] + #[inline(always)] + pub fn vcages_f32(self, a: f32, b: f32) -> u32 { + unsafe { vcages_f32(a, b) } + } + #[doc = "See [`arch::vcagt_f64`]."] + #[inline(always)] + pub fn vcagt_f64(self, a: float64x1_t, b: float64x1_t) -> uint64x1_t { + unsafe { vcagt_f64(a, b) } + } + #[doc = "See [`arch::vcagtq_f64`]."] + #[inline(always)] + pub fn vcagtq_f64(self, a: float64x2_t, b: float64x2_t) -> uint64x2_t { + unsafe { vcagtq_f64(a, b) } + } + #[doc = "See [`arch::vcagtd_f64`]."] + #[inline(always)] + pub fn vcagtd_f64(self, a: f64, b: f64) -> u64 { + unsafe { vcagtd_f64(a, b) } + } + #[doc = "See [`arch::vcagts_f32`]."] + #[inline(always)] + pub fn vcagts_f32(self, a: f32, b: f32) -> u32 { + unsafe { vcagts_f32(a, b) } + } + #[doc = "See [`arch::vcale_f64`]."] + #[inline(always)] + pub fn vcale_f64(self, a: float64x1_t, b: float64x1_t) -> uint64x1_t { + unsafe { vcale_f64(a, b) } + } + #[doc = "See [`arch::vcaleq_f64`]."] + #[inline(always)] + 
pub fn vcaleq_f64(self, a: float64x2_t, b: float64x2_t) -> uint64x2_t { + unsafe { vcaleq_f64(a, b) } + } + #[doc = "See [`arch::vcaled_f64`]."] + #[inline(always)] + pub fn vcaled_f64(self, a: f64, b: f64) -> u64 { + unsafe { vcaled_f64(a, b) } + } + #[doc = "See [`arch::vcales_f32`]."] + #[inline(always)] + pub fn vcales_f32(self, a: f32, b: f32) -> u32 { + unsafe { vcales_f32(a, b) } + } + #[doc = "See [`arch::vcalt_f64`]."] + #[inline(always)] + pub fn vcalt_f64(self, a: float64x1_t, b: float64x1_t) -> uint64x1_t { + unsafe { vcalt_f64(a, b) } + } + #[doc = "See [`arch::vcaltq_f64`]."] + #[inline(always)] + pub fn vcaltq_f64(self, a: float64x2_t, b: float64x2_t) -> uint64x2_t { + unsafe { vcaltq_f64(a, b) } + } + #[doc = "See [`arch::vcaltd_f64`]."] + #[inline(always)] + pub fn vcaltd_f64(self, a: f64, b: f64) -> u64 { + unsafe { vcaltd_f64(a, b) } + } + #[doc = "See [`arch::vcalts_f32`]."] + #[inline(always)] + pub fn vcalts_f32(self, a: f32, b: f32) -> u32 { + unsafe { vcalts_f32(a, b) } + } + #[doc = "See [`arch::vceq_f64`]."] + #[inline(always)] + pub fn vceq_f64(self, a: float64x1_t, b: float64x1_t) -> uint64x1_t { + unsafe { vceq_f64(a, b) } + } + #[doc = "See [`arch::vceqq_f64`]."] + #[inline(always)] + pub fn vceqq_f64(self, a: float64x2_t, b: float64x2_t) -> uint64x2_t { + unsafe { vceqq_f64(a, b) } + } + #[doc = "See [`arch::vceq_s64`]."] + #[inline(always)] + pub fn vceq_s64(self, a: int64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { vceq_s64(a, b) } + } + #[doc = "See [`arch::vceqq_s64`]."] + #[inline(always)] + pub fn vceqq_s64(self, a: int64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { vceqq_s64(a, b) } + } + #[doc = "See [`arch::vceq_u64`]."] + #[inline(always)] + pub fn vceq_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vceq_u64(a, b) } + } + #[doc = "See [`arch::vceqq_u64`]."] + #[inline(always)] + pub fn vceqq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vceqq_u64(a, b) } + } + #[doc = "See 
[`arch::vceq_p64`]."] + #[inline(always)] + pub fn vceq_p64(self, a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { + unsafe { vceq_p64(a, b) } + } + #[doc = "See [`arch::vceqq_p64`]."] + #[inline(always)] + pub fn vceqq_p64(self, a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { + unsafe { vceqq_p64(a, b) } + } + #[doc = "See [`arch::vceqd_f64`]."] + #[inline(always)] + pub fn vceqd_f64(self, a: f64, b: f64) -> u64 { + unsafe { vceqd_f64(a, b) } + } + #[doc = "See [`arch::vceqs_f32`]."] + #[inline(always)] + pub fn vceqs_f32(self, a: f32, b: f32) -> u32 { + unsafe { vceqs_f32(a, b) } + } + #[doc = "See [`arch::vceqd_s64`]."] + #[inline(always)] + pub fn vceqd_s64(self, a: i64, b: i64) -> u64 { + unsafe { vceqd_s64(a, b) } + } + #[doc = "See [`arch::vceqd_u64`]."] + #[inline(always)] + pub fn vceqd_u64(self, a: u64, b: u64) -> u64 { + unsafe { vceqd_u64(a, b) } + } + #[doc = "See [`arch::vceqz_f32`]."] + #[inline(always)] + pub fn vceqz_f32(self, a: float32x2_t) -> uint32x2_t { + unsafe { vceqz_f32(a) } + } + #[doc = "See [`arch::vceqzq_f32`]."] + #[inline(always)] + pub fn vceqzq_f32(self, a: float32x4_t) -> uint32x4_t { + unsafe { vceqzq_f32(a) } + } + #[doc = "See [`arch::vceqz_f64`]."] + #[inline(always)] + pub fn vceqz_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vceqz_f64(a) } + } + #[doc = "See [`arch::vceqzq_f64`]."] + #[inline(always)] + pub fn vceqzq_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vceqzq_f64(a) } + } + #[doc = "See [`arch::vceqz_s8`]."] + #[inline(always)] + pub fn vceqz_s8(self, a: int8x8_t) -> uint8x8_t { + unsafe { vceqz_s8(a) } + } + #[doc = "See [`arch::vceqzq_s8`]."] + #[inline(always)] + pub fn vceqzq_s8(self, a: int8x16_t) -> uint8x16_t { + unsafe { vceqzq_s8(a) } + } + #[doc = "See [`arch::vceqz_s16`]."] + #[inline(always)] + pub fn vceqz_s16(self, a: int16x4_t) -> uint16x4_t { + unsafe { vceqz_s16(a) } + } + #[doc = "See [`arch::vceqzq_s16`]."] + #[inline(always)] + pub fn vceqzq_s16(self, a: int16x8_t) -> uint16x8_t { + 
unsafe { vceqzq_s16(a) } + } + #[doc = "See [`arch::vceqz_s32`]."] + #[inline(always)] + pub fn vceqz_s32(self, a: int32x2_t) -> uint32x2_t { + unsafe { vceqz_s32(a) } + } + #[doc = "See [`arch::vceqzq_s32`]."] + #[inline(always)] + pub fn vceqzq_s32(self, a: int32x4_t) -> uint32x4_t { + unsafe { vceqzq_s32(a) } + } + #[doc = "See [`arch::vceqz_s64`]."] + #[inline(always)] + pub fn vceqz_s64(self, a: int64x1_t) -> uint64x1_t { + unsafe { vceqz_s64(a) } + } + #[doc = "See [`arch::vceqzq_s64`]."] + #[inline(always)] + pub fn vceqzq_s64(self, a: int64x2_t) -> uint64x2_t { + unsafe { vceqzq_s64(a) } + } + #[doc = "See [`arch::vceqz_p8`]."] + #[inline(always)] + pub fn vceqz_p8(self, a: poly8x8_t) -> uint8x8_t { + unsafe { vceqz_p8(a) } + } + #[doc = "See [`arch::vceqzq_p8`]."] + #[inline(always)] + pub fn vceqzq_p8(self, a: poly8x16_t) -> uint8x16_t { + unsafe { vceqzq_p8(a) } + } + #[doc = "See [`arch::vceqz_p64`]."] + #[inline(always)] + pub fn vceqz_p64(self, a: poly64x1_t) -> uint64x1_t { + unsafe { vceqz_p64(a) } + } + #[doc = "See [`arch::vceqzq_p64`]."] + #[inline(always)] + pub fn vceqzq_p64(self, a: poly64x2_t) -> uint64x2_t { + unsafe { vceqzq_p64(a) } + } + #[doc = "See [`arch::vceqz_u8`]."] + #[inline(always)] + pub fn vceqz_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vceqz_u8(a) } + } + #[doc = "See [`arch::vceqzq_u8`]."] + #[inline(always)] + pub fn vceqzq_u8(self, a: uint8x16_t) -> uint8x16_t { + unsafe { vceqzq_u8(a) } + } + #[doc = "See [`arch::vceqz_u16`]."] + #[inline(always)] + pub fn vceqz_u16(self, a: uint16x4_t) -> uint16x4_t { + unsafe { vceqz_u16(a) } + } + #[doc = "See [`arch::vceqzq_u16`]."] + #[inline(always)] + pub fn vceqzq_u16(self, a: uint16x8_t) -> uint16x8_t { + unsafe { vceqzq_u16(a) } + } + #[doc = "See [`arch::vceqz_u32`]."] + #[inline(always)] + pub fn vceqz_u32(self, a: uint32x2_t) -> uint32x2_t { + unsafe { vceqz_u32(a) } + } + #[doc = "See [`arch::vceqzq_u32`]."] + #[inline(always)] + pub fn vceqzq_u32(self, a: uint32x4_t) 
-> uint32x4_t { + unsafe { vceqzq_u32(a) } + } + #[doc = "See [`arch::vceqz_u64`]."] + #[inline(always)] + pub fn vceqz_u64(self, a: uint64x1_t) -> uint64x1_t { + unsafe { vceqz_u64(a) } + } + #[doc = "See [`arch::vceqzq_u64`]."] + #[inline(always)] + pub fn vceqzq_u64(self, a: uint64x2_t) -> uint64x2_t { + unsafe { vceqzq_u64(a) } + } + #[doc = "See [`arch::vceqzd_s64`]."] + #[inline(always)] + pub fn vceqzd_s64(self, a: i64) -> u64 { + unsafe { vceqzd_s64(a) } + } + #[doc = "See [`arch::vceqzd_u64`]."] + #[inline(always)] + pub fn vceqzd_u64(self, a: u64) -> u64 { + unsafe { vceqzd_u64(a) } + } + #[doc = "See [`arch::vceqzs_f32`]."] + #[inline(always)] + pub fn vceqzs_f32(self, a: f32) -> u32 { + unsafe { vceqzs_f32(a) } + } + #[doc = "See [`arch::vceqzd_f64`]."] + #[inline(always)] + pub fn vceqzd_f64(self, a: f64) -> u64 { + unsafe { vceqzd_f64(a) } + } + #[doc = "See [`arch::vcge_f64`]."] + #[inline(always)] + pub fn vcge_f64(self, a: float64x1_t, b: float64x1_t) -> uint64x1_t { + unsafe { vcge_f64(a, b) } + } + #[doc = "See [`arch::vcgeq_f64`]."] + #[inline(always)] + pub fn vcgeq_f64(self, a: float64x2_t, b: float64x2_t) -> uint64x2_t { + unsafe { vcgeq_f64(a, b) } + } + #[doc = "See [`arch::vcge_s64`]."] + #[inline(always)] + pub fn vcge_s64(self, a: int64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { vcge_s64(a, b) } + } + #[doc = "See [`arch::vcgeq_s64`]."] + #[inline(always)] + pub fn vcgeq_s64(self, a: int64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { vcgeq_s64(a, b) } + } + #[doc = "See [`arch::vcge_u64`]."] + #[inline(always)] + pub fn vcge_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vcge_u64(a, b) } + } + #[doc = "See [`arch::vcgeq_u64`]."] + #[inline(always)] + pub fn vcgeq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vcgeq_u64(a, b) } + } + #[doc = "See [`arch::vcged_f64`]."] + #[inline(always)] + pub fn vcged_f64(self, a: f64, b: f64) -> u64 { + unsafe { vcged_f64(a, b) } + } + #[doc = "See 
[`arch::vcges_f32`]."] + #[inline(always)] + pub fn vcges_f32(self, a: f32, b: f32) -> u32 { + unsafe { vcges_f32(a, b) } + } + #[doc = "See [`arch::vcged_s64`]."] + #[inline(always)] + pub fn vcged_s64(self, a: i64, b: i64) -> u64 { + unsafe { vcged_s64(a, b) } + } + #[doc = "See [`arch::vcged_u64`]."] + #[inline(always)] + pub fn vcged_u64(self, a: u64, b: u64) -> u64 { + unsafe { vcged_u64(a, b) } + } + #[doc = "See [`arch::vcgez_f32`]."] + #[inline(always)] + pub fn vcgez_f32(self, a: float32x2_t) -> uint32x2_t { + unsafe { vcgez_f32(a) } + } + #[doc = "See [`arch::vcgezq_f32`]."] + #[inline(always)] + pub fn vcgezq_f32(self, a: float32x4_t) -> uint32x4_t { + unsafe { vcgezq_f32(a) } + } + #[doc = "See [`arch::vcgez_f64`]."] + #[inline(always)] + pub fn vcgez_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vcgez_f64(a) } + } + #[doc = "See [`arch::vcgezq_f64`]."] + #[inline(always)] + pub fn vcgezq_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vcgezq_f64(a) } + } + #[doc = "See [`arch::vcgez_s8`]."] + #[inline(always)] + pub fn vcgez_s8(self, a: int8x8_t) -> uint8x8_t { + unsafe { vcgez_s8(a) } + } + #[doc = "See [`arch::vcgezq_s8`]."] + #[inline(always)] + pub fn vcgezq_s8(self, a: int8x16_t) -> uint8x16_t { + unsafe { vcgezq_s8(a) } + } + #[doc = "See [`arch::vcgez_s16`]."] + #[inline(always)] + pub fn vcgez_s16(self, a: int16x4_t) -> uint16x4_t { + unsafe { vcgez_s16(a) } + } + #[doc = "See [`arch::vcgezq_s16`]."] + #[inline(always)] + pub fn vcgezq_s16(self, a: int16x8_t) -> uint16x8_t { + unsafe { vcgezq_s16(a) } + } + #[doc = "See [`arch::vcgez_s32`]."] + #[inline(always)] + pub fn vcgez_s32(self, a: int32x2_t) -> uint32x2_t { + unsafe { vcgez_s32(a) } + } + #[doc = "See [`arch::vcgezq_s32`]."] + #[inline(always)] + pub fn vcgezq_s32(self, a: int32x4_t) -> uint32x4_t { + unsafe { vcgezq_s32(a) } + } + #[doc = "See [`arch::vcgez_s64`]."] + #[inline(always)] + pub fn vcgez_s64(self, a: int64x1_t) -> uint64x1_t { + unsafe { vcgez_s64(a) } + } + 
#[doc = "See [`arch::vcgezq_s64`]."] + #[inline(always)] + pub fn vcgezq_s64(self, a: int64x2_t) -> uint64x2_t { + unsafe { vcgezq_s64(a) } + } + #[doc = "See [`arch::vcgezd_f64`]."] + #[inline(always)] + pub fn vcgezd_f64(self, a: f64) -> u64 { + unsafe { vcgezd_f64(a) } + } + #[doc = "See [`arch::vcgezs_f32`]."] + #[inline(always)] + pub fn vcgezs_f32(self, a: f32) -> u32 { + unsafe { vcgezs_f32(a) } + } + #[doc = "See [`arch::vcgezd_s64`]."] + #[inline(always)] + pub fn vcgezd_s64(self, a: i64) -> u64 { + unsafe { vcgezd_s64(a) } + } + #[doc = "See [`arch::vcgt_f64`]."] + #[inline(always)] + pub fn vcgt_f64(self, a: float64x1_t, b: float64x1_t) -> uint64x1_t { + unsafe { vcgt_f64(a, b) } + } + #[doc = "See [`arch::vcgtq_f64`]."] + #[inline(always)] + pub fn vcgtq_f64(self, a: float64x2_t, b: float64x2_t) -> uint64x2_t { + unsafe { vcgtq_f64(a, b) } + } + #[doc = "See [`arch::vcgt_s64`]."] + #[inline(always)] + pub fn vcgt_s64(self, a: int64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { vcgt_s64(a, b) } + } + #[doc = "See [`arch::vcgtq_s64`]."] + #[inline(always)] + pub fn vcgtq_s64(self, a: int64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { vcgtq_s64(a, b) } + } + #[doc = "See [`arch::vcgt_u64`]."] + #[inline(always)] + pub fn vcgt_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vcgt_u64(a, b) } + } + #[doc = "See [`arch::vcgtq_u64`]."] + #[inline(always)] + pub fn vcgtq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vcgtq_u64(a, b) } + } + #[doc = "See [`arch::vcgtd_f64`]."] + #[inline(always)] + pub fn vcgtd_f64(self, a: f64, b: f64) -> u64 { + unsafe { vcgtd_f64(a, b) } + } + #[doc = "See [`arch::vcgts_f32`]."] + #[inline(always)] + pub fn vcgts_f32(self, a: f32, b: f32) -> u32 { + unsafe { vcgts_f32(a, b) } + } + #[doc = "See [`arch::vcgtd_s64`]."] + #[inline(always)] + pub fn vcgtd_s64(self, a: i64, b: i64) -> u64 { + unsafe { vcgtd_s64(a, b) } + } + #[doc = "See [`arch::vcgtd_u64`]."] + #[inline(always)] + pub fn 
vcgtd_u64(self, a: u64, b: u64) -> u64 { + unsafe { vcgtd_u64(a, b) } + } + #[doc = "See [`arch::vcgtz_f32`]."] + #[inline(always)] + pub fn vcgtz_f32(self, a: float32x2_t) -> uint32x2_t { + unsafe { vcgtz_f32(a) } + } + #[doc = "See [`arch::vcgtzq_f32`]."] + #[inline(always)] + pub fn vcgtzq_f32(self, a: float32x4_t) -> uint32x4_t { + unsafe { vcgtzq_f32(a) } + } + #[doc = "See [`arch::vcgtz_f64`]."] + #[inline(always)] + pub fn vcgtz_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vcgtz_f64(a) } + } + #[doc = "See [`arch::vcgtzq_f64`]."] + #[inline(always)] + pub fn vcgtzq_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vcgtzq_f64(a) } + } + #[doc = "See [`arch::vcgtz_s8`]."] + #[inline(always)] + pub fn vcgtz_s8(self, a: int8x8_t) -> uint8x8_t { + unsafe { vcgtz_s8(a) } + } + #[doc = "See [`arch::vcgtzq_s8`]."] + #[inline(always)] + pub fn vcgtzq_s8(self, a: int8x16_t) -> uint8x16_t { + unsafe { vcgtzq_s8(a) } + } + #[doc = "See [`arch::vcgtz_s16`]."] + #[inline(always)] + pub fn vcgtz_s16(self, a: int16x4_t) -> uint16x4_t { + unsafe { vcgtz_s16(a) } + } + #[doc = "See [`arch::vcgtzq_s16`]."] + #[inline(always)] + pub fn vcgtzq_s16(self, a: int16x8_t) -> uint16x8_t { + unsafe { vcgtzq_s16(a) } + } + #[doc = "See [`arch::vcgtz_s32`]."] + #[inline(always)] + pub fn vcgtz_s32(self, a: int32x2_t) -> uint32x2_t { + unsafe { vcgtz_s32(a) } + } + #[doc = "See [`arch::vcgtzq_s32`]."] + #[inline(always)] + pub fn vcgtzq_s32(self, a: int32x4_t) -> uint32x4_t { + unsafe { vcgtzq_s32(a) } + } + #[doc = "See [`arch::vcgtz_s64`]."] + #[inline(always)] + pub fn vcgtz_s64(self, a: int64x1_t) -> uint64x1_t { + unsafe { vcgtz_s64(a) } + } + #[doc = "See [`arch::vcgtzq_s64`]."] + #[inline(always)] + pub fn vcgtzq_s64(self, a: int64x2_t) -> uint64x2_t { + unsafe { vcgtzq_s64(a) } + } + #[doc = "See [`arch::vcgtzd_f64`]."] + #[inline(always)] + pub fn vcgtzd_f64(self, a: f64) -> u64 { + unsafe { vcgtzd_f64(a) } + } + #[doc = "See [`arch::vcgtzs_f32`]."] + #[inline(always)] 
+ pub fn vcgtzs_f32(self, a: f32) -> u32 { + unsafe { vcgtzs_f32(a) } + } + #[doc = "See [`arch::vcgtzd_s64`]."] + #[inline(always)] + pub fn vcgtzd_s64(self, a: i64) -> u64 { + unsafe { vcgtzd_s64(a) } + } + #[doc = "See [`arch::vcle_f64`]."] + #[inline(always)] + pub fn vcle_f64(self, a: float64x1_t, b: float64x1_t) -> uint64x1_t { + unsafe { vcle_f64(a, b) } + } + #[doc = "See [`arch::vcleq_f64`]."] + #[inline(always)] + pub fn vcleq_f64(self, a: float64x2_t, b: float64x2_t) -> uint64x2_t { + unsafe { vcleq_f64(a, b) } + } + #[doc = "See [`arch::vcle_s64`]."] + #[inline(always)] + pub fn vcle_s64(self, a: int64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { vcle_s64(a, b) } + } + #[doc = "See [`arch::vcleq_s64`]."] + #[inline(always)] + pub fn vcleq_s64(self, a: int64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { vcleq_s64(a, b) } + } + #[doc = "See [`arch::vcle_u64`]."] + #[inline(always)] + pub fn vcle_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vcle_u64(a, b) } + } + #[doc = "See [`arch::vcleq_u64`]."] + #[inline(always)] + pub fn vcleq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vcleq_u64(a, b) } + } + #[doc = "See [`arch::vcled_f64`]."] + #[inline(always)] + pub fn vcled_f64(self, a: f64, b: f64) -> u64 { + unsafe { vcled_f64(a, b) } + } + #[doc = "See [`arch::vcles_f32`]."] + #[inline(always)] + pub fn vcles_f32(self, a: f32, b: f32) -> u32 { + unsafe { vcles_f32(a, b) } + } + #[doc = "See [`arch::vcled_u64`]."] + #[inline(always)] + pub fn vcled_u64(self, a: u64, b: u64) -> u64 { + unsafe { vcled_u64(a, b) } + } + #[doc = "See [`arch::vcled_s64`]."] + #[inline(always)] + pub fn vcled_s64(self, a: i64, b: i64) -> u64 { + unsafe { vcled_s64(a, b) } + } + #[doc = "See [`arch::vclez_f32`]."] + #[inline(always)] + pub fn vclez_f32(self, a: float32x2_t) -> uint32x2_t { + unsafe { vclez_f32(a) } + } + #[doc = "See [`arch::vclezq_f32`]."] + #[inline(always)] + pub fn vclezq_f32(self, a: float32x4_t) -> uint32x4_t { + 
unsafe { vclezq_f32(a) } + } + #[doc = "See [`arch::vclez_f64`]."] + #[inline(always)] + pub fn vclez_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vclez_f64(a) } + } + #[doc = "See [`arch::vclezq_f64`]."] + #[inline(always)] + pub fn vclezq_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vclezq_f64(a) } + } + #[doc = "See [`arch::vclez_s8`]."] + #[inline(always)] + pub fn vclez_s8(self, a: int8x8_t) -> uint8x8_t { + unsafe { vclez_s8(a) } + } + #[doc = "See [`arch::vclezq_s8`]."] + #[inline(always)] + pub fn vclezq_s8(self, a: int8x16_t) -> uint8x16_t { + unsafe { vclezq_s8(a) } + } + #[doc = "See [`arch::vclez_s16`]."] + #[inline(always)] + pub fn vclez_s16(self, a: int16x4_t) -> uint16x4_t { + unsafe { vclez_s16(a) } + } + #[doc = "See [`arch::vclezq_s16`]."] + #[inline(always)] + pub fn vclezq_s16(self, a: int16x8_t) -> uint16x8_t { + unsafe { vclezq_s16(a) } + } + #[doc = "See [`arch::vclez_s32`]."] + #[inline(always)] + pub fn vclez_s32(self, a: int32x2_t) -> uint32x2_t { + unsafe { vclez_s32(a) } + } + #[doc = "See [`arch::vclezq_s32`]."] + #[inline(always)] + pub fn vclezq_s32(self, a: int32x4_t) -> uint32x4_t { + unsafe { vclezq_s32(a) } + } + #[doc = "See [`arch::vclez_s64`]."] + #[inline(always)] + pub fn vclez_s64(self, a: int64x1_t) -> uint64x1_t { + unsafe { vclez_s64(a) } + } + #[doc = "See [`arch::vclezq_s64`]."] + #[inline(always)] + pub fn vclezq_s64(self, a: int64x2_t) -> uint64x2_t { + unsafe { vclezq_s64(a) } + } + #[doc = "See [`arch::vclezd_f64`]."] + #[inline(always)] + pub fn vclezd_f64(self, a: f64) -> u64 { + unsafe { vclezd_f64(a) } + } + #[doc = "See [`arch::vclezs_f32`]."] + #[inline(always)] + pub fn vclezs_f32(self, a: f32) -> u32 { + unsafe { vclezs_f32(a) } + } + #[doc = "See [`arch::vclezd_s64`]."] + #[inline(always)] + pub fn vclezd_s64(self, a: i64) -> u64 { + unsafe { vclezd_s64(a) } + } + #[doc = "See [`arch::vclt_f64`]."] + #[inline(always)] + pub fn vclt_f64(self, a: float64x1_t, b: float64x1_t) -> uint64x1_t { + 
unsafe { vclt_f64(a, b) } + } + #[doc = "See [`arch::vcltq_f64`]."] + #[inline(always)] + pub fn vcltq_f64(self, a: float64x2_t, b: float64x2_t) -> uint64x2_t { + unsafe { vcltq_f64(a, b) } + } + #[doc = "See [`arch::vclt_s64`]."] + #[inline(always)] + pub fn vclt_s64(self, a: int64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { vclt_s64(a, b) } + } + #[doc = "See [`arch::vcltq_s64`]."] + #[inline(always)] + pub fn vcltq_s64(self, a: int64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { vcltq_s64(a, b) } + } + #[doc = "See [`arch::vclt_u64`]."] + #[inline(always)] + pub fn vclt_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vclt_u64(a, b) } + } + #[doc = "See [`arch::vcltq_u64`]."] + #[inline(always)] + pub fn vcltq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vcltq_u64(a, b) } + } + #[doc = "See [`arch::vcltd_u64`]."] + #[inline(always)] + pub fn vcltd_u64(self, a: u64, b: u64) -> u64 { + unsafe { vcltd_u64(a, b) } + } + #[doc = "See [`arch::vcltd_s64`]."] + #[inline(always)] + pub fn vcltd_s64(self, a: i64, b: i64) -> u64 { + unsafe { vcltd_s64(a, b) } + } + #[doc = "See [`arch::vclts_f32`]."] + #[inline(always)] + pub fn vclts_f32(self, a: f32, b: f32) -> u32 { + unsafe { vclts_f32(a, b) } + } + #[doc = "See [`arch::vcltd_f64`]."] + #[inline(always)] + pub fn vcltd_f64(self, a: f64, b: f64) -> u64 { + unsafe { vcltd_f64(a, b) } + } + #[doc = "See [`arch::vcltz_f32`]."] + #[inline(always)] + pub fn vcltz_f32(self, a: float32x2_t) -> uint32x2_t { + unsafe { vcltz_f32(a) } + } + #[doc = "See [`arch::vcltzq_f32`]."] + #[inline(always)] + pub fn vcltzq_f32(self, a: float32x4_t) -> uint32x4_t { + unsafe { vcltzq_f32(a) } + } + #[doc = "See [`arch::vcltz_f64`]."] + #[inline(always)] + pub fn vcltz_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vcltz_f64(a) } + } + #[doc = "See [`arch::vcltzq_f64`]."] + #[inline(always)] + pub fn vcltzq_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vcltzq_f64(a) } + } + #[doc = "See 
[`arch::vcltz_s8`]."] + #[inline(always)] + pub fn vcltz_s8(self, a: int8x8_t) -> uint8x8_t { + unsafe { vcltz_s8(a) } + } + #[doc = "See [`arch::vcltzq_s8`]."] + #[inline(always)] + pub fn vcltzq_s8(self, a: int8x16_t) -> uint8x16_t { + unsafe { vcltzq_s8(a) } + } + #[doc = "See [`arch::vcltz_s16`]."] + #[inline(always)] + pub fn vcltz_s16(self, a: int16x4_t) -> uint16x4_t { + unsafe { vcltz_s16(a) } + } + #[doc = "See [`arch::vcltzq_s16`]."] + #[inline(always)] + pub fn vcltzq_s16(self, a: int16x8_t) -> uint16x8_t { + unsafe { vcltzq_s16(a) } + } + #[doc = "See [`arch::vcltz_s32`]."] + #[inline(always)] + pub fn vcltz_s32(self, a: int32x2_t) -> uint32x2_t { + unsafe { vcltz_s32(a) } + } + #[doc = "See [`arch::vcltzq_s32`]."] + #[inline(always)] + pub fn vcltzq_s32(self, a: int32x4_t) -> uint32x4_t { + unsafe { vcltzq_s32(a) } + } + #[doc = "See [`arch::vcltz_s64`]."] + #[inline(always)] + pub fn vcltz_s64(self, a: int64x1_t) -> uint64x1_t { + unsafe { vcltz_s64(a) } + } + #[doc = "See [`arch::vcltzq_s64`]."] + #[inline(always)] + pub fn vcltzq_s64(self, a: int64x2_t) -> uint64x2_t { + unsafe { vcltzq_s64(a) } + } + #[doc = "See [`arch::vcltzd_f64`]."] + #[inline(always)] + pub fn vcltzd_f64(self, a: f64) -> u64 { + unsafe { vcltzd_f64(a) } + } + #[doc = "See [`arch::vcltzs_f32`]."] + #[inline(always)] + pub fn vcltzs_f32(self, a: f32) -> u32 { + unsafe { vcltzs_f32(a) } + } + #[doc = "See [`arch::vcltzd_s64`]."] + #[inline(always)] + pub fn vcltzd_s64(self, a: i64) -> u64 { + unsafe { vcltzd_s64(a) } + } + #[doc = "See [`arch::vcopy_lane_f32`]."] + #[inline(always)] + pub fn vcopy_lane_f32( + self, + a: float32x2_t, + b: float32x2_t, + ) -> float32x2_t { + unsafe { vcopy_lane_f32::(a, b) } + } + #[doc = "See [`arch::vcopy_lane_s8`]."] + #[inline(always)] + pub fn vcopy_lane_s8( + self, + a: int8x8_t, + b: int8x8_t, + ) -> int8x8_t { + unsafe { vcopy_lane_s8::(a, b) } + } + #[doc = "See [`arch::vcopy_lane_s16`]."] + #[inline(always)] + pub fn vcopy_lane_s16( + 
self, + a: int16x4_t, + b: int16x4_t, + ) -> int16x4_t { + unsafe { vcopy_lane_s16::(a, b) } + } + #[doc = "See [`arch::vcopy_lane_s32`]."] + #[inline(always)] + pub fn vcopy_lane_s32( + self, + a: int32x2_t, + b: int32x2_t, + ) -> int32x2_t { + unsafe { vcopy_lane_s32::(a, b) } + } + #[doc = "See [`arch::vcopy_lane_u8`]."] + #[inline(always)] + pub fn vcopy_lane_u8( + self, + a: uint8x8_t, + b: uint8x8_t, + ) -> uint8x8_t { + unsafe { vcopy_lane_u8::(a, b) } + } + #[doc = "See [`arch::vcopy_lane_u16`]."] + #[inline(always)] + pub fn vcopy_lane_u16( + self, + a: uint16x4_t, + b: uint16x4_t, + ) -> uint16x4_t { + unsafe { vcopy_lane_u16::(a, b) } + } + #[doc = "See [`arch::vcopy_lane_u32`]."] + #[inline(always)] + pub fn vcopy_lane_u32( + self, + a: uint32x2_t, + b: uint32x2_t, + ) -> uint32x2_t { + unsafe { vcopy_lane_u32::(a, b) } + } + #[doc = "See [`arch::vcopy_lane_p8`]."] + #[inline(always)] + pub fn vcopy_lane_p8( + self, + a: poly8x8_t, + b: poly8x8_t, + ) -> poly8x8_t { + unsafe { vcopy_lane_p8::(a, b) } + } + #[doc = "See [`arch::vcopy_lane_p16`]."] + #[inline(always)] + pub fn vcopy_lane_p16( + self, + a: poly16x4_t, + b: poly16x4_t, + ) -> poly16x4_t { + unsafe { vcopy_lane_p16::(a, b) } + } + #[doc = "See [`arch::vcopy_laneq_f32`]."] + #[inline(always)] + pub fn vcopy_laneq_f32( + self, + a: float32x2_t, + b: float32x4_t, + ) -> float32x2_t { + unsafe { vcopy_laneq_f32::(a, b) } + } + #[doc = "See [`arch::vcopy_laneq_s8`]."] + #[inline(always)] + pub fn vcopy_laneq_s8( + self, + a: int8x8_t, + b: int8x16_t, + ) -> int8x8_t { + unsafe { vcopy_laneq_s8::(a, b) } + } + #[doc = "See [`arch::vcopy_laneq_s16`]."] + #[inline(always)] + pub fn vcopy_laneq_s16( + self, + a: int16x4_t, + b: int16x8_t, + ) -> int16x4_t { + unsafe { vcopy_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vcopy_laneq_s32`]."] + #[inline(always)] + pub fn vcopy_laneq_s32( + self, + a: int32x2_t, + b: int32x4_t, + ) -> int32x2_t { + unsafe { vcopy_laneq_s32::(a, b) } + } + #[doc = "See 
[`arch::vcopy_laneq_u8`]."] + #[inline(always)] + pub fn vcopy_laneq_u8( + self, + a: uint8x8_t, + b: uint8x16_t, + ) -> uint8x8_t { + unsafe { vcopy_laneq_u8::(a, b) } + } + #[doc = "See [`arch::vcopy_laneq_u16`]."] + #[inline(always)] + pub fn vcopy_laneq_u16( + self, + a: uint16x4_t, + b: uint16x8_t, + ) -> uint16x4_t { + unsafe { vcopy_laneq_u16::(a, b) } + } + #[doc = "See [`arch::vcopy_laneq_u32`]."] + #[inline(always)] + pub fn vcopy_laneq_u32( + self, + a: uint32x2_t, + b: uint32x4_t, + ) -> uint32x2_t { + unsafe { vcopy_laneq_u32::(a, b) } + } + #[doc = "See [`arch::vcopy_laneq_p8`]."] + #[inline(always)] + pub fn vcopy_laneq_p8( + self, + a: poly8x8_t, + b: poly8x16_t, + ) -> poly8x8_t { + unsafe { vcopy_laneq_p8::(a, b) } + } + #[doc = "See [`arch::vcopy_laneq_p16`]."] + #[inline(always)] + pub fn vcopy_laneq_p16( + self, + a: poly16x4_t, + b: poly16x8_t, + ) -> poly16x4_t { + unsafe { vcopy_laneq_p16::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_f32`]."] + #[inline(always)] + pub fn vcopyq_lane_f32( + self, + a: float32x4_t, + b: float32x2_t, + ) -> float32x4_t { + unsafe { vcopyq_lane_f32::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_f64`]."] + #[inline(always)] + pub fn vcopyq_lane_f64( + self, + a: float64x2_t, + b: float64x1_t, + ) -> float64x2_t { + unsafe { vcopyq_lane_f64::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_s64`]."] + #[inline(always)] + pub fn vcopyq_lane_s64( + self, + a: int64x2_t, + b: int64x1_t, + ) -> int64x2_t { + unsafe { vcopyq_lane_s64::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_u64`]."] + #[inline(always)] + pub fn vcopyq_lane_u64( + self, + a: uint64x2_t, + b: uint64x1_t, + ) -> uint64x2_t { + unsafe { vcopyq_lane_u64::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_p64`]."] + #[inline(always)] + pub fn vcopyq_lane_p64( + self, + a: poly64x2_t, + b: poly64x1_t, + ) -> poly64x2_t { + unsafe { vcopyq_lane_p64::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_s8`]."] + #[inline(always)] + pub fn vcopyq_lane_s8( + 
self, + a: int8x16_t, + b: int8x8_t, + ) -> int8x16_t { + unsafe { vcopyq_lane_s8::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_s16`]."] + #[inline(always)] + pub fn vcopyq_lane_s16( + self, + a: int16x8_t, + b: int16x4_t, + ) -> int16x8_t { + unsafe { vcopyq_lane_s16::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_s32`]."] + #[inline(always)] + pub fn vcopyq_lane_s32( + self, + a: int32x4_t, + b: int32x2_t, + ) -> int32x4_t { + unsafe { vcopyq_lane_s32::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_u8`]."] + #[inline(always)] + pub fn vcopyq_lane_u8( + self, + a: uint8x16_t, + b: uint8x8_t, + ) -> uint8x16_t { + unsafe { vcopyq_lane_u8::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_u16`]."] + #[inline(always)] + pub fn vcopyq_lane_u16( + self, + a: uint16x8_t, + b: uint16x4_t, + ) -> uint16x8_t { + unsafe { vcopyq_lane_u16::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_u32`]."] + #[inline(always)] + pub fn vcopyq_lane_u32( + self, + a: uint32x4_t, + b: uint32x2_t, + ) -> uint32x4_t { + unsafe { vcopyq_lane_u32::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_p8`]."] + #[inline(always)] + pub fn vcopyq_lane_p8( + self, + a: poly8x16_t, + b: poly8x8_t, + ) -> poly8x16_t { + unsafe { vcopyq_lane_p8::(a, b) } + } + #[doc = "See [`arch::vcopyq_lane_p16`]."] + #[inline(always)] + pub fn vcopyq_lane_p16( + self, + a: poly16x8_t, + b: poly16x4_t, + ) -> poly16x8_t { + unsafe { vcopyq_lane_p16::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_f32`]."] + #[inline(always)] + pub fn vcopyq_laneq_f32( + self, + a: float32x4_t, + b: float32x4_t, + ) -> float32x4_t { + unsafe { vcopyq_laneq_f32::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_f64`]."] + #[inline(always)] + pub fn vcopyq_laneq_f64( + self, + a: float64x2_t, + b: float64x2_t, + ) -> float64x2_t { + unsafe { vcopyq_laneq_f64::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_s8`]."] + #[inline(always)] + pub fn vcopyq_laneq_s8( + self, + a: int8x16_t, + b: int8x16_t, + ) -> int8x16_t { + unsafe { 
vcopyq_laneq_s8::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_s16`]."] + #[inline(always)] + pub fn vcopyq_laneq_s16( + self, + a: int16x8_t, + b: int16x8_t, + ) -> int16x8_t { + unsafe { vcopyq_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_s32`]."] + #[inline(always)] + pub fn vcopyq_laneq_s32( + self, + a: int32x4_t, + b: int32x4_t, + ) -> int32x4_t { + unsafe { vcopyq_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_s64`]."] + #[inline(always)] + pub fn vcopyq_laneq_s64( + self, + a: int64x2_t, + b: int64x2_t, + ) -> int64x2_t { + unsafe { vcopyq_laneq_s64::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_u8`]."] + #[inline(always)] + pub fn vcopyq_laneq_u8( + self, + a: uint8x16_t, + b: uint8x16_t, + ) -> uint8x16_t { + unsafe { vcopyq_laneq_u8::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_u16`]."] + #[inline(always)] + pub fn vcopyq_laneq_u16( + self, + a: uint16x8_t, + b: uint16x8_t, + ) -> uint16x8_t { + unsafe { vcopyq_laneq_u16::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_u32`]."] + #[inline(always)] + pub fn vcopyq_laneq_u32( + self, + a: uint32x4_t, + b: uint32x4_t, + ) -> uint32x4_t { + unsafe { vcopyq_laneq_u32::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_u64`]."] + #[inline(always)] + pub fn vcopyq_laneq_u64( + self, + a: uint64x2_t, + b: uint64x2_t, + ) -> uint64x2_t { + unsafe { vcopyq_laneq_u64::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_p8`]."] + #[inline(always)] + pub fn vcopyq_laneq_p8( + self, + a: poly8x16_t, + b: poly8x16_t, + ) -> poly8x16_t { + unsafe { vcopyq_laneq_p8::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_p16`]."] + #[inline(always)] + pub fn vcopyq_laneq_p16( + self, + a: poly16x8_t, + b: poly16x8_t, + ) -> poly16x8_t { + unsafe { vcopyq_laneq_p16::(a, b) } + } + #[doc = "See [`arch::vcopyq_laneq_p64`]."] + #[inline(always)] + pub fn vcopyq_laneq_p64( + self, + a: poly64x2_t, + b: poly64x2_t, + ) -> poly64x2_t { + unsafe { vcopyq_laneq_p64::(a, b) } + } + #[doc = "See 
[`arch::vcreate_f64`]."] + #[inline(always)] + pub fn vcreate_f64(self, a: u64) -> float64x1_t { + unsafe { vcreate_f64(a) } + } + #[doc = "See [`arch::vcvt_f32_f64`]."] + #[inline(always)] + pub fn vcvt_f32_f64(self, a: float64x2_t) -> float32x2_t { + unsafe { vcvt_f32_f64(a) } + } + #[doc = "See [`arch::vcvt_f64_f32`]."] + #[inline(always)] + pub fn vcvt_f64_f32(self, a: float32x2_t) -> float64x2_t { + unsafe { vcvt_f64_f32(a) } + } + #[doc = "See [`arch::vcvt_f64_s64`]."] + #[inline(always)] + pub fn vcvt_f64_s64(self, a: int64x1_t) -> float64x1_t { + unsafe { vcvt_f64_s64(a) } + } + #[doc = "See [`arch::vcvtq_f64_s64`]."] + #[inline(always)] + pub fn vcvtq_f64_s64(self, a: int64x2_t) -> float64x2_t { + unsafe { vcvtq_f64_s64(a) } + } + #[doc = "See [`arch::vcvt_f64_u64`]."] + #[inline(always)] + pub fn vcvt_f64_u64(self, a: uint64x1_t) -> float64x1_t { + unsafe { vcvt_f64_u64(a) } + } + #[doc = "See [`arch::vcvtq_f64_u64`]."] + #[inline(always)] + pub fn vcvtq_f64_u64(self, a: uint64x2_t) -> float64x2_t { + unsafe { vcvtq_f64_u64(a) } + } + #[doc = "See [`arch::vcvt_high_f32_f64`]."] + #[inline(always)] + pub fn vcvt_high_f32_f64(self, a: float32x2_t, b: float64x2_t) -> float32x4_t { + unsafe { vcvt_high_f32_f64(a, b) } + } + #[doc = "See [`arch::vcvt_high_f64_f32`]."] + #[inline(always)] + pub fn vcvt_high_f64_f32(self, a: float32x4_t) -> float64x2_t { + unsafe { vcvt_high_f64_f32(a) } + } + #[doc = "See [`arch::vcvt_n_f64_s64`]."] + #[inline(always)] + pub fn vcvt_n_f64_s64(self, a: int64x1_t) -> float64x1_t { + unsafe { vcvt_n_f64_s64::(a) } + } + #[doc = "See [`arch::vcvtq_n_f64_s64`]."] + #[inline(always)] + pub fn vcvtq_n_f64_s64(self, a: int64x2_t) -> float64x2_t { + unsafe { vcvtq_n_f64_s64::(a) } + } + #[doc = "See [`arch::vcvt_n_f64_u64`]."] + #[inline(always)] + pub fn vcvt_n_f64_u64(self, a: uint64x1_t) -> float64x1_t { + unsafe { vcvt_n_f64_u64::(a) } + } + #[doc = "See [`arch::vcvtq_n_f64_u64`]."] + #[inline(always)] + pub fn vcvtq_n_f64_u64(self, 
a: uint64x2_t) -> float64x2_t { + unsafe { vcvtq_n_f64_u64::(a) } + } + #[doc = "See [`arch::vcvt_n_s64_f64`]."] + #[inline(always)] + pub fn vcvt_n_s64_f64(self, a: float64x1_t) -> int64x1_t { + unsafe { vcvt_n_s64_f64::(a) } + } + #[doc = "See [`arch::vcvtq_n_s64_f64`]."] + #[inline(always)] + pub fn vcvtq_n_s64_f64(self, a: float64x2_t) -> int64x2_t { + unsafe { vcvtq_n_s64_f64::(a) } + } + #[doc = "See [`arch::vcvt_n_u64_f64`]."] + #[inline(always)] + pub fn vcvt_n_u64_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vcvt_n_u64_f64::(a) } + } + #[doc = "See [`arch::vcvtq_n_u64_f64`]."] + #[inline(always)] + pub fn vcvtq_n_u64_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vcvtq_n_u64_f64::(a) } + } + #[doc = "See [`arch::vcvt_s64_f64`]."] + #[inline(always)] + pub fn vcvt_s64_f64(self, a: float64x1_t) -> int64x1_t { + unsafe { vcvt_s64_f64(a) } + } + #[doc = "See [`arch::vcvtq_s64_f64`]."] + #[inline(always)] + pub fn vcvtq_s64_f64(self, a: float64x2_t) -> int64x2_t { + unsafe { vcvtq_s64_f64(a) } + } + #[doc = "See [`arch::vcvt_u64_f64`]."] + #[inline(always)] + pub fn vcvt_u64_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vcvt_u64_f64(a) } + } + #[doc = "See [`arch::vcvtq_u64_f64`]."] + #[inline(always)] + pub fn vcvtq_u64_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vcvtq_u64_f64(a) } + } + #[doc = "See [`arch::vcvta_s32_f32`]."] + #[inline(always)] + pub fn vcvta_s32_f32(self, a: float32x2_t) -> int32x2_t { + unsafe { vcvta_s32_f32(a) } + } + #[doc = "See [`arch::vcvtaq_s32_f32`]."] + #[inline(always)] + pub fn vcvtaq_s32_f32(self, a: float32x4_t) -> int32x4_t { + unsafe { vcvtaq_s32_f32(a) } + } + #[doc = "See [`arch::vcvta_s64_f64`]."] + #[inline(always)] + pub fn vcvta_s64_f64(self, a: float64x1_t) -> int64x1_t { + unsafe { vcvta_s64_f64(a) } + } + #[doc = "See [`arch::vcvtaq_s64_f64`]."] + #[inline(always)] + pub fn vcvtaq_s64_f64(self, a: float64x2_t) -> int64x2_t { + unsafe { vcvtaq_s64_f64(a) } + } + #[doc = "See 
[`arch::vcvta_u32_f32`]."] + #[inline(always)] + pub fn vcvta_u32_f32(self, a: float32x2_t) -> uint32x2_t { + unsafe { vcvta_u32_f32(a) } + } + #[doc = "See [`arch::vcvtaq_u32_f32`]."] + #[inline(always)] + pub fn vcvtaq_u32_f32(self, a: float32x4_t) -> uint32x4_t { + unsafe { vcvtaq_u32_f32(a) } + } + #[doc = "See [`arch::vcvta_u64_f64`]."] + #[inline(always)] + pub fn vcvta_u64_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vcvta_u64_f64(a) } + } + #[doc = "See [`arch::vcvtaq_u64_f64`]."] + #[inline(always)] + pub fn vcvtaq_u64_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vcvtaq_u64_f64(a) } + } + #[doc = "See [`arch::vcvtas_s32_f32`]."] + #[inline(always)] + pub fn vcvtas_s32_f32(self, a: f32) -> i32 { + unsafe { vcvtas_s32_f32(a) } + } + #[doc = "See [`arch::vcvtad_s64_f64`]."] + #[inline(always)] + pub fn vcvtad_s64_f64(self, a: f64) -> i64 { + unsafe { vcvtad_s64_f64(a) } + } + #[doc = "See [`arch::vcvtas_u32_f32`]."] + #[inline(always)] + pub fn vcvtas_u32_f32(self, a: f32) -> u32 { + unsafe { vcvtas_u32_f32(a) } + } + #[doc = "See [`arch::vcvtad_u64_f64`]."] + #[inline(always)] + pub fn vcvtad_u64_f64(self, a: f64) -> u64 { + unsafe { vcvtad_u64_f64(a) } + } + #[doc = "See [`arch::vcvtd_f64_s64`]."] + #[inline(always)] + pub fn vcvtd_f64_s64(self, a: i64) -> f64 { + unsafe { vcvtd_f64_s64(a) } + } + #[doc = "See [`arch::vcvts_f32_s32`]."] + #[inline(always)] + pub fn vcvts_f32_s32(self, a: i32) -> f32 { + unsafe { vcvts_f32_s32(a) } + } + #[doc = "See [`arch::vcvtm_s32_f32`]."] + #[inline(always)] + pub fn vcvtm_s32_f32(self, a: float32x2_t) -> int32x2_t { + unsafe { vcvtm_s32_f32(a) } + } + #[doc = "See [`arch::vcvtmq_s32_f32`]."] + #[inline(always)] + pub fn vcvtmq_s32_f32(self, a: float32x4_t) -> int32x4_t { + unsafe { vcvtmq_s32_f32(a) } + } + #[doc = "See [`arch::vcvtm_s64_f64`]."] + #[inline(always)] + pub fn vcvtm_s64_f64(self, a: float64x1_t) -> int64x1_t { + unsafe { vcvtm_s64_f64(a) } + } + #[doc = "See [`arch::vcvtmq_s64_f64`]."] + 
#[inline(always)] + pub fn vcvtmq_s64_f64(self, a: float64x2_t) -> int64x2_t { + unsafe { vcvtmq_s64_f64(a) } + } + #[doc = "See [`arch::vcvtm_u32_f32`]."] + #[inline(always)] + pub fn vcvtm_u32_f32(self, a: float32x2_t) -> uint32x2_t { + unsafe { vcvtm_u32_f32(a) } + } + #[doc = "See [`arch::vcvtmq_u32_f32`]."] + #[inline(always)] + pub fn vcvtmq_u32_f32(self, a: float32x4_t) -> uint32x4_t { + unsafe { vcvtmq_u32_f32(a) } + } + #[doc = "See [`arch::vcvtm_u64_f64`]."] + #[inline(always)] + pub fn vcvtm_u64_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vcvtm_u64_f64(a) } + } + #[doc = "See [`arch::vcvtmq_u64_f64`]."] + #[inline(always)] + pub fn vcvtmq_u64_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vcvtmq_u64_f64(a) } + } + #[doc = "See [`arch::vcvtms_s32_f32`]."] + #[inline(always)] + pub fn vcvtms_s32_f32(self, a: f32) -> i32 { + unsafe { vcvtms_s32_f32(a) } + } + #[doc = "See [`arch::vcvtmd_s64_f64`]."] + #[inline(always)] + pub fn vcvtmd_s64_f64(self, a: f64) -> i64 { + unsafe { vcvtmd_s64_f64(a) } + } + #[doc = "See [`arch::vcvtms_u32_f32`]."] + #[inline(always)] + pub fn vcvtms_u32_f32(self, a: f32) -> u32 { + unsafe { vcvtms_u32_f32(a) } + } + #[doc = "See [`arch::vcvtmd_u64_f64`]."] + #[inline(always)] + pub fn vcvtmd_u64_f64(self, a: f64) -> u64 { + unsafe { vcvtmd_u64_f64(a) } + } + #[doc = "See [`arch::vcvtn_s32_f32`]."] + #[inline(always)] + pub fn vcvtn_s32_f32(self, a: float32x2_t) -> int32x2_t { + unsafe { vcvtn_s32_f32(a) } + } + #[doc = "See [`arch::vcvtnq_s32_f32`]."] + #[inline(always)] + pub fn vcvtnq_s32_f32(self, a: float32x4_t) -> int32x4_t { + unsafe { vcvtnq_s32_f32(a) } + } + #[doc = "See [`arch::vcvtn_s64_f64`]."] + #[inline(always)] + pub fn vcvtn_s64_f64(self, a: float64x1_t) -> int64x1_t { + unsafe { vcvtn_s64_f64(a) } + } + #[doc = "See [`arch::vcvtnq_s64_f64`]."] + #[inline(always)] + pub fn vcvtnq_s64_f64(self, a: float64x2_t) -> int64x2_t { + unsafe { vcvtnq_s64_f64(a) } + } + #[doc = "See [`arch::vcvtn_u32_f32`]."] 
+ #[inline(always)] + pub fn vcvtn_u32_f32(self, a: float32x2_t) -> uint32x2_t { + unsafe { vcvtn_u32_f32(a) } + } + #[doc = "See [`arch::vcvtnq_u32_f32`]."] + #[inline(always)] + pub fn vcvtnq_u32_f32(self, a: float32x4_t) -> uint32x4_t { + unsafe { vcvtnq_u32_f32(a) } + } + #[doc = "See [`arch::vcvtn_u64_f64`]."] + #[inline(always)] + pub fn vcvtn_u64_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vcvtn_u64_f64(a) } + } + #[doc = "See [`arch::vcvtnq_u64_f64`]."] + #[inline(always)] + pub fn vcvtnq_u64_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vcvtnq_u64_f64(a) } + } + #[doc = "See [`arch::vcvtns_s32_f32`]."] + #[inline(always)] + pub fn vcvtns_s32_f32(self, a: f32) -> i32 { + unsafe { vcvtns_s32_f32(a) } + } + #[doc = "See [`arch::vcvtnd_s64_f64`]."] + #[inline(always)] + pub fn vcvtnd_s64_f64(self, a: f64) -> i64 { + unsafe { vcvtnd_s64_f64(a) } + } + #[doc = "See [`arch::vcvtns_u32_f32`]."] + #[inline(always)] + pub fn vcvtns_u32_f32(self, a: f32) -> u32 { + unsafe { vcvtns_u32_f32(a) } + } + #[doc = "See [`arch::vcvtnd_u64_f64`]."] + #[inline(always)] + pub fn vcvtnd_u64_f64(self, a: f64) -> u64 { + unsafe { vcvtnd_u64_f64(a) } + } + #[doc = "See [`arch::vcvtp_s32_f32`]."] + #[inline(always)] + pub fn vcvtp_s32_f32(self, a: float32x2_t) -> int32x2_t { + unsafe { vcvtp_s32_f32(a) } + } + #[doc = "See [`arch::vcvtpq_s32_f32`]."] + #[inline(always)] + pub fn vcvtpq_s32_f32(self, a: float32x4_t) -> int32x4_t { + unsafe { vcvtpq_s32_f32(a) } + } + #[doc = "See [`arch::vcvtp_s64_f64`]."] + #[inline(always)] + pub fn vcvtp_s64_f64(self, a: float64x1_t) -> int64x1_t { + unsafe { vcvtp_s64_f64(a) } + } + #[doc = "See [`arch::vcvtpq_s64_f64`]."] + #[inline(always)] + pub fn vcvtpq_s64_f64(self, a: float64x2_t) -> int64x2_t { + unsafe { vcvtpq_s64_f64(a) } + } + #[doc = "See [`arch::vcvtp_u32_f32`]."] + #[inline(always)] + pub fn vcvtp_u32_f32(self, a: float32x2_t) -> uint32x2_t { + unsafe { vcvtp_u32_f32(a) } + } + #[doc = "See 
[`arch::vcvtpq_u32_f32`]."] + #[inline(always)] + pub fn vcvtpq_u32_f32(self, a: float32x4_t) -> uint32x4_t { + unsafe { vcvtpq_u32_f32(a) } + } + #[doc = "See [`arch::vcvtp_u64_f64`]."] + #[inline(always)] + pub fn vcvtp_u64_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vcvtp_u64_f64(a) } + } + #[doc = "See [`arch::vcvtpq_u64_f64`]."] + #[inline(always)] + pub fn vcvtpq_u64_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vcvtpq_u64_f64(a) } + } + #[doc = "See [`arch::vcvtps_s32_f32`]."] + #[inline(always)] + pub fn vcvtps_s32_f32(self, a: f32) -> i32 { + unsafe { vcvtps_s32_f32(a) } + } + #[doc = "See [`arch::vcvtpd_s64_f64`]."] + #[inline(always)] + pub fn vcvtpd_s64_f64(self, a: f64) -> i64 { + unsafe { vcvtpd_s64_f64(a) } + } + #[doc = "See [`arch::vcvtps_u32_f32`]."] + #[inline(always)] + pub fn vcvtps_u32_f32(self, a: f32) -> u32 { + unsafe { vcvtps_u32_f32(a) } + } + #[doc = "See [`arch::vcvtpd_u64_f64`]."] + #[inline(always)] + pub fn vcvtpd_u64_f64(self, a: f64) -> u64 { + unsafe { vcvtpd_u64_f64(a) } + } + #[doc = "See [`arch::vcvts_f32_u32`]."] + #[inline(always)] + pub fn vcvts_f32_u32(self, a: u32) -> f32 { + unsafe { vcvts_f32_u32(a) } + } + #[doc = "See [`arch::vcvtd_f64_u64`]."] + #[inline(always)] + pub fn vcvtd_f64_u64(self, a: u64) -> f64 { + unsafe { vcvtd_f64_u64(a) } + } + #[doc = "See [`arch::vcvts_n_f32_s32`]."] + #[inline(always)] + pub fn vcvts_n_f32_s32(self, a: i32) -> f32 { + unsafe { vcvts_n_f32_s32::(a) } + } + #[doc = "See [`arch::vcvtd_n_f64_s64`]."] + #[inline(always)] + pub fn vcvtd_n_f64_s64(self, a: i64) -> f64 { + unsafe { vcvtd_n_f64_s64::(a) } + } + #[doc = "See [`arch::vcvts_n_f32_u32`]."] + #[inline(always)] + pub fn vcvts_n_f32_u32(self, a: u32) -> f32 { + unsafe { vcvts_n_f32_u32::(a) } + } + #[doc = "See [`arch::vcvtd_n_f64_u64`]."] + #[inline(always)] + pub fn vcvtd_n_f64_u64(self, a: u64) -> f64 { + unsafe { vcvtd_n_f64_u64::(a) } + } + #[doc = "See [`arch::vcvts_n_s32_f32`]."] + #[inline(always)] + pub fn 
vcvts_n_s32_f32(self, a: f32) -> i32 { + unsafe { vcvts_n_s32_f32::(a) } + } + #[doc = "See [`arch::vcvtd_n_s64_f64`]."] + #[inline(always)] + pub fn vcvtd_n_s64_f64(self, a: f64) -> i64 { + unsafe { vcvtd_n_s64_f64::(a) } + } + #[doc = "See [`arch::vcvts_n_u32_f32`]."] + #[inline(always)] + pub fn vcvts_n_u32_f32(self, a: f32) -> u32 { + unsafe { vcvts_n_u32_f32::(a) } + } + #[doc = "See [`arch::vcvtd_n_u64_f64`]."] + #[inline(always)] + pub fn vcvtd_n_u64_f64(self, a: f64) -> u64 { + unsafe { vcvtd_n_u64_f64::(a) } + } + #[doc = "See [`arch::vcvts_s32_f32`]."] + #[inline(always)] + pub fn vcvts_s32_f32(self, a: f32) -> i32 { + unsafe { vcvts_s32_f32(a) } + } + #[doc = "See [`arch::vcvtd_s64_f64`]."] + #[inline(always)] + pub fn vcvtd_s64_f64(self, a: f64) -> i64 { + unsafe { vcvtd_s64_f64(a) } + } + #[doc = "See [`arch::vcvts_u32_f32`]."] + #[inline(always)] + pub fn vcvts_u32_f32(self, a: f32) -> u32 { + unsafe { vcvts_u32_f32(a) } + } + #[doc = "See [`arch::vcvtd_u64_f64`]."] + #[inline(always)] + pub fn vcvtd_u64_f64(self, a: f64) -> u64 { + unsafe { vcvtd_u64_f64(a) } + } + #[doc = "See [`arch::vcvtx_f32_f64`]."] + #[inline(always)] + pub fn vcvtx_f32_f64(self, a: float64x2_t) -> float32x2_t { + unsafe { vcvtx_f32_f64(a) } + } + #[doc = "See [`arch::vcvtx_high_f32_f64`]."] + #[inline(always)] + pub fn vcvtx_high_f32_f64(self, a: float32x2_t, b: float64x2_t) -> float32x4_t { + unsafe { vcvtx_high_f32_f64(a, b) } + } + #[doc = "See [`arch::vcvtxd_f32_f64`]."] + #[inline(always)] + pub fn vcvtxd_f32_f64(self, a: f64) -> f32 { + unsafe { vcvtxd_f32_f64(a) } + } + #[doc = "See [`arch::vdiv_f32`]."] + #[inline(always)] + pub fn vdiv_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vdiv_f32(a, b) } + } + #[doc = "See [`arch::vdivq_f32`]."] + #[inline(always)] + pub fn vdivq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vdivq_f32(a, b) } + } + #[doc = "See [`arch::vdiv_f64`]."] + #[inline(always)] + pub fn vdiv_f64(self, 
a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vdiv_f64(a, b) } + } + #[doc = "See [`arch::vdivq_f64`]."] + #[inline(always)] + pub fn vdivq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vdivq_f64(a, b) } + } + #[doc = "See [`arch::vdup_lane_f64`]."] + #[inline(always)] + pub fn vdup_lane_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vdup_lane_f64::(a) } + } + #[doc = "See [`arch::vdup_lane_p64`]."] + #[inline(always)] + pub fn vdup_lane_p64(self, a: poly64x1_t) -> poly64x1_t { + unsafe { vdup_lane_p64::(a) } + } + #[doc = "See [`arch::vdup_laneq_f64`]."] + #[inline(always)] + pub fn vdup_laneq_f64(self, a: float64x2_t) -> float64x1_t { + unsafe { vdup_laneq_f64::(a) } + } + #[doc = "See [`arch::vdup_laneq_p64`]."] + #[inline(always)] + pub fn vdup_laneq_p64(self, a: poly64x2_t) -> poly64x1_t { + unsafe { vdup_laneq_p64::(a) } + } + #[doc = "See [`arch::vdupb_lane_s8`]."] + #[inline(always)] + pub fn vdupb_lane_s8(self, a: int8x8_t) -> i8 { + unsafe { vdupb_lane_s8::(a) } + } + #[doc = "See [`arch::vduph_laneq_s16`]."] + #[inline(always)] + pub fn vduph_laneq_s16(self, a: int16x8_t) -> i16 { + unsafe { vduph_laneq_s16::(a) } + } + #[doc = "See [`arch::vdupb_lane_u8`]."] + #[inline(always)] + pub fn vdupb_lane_u8(self, a: uint8x8_t) -> u8 { + unsafe { vdupb_lane_u8::(a) } + } + #[doc = "See [`arch::vduph_laneq_u16`]."] + #[inline(always)] + pub fn vduph_laneq_u16(self, a: uint16x8_t) -> u16 { + unsafe { vduph_laneq_u16::(a) } + } + #[doc = "See [`arch::vdupb_lane_p8`]."] + #[inline(always)] + pub fn vdupb_lane_p8(self, a: poly8x8_t) -> p8 { + unsafe { vdupb_lane_p8::(a) } + } + #[doc = "See [`arch::vduph_laneq_p16`]."] + #[inline(always)] + pub fn vduph_laneq_p16(self, a: poly16x8_t) -> p16 { + unsafe { vduph_laneq_p16::(a) } + } + #[doc = "See [`arch::vdupb_laneq_s8`]."] + #[inline(always)] + pub fn vdupb_laneq_s8(self, a: int8x16_t) -> i8 { + unsafe { vdupb_laneq_s8::(a) } + } + #[doc = "See [`arch::vdupb_laneq_u8`]."] 
+ #[inline(always)] + pub fn vdupb_laneq_u8(self, a: uint8x16_t) -> u8 { + unsafe { vdupb_laneq_u8::(a) } + } + #[doc = "See [`arch::vdupb_laneq_p8`]."] + #[inline(always)] + pub fn vdupb_laneq_p8(self, a: poly8x16_t) -> p8 { + unsafe { vdupb_laneq_p8::(a) } + } + #[doc = "See [`arch::vdupd_lane_f64`]."] + #[inline(always)] + pub fn vdupd_lane_f64(self, a: float64x1_t) -> f64 { + unsafe { vdupd_lane_f64::(a) } + } + #[doc = "See [`arch::vdupd_lane_s64`]."] + #[inline(always)] + pub fn vdupd_lane_s64(self, a: int64x1_t) -> i64 { + unsafe { vdupd_lane_s64::(a) } + } + #[doc = "See [`arch::vdupd_lane_u64`]."] + #[inline(always)] + pub fn vdupd_lane_u64(self, a: uint64x1_t) -> u64 { + unsafe { vdupd_lane_u64::(a) } + } + #[doc = "See [`arch::vdupq_lane_f64`]."] + #[inline(always)] + pub fn vdupq_lane_f64(self, a: float64x1_t) -> float64x2_t { + unsafe { vdupq_lane_f64::(a) } + } + #[doc = "See [`arch::vdupq_lane_p64`]."] + #[inline(always)] + pub fn vdupq_lane_p64(self, a: poly64x1_t) -> poly64x2_t { + unsafe { vdupq_lane_p64::(a) } + } + #[doc = "See [`arch::vdupq_laneq_f64`]."] + #[inline(always)] + pub fn vdupq_laneq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vdupq_laneq_f64::(a) } + } + #[doc = "See [`arch::vdupq_laneq_p64`]."] + #[inline(always)] + pub fn vdupq_laneq_p64(self, a: poly64x2_t) -> poly64x2_t { + unsafe { vdupq_laneq_p64::(a) } + } + #[doc = "See [`arch::vdups_lane_f32`]."] + #[inline(always)] + pub fn vdups_lane_f32(self, a: float32x2_t) -> f32 { + unsafe { vdups_lane_f32::(a) } + } + #[doc = "See [`arch::vdupd_laneq_f64`]."] + #[inline(always)] + pub fn vdupd_laneq_f64(self, a: float64x2_t) -> f64 { + unsafe { vdupd_laneq_f64::(a) } + } + #[doc = "See [`arch::vdups_lane_s32`]."] + #[inline(always)] + pub fn vdups_lane_s32(self, a: int32x2_t) -> i32 { + unsafe { vdups_lane_s32::(a) } + } + #[doc = "See [`arch::vdupd_laneq_s64`]."] + #[inline(always)] + pub fn vdupd_laneq_s64(self, a: int64x2_t) -> i64 { + unsafe { vdupd_laneq_s64::(a) } + 
} + #[doc = "See [`arch::vdups_lane_u32`]."] + #[inline(always)] + pub fn vdups_lane_u32(self, a: uint32x2_t) -> u32 { + unsafe { vdups_lane_u32::(a) } + } + #[doc = "See [`arch::vdupd_laneq_u64`]."] + #[inline(always)] + pub fn vdupd_laneq_u64(self, a: uint64x2_t) -> u64 { + unsafe { vdupd_laneq_u64::(a) } + } + #[doc = "See [`arch::vdups_laneq_f32`]."] + #[inline(always)] + pub fn vdups_laneq_f32(self, a: float32x4_t) -> f32 { + unsafe { vdups_laneq_f32::(a) } + } + #[doc = "See [`arch::vduph_lane_s16`]."] + #[inline(always)] + pub fn vduph_lane_s16(self, a: int16x4_t) -> i16 { + unsafe { vduph_lane_s16::(a) } + } + #[doc = "See [`arch::vdups_laneq_s32`]."] + #[inline(always)] + pub fn vdups_laneq_s32(self, a: int32x4_t) -> i32 { + unsafe { vdups_laneq_s32::(a) } + } + #[doc = "See [`arch::vduph_lane_u16`]."] + #[inline(always)] + pub fn vduph_lane_u16(self, a: uint16x4_t) -> u16 { + unsafe { vduph_lane_u16::(a) } + } + #[doc = "See [`arch::vdups_laneq_u32`]."] + #[inline(always)] + pub fn vdups_laneq_u32(self, a: uint32x4_t) -> u32 { + unsafe { vdups_laneq_u32::(a) } + } + #[doc = "See [`arch::vduph_lane_p16`]."] + #[inline(always)] + pub fn vduph_lane_p16(self, a: poly16x4_t) -> p16 { + unsafe { vduph_lane_p16::(a) } + } + #[doc = "See [`arch::veor3q_s8`]."] + #[inline(always)] + pub fn veor3q_s8(self, a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + unsafe { veor3q_s8(a, b, c) } + } + #[doc = "See [`arch::veor3q_s16`]."] + #[inline(always)] + pub fn veor3q_s16(self, a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + unsafe { veor3q_s16(a, b, c) } + } + #[doc = "See [`arch::veor3q_s32`]."] + #[inline(always)] + pub fn veor3q_s32(self, a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + unsafe { veor3q_s32(a, b, c) } + } + #[doc = "See [`arch::veor3q_s64`]."] + #[inline(always)] + pub fn veor3q_s64(self, a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + unsafe { veor3q_s64(a, b, c) } + } + #[doc = "See [`arch::veor3q_u8`]."] + 
#[inline(always)] + pub fn veor3q_u8(self, a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { veor3q_u8(a, b, c) } + } + #[doc = "See [`arch::veor3q_u16`]."] + #[inline(always)] + pub fn veor3q_u16(self, a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + unsafe { veor3q_u16(a, b, c) } + } + #[doc = "See [`arch::veor3q_u32`]."] + #[inline(always)] + pub fn veor3q_u32(self, a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe { veor3q_u32(a, b, c) } + } + #[doc = "See [`arch::veor3q_u64`]."] + #[inline(always)] + pub fn veor3q_u64(self, a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + unsafe { veor3q_u64(a, b, c) } + } + #[doc = "See [`arch::vextq_f64`]."] + #[inline(always)] + pub fn vextq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vextq_f64::(a, b) } + } + #[doc = "See [`arch::vextq_p64`]."] + #[inline(always)] + pub fn vextq_p64(self, a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { vextq_p64::(a, b) } + } + #[doc = "See [`arch::vfma_f64`]."] + #[inline(always)] + pub fn vfma_f64(self, a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + unsafe { vfma_f64(a, b, c) } + } + #[doc = "See [`arch::vfma_lane_f32`]."] + #[inline(always)] + pub fn vfma_lane_f32( + self, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + ) -> float32x2_t { + unsafe { vfma_lane_f32::(a, b, c) } + } + #[doc = "See [`arch::vfma_laneq_f32`]."] + #[inline(always)] + pub fn vfma_laneq_f32( + self, + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, + ) -> float32x2_t { + unsafe { vfma_laneq_f32::(a, b, c) } + } + #[doc = "See [`arch::vfmaq_lane_f32`]."] + #[inline(always)] + pub fn vfmaq_lane_f32( + self, + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, + ) -> float32x4_t { + unsafe { vfmaq_lane_f32::(a, b, c) } + } + #[doc = "See [`arch::vfmaq_laneq_f32`]."] + #[inline(always)] + pub fn vfmaq_laneq_f32( + self, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + ) -> 
float32x4_t { + unsafe { vfmaq_laneq_f32::(a, b, c) } + } + #[doc = "See [`arch::vfmaq_laneq_f64`]."] + #[inline(always)] + pub fn vfmaq_laneq_f64( + self, + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + ) -> float64x2_t { + unsafe { vfmaq_laneq_f64::(a, b, c) } + } + #[doc = "See [`arch::vfma_lane_f64`]."] + #[inline(always)] + pub fn vfma_lane_f64( + self, + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, + ) -> float64x1_t { + unsafe { vfma_lane_f64::(a, b, c) } + } + #[doc = "See [`arch::vfma_laneq_f64`]."] + #[inline(always)] + pub fn vfma_laneq_f64( + self, + a: float64x1_t, + b: float64x1_t, + c: float64x2_t, + ) -> float64x1_t { + unsafe { vfma_laneq_f64::(a, b, c) } + } + #[doc = "See [`arch::vfma_n_f64`]."] + #[inline(always)] + pub fn vfma_n_f64(self, a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { + unsafe { vfma_n_f64(a, b, c) } + } + #[doc = "See [`arch::vfmad_lane_f64`]."] + #[inline(always)] + pub fn vfmad_lane_f64(self, a: f64, b: f64, c: float64x1_t) -> f64 { + unsafe { vfmad_lane_f64::(a, b, c) } + } + #[doc = "See [`arch::vfmaq_f64`]."] + #[inline(always)] + pub fn vfmaq_f64(self, a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + unsafe { vfmaq_f64(a, b, c) } + } + #[doc = "See [`arch::vfmaq_lane_f64`]."] + #[inline(always)] + pub fn vfmaq_lane_f64( + self, + a: float64x2_t, + b: float64x2_t, + c: float64x1_t, + ) -> float64x2_t { + unsafe { vfmaq_lane_f64::(a, b, c) } + } + #[doc = "See [`arch::vfmaq_n_f64`]."] + #[inline(always)] + pub fn vfmaq_n_f64(self, a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { + unsafe { vfmaq_n_f64(a, b, c) } + } + #[doc = "See [`arch::vfmas_lane_f32`]."] + #[inline(always)] + pub fn vfmas_lane_f32(self, a: f32, b: f32, c: float32x2_t) -> f32 { + unsafe { vfmas_lane_f32::(a, b, c) } + } + #[doc = "See [`arch::vfmas_laneq_f32`]."] + #[inline(always)] + pub fn vfmas_laneq_f32(self, a: f32, b: f32, c: float32x4_t) -> f32 { + unsafe { vfmas_laneq_f32::(a, b, c) } + } + 
#[doc = "See [`arch::vfmad_laneq_f64`]."] + #[inline(always)] + pub fn vfmad_laneq_f64(self, a: f64, b: f64, c: float64x2_t) -> f64 { + unsafe { vfmad_laneq_f64::(a, b, c) } + } + #[doc = "See [`arch::vfms_f64`]."] + #[inline(always)] + pub fn vfms_f64(self, a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + unsafe { vfms_f64(a, b, c) } + } + #[doc = "See [`arch::vfms_lane_f32`]."] + #[inline(always)] + pub fn vfms_lane_f32( + self, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + ) -> float32x2_t { + unsafe { vfms_lane_f32::(a, b, c) } + } + #[doc = "See [`arch::vfms_laneq_f32`]."] + #[inline(always)] + pub fn vfms_laneq_f32( + self, + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, + ) -> float32x2_t { + unsafe { vfms_laneq_f32::(a, b, c) } + } + #[doc = "See [`arch::vfmsq_lane_f32`]."] + #[inline(always)] + pub fn vfmsq_lane_f32( + self, + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, + ) -> float32x4_t { + unsafe { vfmsq_lane_f32::(a, b, c) } + } + #[doc = "See [`arch::vfmsq_laneq_f32`]."] + #[inline(always)] + pub fn vfmsq_laneq_f32( + self, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + ) -> float32x4_t { + unsafe { vfmsq_laneq_f32::(a, b, c) } + } + #[doc = "See [`arch::vfmsq_laneq_f64`]."] + #[inline(always)] + pub fn vfmsq_laneq_f64( + self, + a: float64x2_t, + b: float64x2_t, + c: float64x2_t, + ) -> float64x2_t { + unsafe { vfmsq_laneq_f64::(a, b, c) } + } + #[doc = "See [`arch::vfms_lane_f64`]."] + #[inline(always)] + pub fn vfms_lane_f64( + self, + a: float64x1_t, + b: float64x1_t, + c: float64x1_t, + ) -> float64x1_t { + unsafe { vfms_lane_f64::(a, b, c) } + } + #[doc = "See [`arch::vfms_laneq_f64`]."] + #[inline(always)] + pub fn vfms_laneq_f64( + self, + a: float64x1_t, + b: float64x1_t, + c: float64x2_t, + ) -> float64x1_t { + unsafe { vfms_laneq_f64::(a, b, c) } + } + #[doc = "See [`arch::vfms_n_f64`]."] + #[inline(always)] + pub fn vfms_n_f64(self, a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { 
+ unsafe { vfms_n_f64(a, b, c) } + } + #[doc = "See [`arch::vfmsq_f64`]."] + #[inline(always)] + pub fn vfmsq_f64(self, a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + unsafe { vfmsq_f64(a, b, c) } + } + #[doc = "See [`arch::vfmsq_lane_f64`]."] + #[inline(always)] + pub fn vfmsq_lane_f64( + self, + a: float64x2_t, + b: float64x2_t, + c: float64x1_t, + ) -> float64x2_t { + unsafe { vfmsq_lane_f64::(a, b, c) } + } + #[doc = "See [`arch::vfmsq_n_f64`]."] + #[inline(always)] + pub fn vfmsq_n_f64(self, a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { + unsafe { vfmsq_n_f64(a, b, c) } + } + #[doc = "See [`arch::vfmss_lane_f32`]."] + #[inline(always)] + pub fn vfmss_lane_f32(self, a: f32, b: f32, c: float32x2_t) -> f32 { + unsafe { vfmss_lane_f32::(a, b, c) } + } + #[doc = "See [`arch::vfmss_laneq_f32`]."] + #[inline(always)] + pub fn vfmss_laneq_f32(self, a: f32, b: f32, c: float32x4_t) -> f32 { + unsafe { vfmss_laneq_f32::(a, b, c) } + } + #[doc = "See [`arch::vfmsd_lane_f64`]."] + #[inline(always)] + pub fn vfmsd_lane_f64(self, a: f64, b: f64, c: float64x1_t) -> f64 { + unsafe { vfmsd_lane_f64::(a, b, c) } + } + #[doc = "See [`arch::vfmsd_laneq_f64`]."] + #[inline(always)] + pub fn vfmsd_laneq_f64(self, a: f64, b: f64, c: float64x2_t) -> f64 { + unsafe { vfmsd_laneq_f64::(a, b, c) } + } + #[doc = "See [`arch::vld1_f32`]."] + #[inline(always)] + pub unsafe fn vld1_f32(self, ptr: *const f32) -> float32x2_t { + unsafe { vld1_f32(ptr) } + } + #[doc = "See [`arch::vld1q_f32`]."] + #[inline(always)] + pub unsafe fn vld1q_f32(self, ptr: *const f32) -> float32x4_t { + unsafe { vld1q_f32(ptr) } + } + #[doc = "See [`arch::vld1_f64`]."] + #[inline(always)] + pub unsafe fn vld1_f64(self, ptr: *const f64) -> float64x1_t { + unsafe { vld1_f64(ptr) } + } + #[doc = "See [`arch::vld1q_f64`]."] + #[inline(always)] + pub unsafe fn vld1q_f64(self, ptr: *const f64) -> float64x2_t { + unsafe { vld1q_f64(ptr) } + } + #[doc = "See [`arch::vld1_s8`]."] + 
#[inline(always)] + pub unsafe fn vld1_s8(self, ptr: *const i8) -> int8x8_t { + unsafe { vld1_s8(ptr) } + } + #[doc = "See [`arch::vld1q_s8`]."] + #[inline(always)] + pub unsafe fn vld1q_s8(self, ptr: *const i8) -> int8x16_t { + unsafe { vld1q_s8(ptr) } + } + #[doc = "See [`arch::vld1_s16`]."] + #[inline(always)] + pub unsafe fn vld1_s16(self, ptr: *const i16) -> int16x4_t { + unsafe { vld1_s16(ptr) } + } + #[doc = "See [`arch::vld1q_s16`]."] + #[inline(always)] + pub unsafe fn vld1q_s16(self, ptr: *const i16) -> int16x8_t { + unsafe { vld1q_s16(ptr) } + } + #[doc = "See [`arch::vld1_s32`]."] + #[inline(always)] + pub unsafe fn vld1_s32(self, ptr: *const i32) -> int32x2_t { + unsafe { vld1_s32(ptr) } + } + #[doc = "See [`arch::vld1q_s32`]."] + #[inline(always)] + pub unsafe fn vld1q_s32(self, ptr: *const i32) -> int32x4_t { + unsafe { vld1q_s32(ptr) } + } + #[doc = "See [`arch::vld1_s64`]."] + #[inline(always)] + pub unsafe fn vld1_s64(self, ptr: *const i64) -> int64x1_t { + unsafe { vld1_s64(ptr) } + } + #[doc = "See [`arch::vld1q_s64`]."] + #[inline(always)] + pub unsafe fn vld1q_s64(self, ptr: *const i64) -> int64x2_t { + unsafe { vld1q_s64(ptr) } + } + #[doc = "See [`arch::vld1_u8`]."] + #[inline(always)] + pub unsafe fn vld1_u8(self, ptr: *const u8) -> uint8x8_t { + unsafe { vld1_u8(ptr) } + } + #[doc = "See [`arch::vld1q_u8`]."] + #[inline(always)] + pub unsafe fn vld1q_u8(self, ptr: *const u8) -> uint8x16_t { + unsafe { vld1q_u8(ptr) } + } + #[doc = "See [`arch::vld1_u16`]."] + #[inline(always)] + pub unsafe fn vld1_u16(self, ptr: *const u16) -> uint16x4_t { + unsafe { vld1_u16(ptr) } + } + #[doc = "See [`arch::vld1q_u16`]."] + #[inline(always)] + pub unsafe fn vld1q_u16(self, ptr: *const u16) -> uint16x8_t { + unsafe { vld1q_u16(ptr) } + } + #[doc = "See [`arch::vld1_u32`]."] + #[inline(always)] + pub unsafe fn vld1_u32(self, ptr: *const u32) -> uint32x2_t { + unsafe { vld1_u32(ptr) } + } + #[doc = "See [`arch::vld1q_u32`]."] + #[inline(always)] + pub 
unsafe fn vld1q_u32(self, ptr: *const u32) -> uint32x4_t { + unsafe { vld1q_u32(ptr) } + } + #[doc = "See [`arch::vld1_u64`]."] + #[inline(always)] + pub unsafe fn vld1_u64(self, ptr: *const u64) -> uint64x1_t { + unsafe { vld1_u64(ptr) } + } + #[doc = "See [`arch::vld1q_u64`]."] + #[inline(always)] + pub unsafe fn vld1q_u64(self, ptr: *const u64) -> uint64x2_t { + unsafe { vld1q_u64(ptr) } + } + #[doc = "See [`arch::vld1_p8`]."] + #[inline(always)] + pub unsafe fn vld1_p8(self, ptr: *const p8) -> poly8x8_t { + unsafe { vld1_p8(ptr) } + } + #[doc = "See [`arch::vld1q_p8`]."] + #[inline(always)] + pub unsafe fn vld1q_p8(self, ptr: *const p8) -> poly8x16_t { + unsafe { vld1q_p8(ptr) } + } + #[doc = "See [`arch::vld1_p16`]."] + #[inline(always)] + pub unsafe fn vld1_p16(self, ptr: *const p16) -> poly16x4_t { + unsafe { vld1_p16(ptr) } + } + #[doc = "See [`arch::vld1q_p16`]."] + #[inline(always)] + pub unsafe fn vld1q_p16(self, ptr: *const p16) -> poly16x8_t { + unsafe { vld1q_p16(ptr) } + } + #[doc = "See [`arch::vld1_p64`]."] + #[inline(always)] + pub unsafe fn vld1_p64(self, ptr: *const p64) -> poly64x1_t { + unsafe { vld1_p64(ptr) } + } + #[doc = "See [`arch::vld1q_p64`]."] + #[inline(always)] + pub unsafe fn vld1q_p64(self, ptr: *const p64) -> poly64x2_t { + unsafe { vld1q_p64(ptr) } + } + #[doc = "See [`arch::vld1_f64_x2`]."] + #[inline(always)] + pub unsafe fn vld1_f64_x2(self, a: *const f64) -> float64x1x2_t { + unsafe { vld1_f64_x2(a) } + } + #[doc = "See [`arch::vld1_f64_x3`]."] + #[inline(always)] + pub unsafe fn vld1_f64_x3(self, a: *const f64) -> float64x1x3_t { + unsafe { vld1_f64_x3(a) } + } + #[doc = "See [`arch::vld1_f64_x4`]."] + #[inline(always)] + pub unsafe fn vld1_f64_x4(self, a: *const f64) -> float64x1x4_t { + unsafe { vld1_f64_x4(a) } + } + #[doc = "See [`arch::vld1q_f64_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_f64_x2(self, a: *const f64) -> float64x2x2_t { + unsafe { vld1q_f64_x2(a) } + } + #[doc = "See [`arch::vld1q_f64_x3`]."] + 
#[inline(always)] + pub unsafe fn vld1q_f64_x3(self, a: *const f64) -> float64x2x3_t { + unsafe { vld1q_f64_x3(a) } + } + #[doc = "See [`arch::vld1q_f64_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_f64_x4(self, a: *const f64) -> float64x2x4_t { + unsafe { vld1q_f64_x4(a) } + } + #[doc = "See [`arch::vld2_dup_f64`]."] + #[inline(always)] + pub unsafe fn vld2_dup_f64(self, a: *const f64) -> float64x1x2_t { + unsafe { vld2_dup_f64(a) } + } + #[doc = "See [`arch::vld2q_dup_f64`]."] + #[inline(always)] + pub unsafe fn vld2q_dup_f64(self, a: *const f64) -> float64x2x2_t { + unsafe { vld2q_dup_f64(a) } + } + #[doc = "See [`arch::vld2q_dup_s64`]."] + #[inline(always)] + pub unsafe fn vld2q_dup_s64(self, a: *const i64) -> int64x2x2_t { + unsafe { vld2q_dup_s64(a) } + } + #[doc = "See [`arch::vld2_f64`]."] + #[inline(always)] + pub unsafe fn vld2_f64(self, a: *const f64) -> float64x1x2_t { + unsafe { vld2_f64(a) } + } + #[doc = "See [`arch::vld2_lane_f64`]."] + #[inline(always)] + pub unsafe fn vld2_lane_f64( + self, + a: *const f64, + b: float64x1x2_t, + ) -> float64x1x2_t { + unsafe { vld2_lane_f64::(a, b) } + } + #[doc = "See [`arch::vld2_lane_s64`]."] + #[inline(always)] + pub unsafe fn vld2_lane_s64( + self, + a: *const i64, + b: int64x1x2_t, + ) -> int64x1x2_t { + unsafe { vld2_lane_s64::(a, b) } + } + #[doc = "See [`arch::vld2_lane_p64`]."] + #[inline(always)] + pub unsafe fn vld2_lane_p64( + self, + a: *const p64, + b: poly64x1x2_t, + ) -> poly64x1x2_t { + unsafe { vld2_lane_p64::(a, b) } + } + #[doc = "See [`arch::vld2_lane_u64`]."] + #[inline(always)] + pub unsafe fn vld2_lane_u64( + self, + a: *const u64, + b: uint64x1x2_t, + ) -> uint64x1x2_t { + unsafe { vld2_lane_u64::(a, b) } + } + #[doc = "See [`arch::vld2q_dup_p64`]."] + #[inline(always)] + pub unsafe fn vld2q_dup_p64(self, a: *const p64) -> poly64x2x2_t { + unsafe { vld2q_dup_p64(a) } + } + #[doc = "See [`arch::vld2q_dup_u64`]."] + #[inline(always)] + pub unsafe fn vld2q_dup_u64(self, a: *const u64) -> 
uint64x2x2_t { + unsafe { vld2q_dup_u64(a) } + } + #[doc = "See [`arch::vld2q_f64`]."] + #[inline(always)] + pub unsafe fn vld2q_f64(self, a: *const f64) -> float64x2x2_t { + unsafe { vld2q_f64(a) } + } + #[doc = "See [`arch::vld2q_s64`]."] + #[inline(always)] + pub unsafe fn vld2q_s64(self, a: *const i64) -> int64x2x2_t { + unsafe { vld2q_s64(a) } + } + #[doc = "See [`arch::vld2q_lane_f64`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_f64( + self, + a: *const f64, + b: float64x2x2_t, + ) -> float64x2x2_t { + unsafe { vld2q_lane_f64::(a, b) } + } + #[doc = "See [`arch::vld2q_lane_s8`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_s8( + self, + a: *const i8, + b: int8x16x2_t, + ) -> int8x16x2_t { + unsafe { vld2q_lane_s8::(a, b) } + } + #[doc = "See [`arch::vld2q_lane_s64`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_s64( + self, + a: *const i64, + b: int64x2x2_t, + ) -> int64x2x2_t { + unsafe { vld2q_lane_s64::(a, b) } + } + #[doc = "See [`arch::vld2q_lane_p64`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_p64( + self, + a: *const p64, + b: poly64x2x2_t, + ) -> poly64x2x2_t { + unsafe { vld2q_lane_p64::(a, b) } + } + #[doc = "See [`arch::vld2q_lane_u8`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_u8( + self, + a: *const u8, + b: uint8x16x2_t, + ) -> uint8x16x2_t { + unsafe { vld2q_lane_u8::(a, b) } + } + #[doc = "See [`arch::vld2q_lane_u64`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_u64( + self, + a: *const u64, + b: uint64x2x2_t, + ) -> uint64x2x2_t { + unsafe { vld2q_lane_u64::(a, b) } + } + #[doc = "See [`arch::vld2q_lane_p8`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_p8( + self, + a: *const p8, + b: poly8x16x2_t, + ) -> poly8x16x2_t { + unsafe { vld2q_lane_p8::(a, b) } + } + #[doc = "See [`arch::vld2q_p64`]."] + #[inline(always)] + pub unsafe fn vld2q_p64(self, a: *const p64) -> poly64x2x2_t { + unsafe { vld2q_p64(a) } + } + #[doc = "See [`arch::vld2q_u64`]."] + #[inline(always)] + pub unsafe fn vld2q_u64(self, a: 
*const u64) -> uint64x2x2_t { + unsafe { vld2q_u64(a) } + } + #[doc = "See [`arch::vld3_dup_f64`]."] + #[inline(always)] + pub unsafe fn vld3_dup_f64(self, a: *const f64) -> float64x1x3_t { + unsafe { vld3_dup_f64(a) } + } + #[doc = "See [`arch::vld3q_dup_f64`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_f64(self, a: *const f64) -> float64x2x3_t { + unsafe { vld3q_dup_f64(a) } + } + #[doc = "See [`arch::vld3q_dup_s64`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_s64(self, a: *const i64) -> int64x2x3_t { + unsafe { vld3q_dup_s64(a) } + } + #[doc = "See [`arch::vld3_f64`]."] + #[inline(always)] + pub unsafe fn vld3_f64(self, a: *const f64) -> float64x1x3_t { + unsafe { vld3_f64(a) } + } + #[doc = "See [`arch::vld3_lane_f64`]."] + #[inline(always)] + pub unsafe fn vld3_lane_f64( + self, + a: *const f64, + b: float64x1x3_t, + ) -> float64x1x3_t { + unsafe { vld3_lane_f64::(a, b) } + } + #[doc = "See [`arch::vld3_lane_p64`]."] + #[inline(always)] + pub unsafe fn vld3_lane_p64( + self, + a: *const p64, + b: poly64x1x3_t, + ) -> poly64x1x3_t { + unsafe { vld3_lane_p64::(a, b) } + } + #[doc = "See [`arch::vld3_lane_s64`]."] + #[inline(always)] + pub unsafe fn vld3_lane_s64( + self, + a: *const i64, + b: int64x1x3_t, + ) -> int64x1x3_t { + unsafe { vld3_lane_s64::(a, b) } + } + #[doc = "See [`arch::vld3_lane_u64`]."] + #[inline(always)] + pub unsafe fn vld3_lane_u64( + self, + a: *const u64, + b: uint64x1x3_t, + ) -> uint64x1x3_t { + unsafe { vld3_lane_u64::(a, b) } + } + #[doc = "See [`arch::vld3q_dup_p64`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_p64(self, a: *const p64) -> poly64x2x3_t { + unsafe { vld3q_dup_p64(a) } + } + #[doc = "See [`arch::vld3q_dup_u64`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_u64(self, a: *const u64) -> uint64x2x3_t { + unsafe { vld3q_dup_u64(a) } + } + #[doc = "See [`arch::vld3q_f64`]."] + #[inline(always)] + pub unsafe fn vld3q_f64(self, a: *const f64) -> float64x2x3_t { + unsafe { vld3q_f64(a) } + } + #[doc = "See 
[`arch::vld3q_s64`]."] + #[inline(always)] + pub unsafe fn vld3q_s64(self, a: *const i64) -> int64x2x3_t { + unsafe { vld3q_s64(a) } + } + #[doc = "See [`arch::vld3q_lane_f64`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_f64( + self, + a: *const f64, + b: float64x2x3_t, + ) -> float64x2x3_t { + unsafe { vld3q_lane_f64::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_p64`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_p64( + self, + a: *const p64, + b: poly64x2x3_t, + ) -> poly64x2x3_t { + unsafe { vld3q_lane_p64::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_s8`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_s8( + self, + a: *const i8, + b: int8x16x3_t, + ) -> int8x16x3_t { + unsafe { vld3q_lane_s8::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_s64`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_s64( + self, + a: *const i64, + b: int64x2x3_t, + ) -> int64x2x3_t { + unsafe { vld3q_lane_s64::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_u8`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_u8( + self, + a: *const u8, + b: uint8x16x3_t, + ) -> uint8x16x3_t { + unsafe { vld3q_lane_u8::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_u64`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_u64( + self, + a: *const u64, + b: uint64x2x3_t, + ) -> uint64x2x3_t { + unsafe { vld3q_lane_u64::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_p8`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_p8( + self, + a: *const p8, + b: poly8x16x3_t, + ) -> poly8x16x3_t { + unsafe { vld3q_lane_p8::(a, b) } + } + #[doc = "See [`arch::vld3q_p64`]."] + #[inline(always)] + pub unsafe fn vld3q_p64(self, a: *const p64) -> poly64x2x3_t { + unsafe { vld3q_p64(a) } + } + #[doc = "See [`arch::vld3q_u64`]."] + #[inline(always)] + pub unsafe fn vld3q_u64(self, a: *const u64) -> uint64x2x3_t { + unsafe { vld3q_u64(a) } + } + #[doc = "See [`arch::vld4_dup_f64`]."] + #[inline(always)] + pub unsafe fn vld4_dup_f64(self, a: *const f64) -> float64x1x4_t { + unsafe { vld4_dup_f64(a) } + } 
+ #[doc = "See [`arch::vld4q_dup_f64`]."] + #[inline(always)] + pub unsafe fn vld4q_dup_f64(self, a: *const f64) -> float64x2x4_t { + unsafe { vld4q_dup_f64(a) } + } + #[doc = "See [`arch::vld4q_dup_s64`]."] + #[inline(always)] + pub unsafe fn vld4q_dup_s64(self, a: *const i64) -> int64x2x4_t { + unsafe { vld4q_dup_s64(a) } + } + #[doc = "See [`arch::vld4_f64`]."] + #[inline(always)] + pub unsafe fn vld4_f64(self, a: *const f64) -> float64x1x4_t { + unsafe { vld4_f64(a) } + } + #[doc = "See [`arch::vld4_lane_f64`]."] + #[inline(always)] + pub unsafe fn vld4_lane_f64( + self, + a: *const f64, + b: float64x1x4_t, + ) -> float64x1x4_t { + unsafe { vld4_lane_f64::(a, b) } + } + #[doc = "See [`arch::vld4_lane_s64`]."] + #[inline(always)] + pub unsafe fn vld4_lane_s64( + self, + a: *const i64, + b: int64x1x4_t, + ) -> int64x1x4_t { + unsafe { vld4_lane_s64::(a, b) } + } + #[doc = "See [`arch::vld4_lane_p64`]."] + #[inline(always)] + pub unsafe fn vld4_lane_p64( + self, + a: *const p64, + b: poly64x1x4_t, + ) -> poly64x1x4_t { + unsafe { vld4_lane_p64::(a, b) } + } + #[doc = "See [`arch::vld4_lane_u64`]."] + #[inline(always)] + pub unsafe fn vld4_lane_u64( + self, + a: *const u64, + b: uint64x1x4_t, + ) -> uint64x1x4_t { + unsafe { vld4_lane_u64::(a, b) } + } + #[doc = "See [`arch::vld4q_dup_p64`]."] + #[inline(always)] + pub unsafe fn vld4q_dup_p64(self, a: *const p64) -> poly64x2x4_t { + unsafe { vld4q_dup_p64(a) } + } + #[doc = "See [`arch::vld4q_dup_u64`]."] + #[inline(always)] + pub unsafe fn vld4q_dup_u64(self, a: *const u64) -> uint64x2x4_t { + unsafe { vld4q_dup_u64(a) } + } + #[doc = "See [`arch::vld4q_f64`]."] + #[inline(always)] + pub unsafe fn vld4q_f64(self, a: *const f64) -> float64x2x4_t { + unsafe { vld4q_f64(a) } + } + #[doc = "See [`arch::vld4q_s64`]."] + #[inline(always)] + pub unsafe fn vld4q_s64(self, a: *const i64) -> int64x2x4_t { + unsafe { vld4q_s64(a) } + } + #[doc = "See [`arch::vld4q_lane_f64`]."] + #[inline(always)] + pub unsafe fn 
vld4q_lane_f64( + self, + a: *const f64, + b: float64x2x4_t, + ) -> float64x2x4_t { + unsafe { vld4q_lane_f64::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_s8`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_s8( + self, + a: *const i8, + b: int8x16x4_t, + ) -> int8x16x4_t { + unsafe { vld4q_lane_s8::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_s64`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_s64( + self, + a: *const i64, + b: int64x2x4_t, + ) -> int64x2x4_t { + unsafe { vld4q_lane_s64::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_p64`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_p64( + self, + a: *const p64, + b: poly64x2x4_t, + ) -> poly64x2x4_t { + unsafe { vld4q_lane_p64::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_u8`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_u8( + self, + a: *const u8, + b: uint8x16x4_t, + ) -> uint8x16x4_t { + unsafe { vld4q_lane_u8::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_u64`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_u64( + self, + a: *const u64, + b: uint64x2x4_t, + ) -> uint64x2x4_t { + unsafe { vld4q_lane_u64::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_p8`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_p8( + self, + a: *const p8, + b: poly8x16x4_t, + ) -> poly8x16x4_t { + unsafe { vld4q_lane_p8::(a, b) } + } + #[doc = "See [`arch::vld4q_p64`]."] + #[inline(always)] + pub unsafe fn vld4q_p64(self, a: *const p64) -> poly64x2x4_t { + unsafe { vld4q_p64(a) } + } + #[doc = "See [`arch::vld4q_u64`]."] + #[inline(always)] + pub unsafe fn vld4q_u64(self, a: *const u64) -> uint64x2x4_t { + unsafe { vld4q_u64(a) } + } + #[doc = "See [`arch::vmax_f64`]."] + #[inline(always)] + pub fn vmax_f64(self, a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vmax_f64(a, b) } + } + #[doc = "See [`arch::vmaxq_f64`]."] + #[inline(always)] + pub fn vmaxq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vmaxq_f64(a, b) } + } + #[doc = "See [`arch::vmaxnm_f64`]."] + 
#[inline(always)] + pub fn vmaxnm_f64(self, a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vmaxnm_f64(a, b) } + } + #[doc = "See [`arch::vmaxnmq_f64`]."] + #[inline(always)] + pub fn vmaxnmq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vmaxnmq_f64(a, b) } + } + #[doc = "See [`arch::vmaxnmv_f32`]."] + #[inline(always)] + pub fn vmaxnmv_f32(self, a: float32x2_t) -> f32 { + unsafe { vmaxnmv_f32(a) } + } + #[doc = "See [`arch::vmaxnmvq_f64`]."] + #[inline(always)] + pub fn vmaxnmvq_f64(self, a: float64x2_t) -> f64 { + unsafe { vmaxnmvq_f64(a) } + } + #[doc = "See [`arch::vmaxnmvq_f32`]."] + #[inline(always)] + pub fn vmaxnmvq_f32(self, a: float32x4_t) -> f32 { + unsafe { vmaxnmvq_f32(a) } + } + #[doc = "See [`arch::vmaxv_f32`]."] + #[inline(always)] + pub fn vmaxv_f32(self, a: float32x2_t) -> f32 { + unsafe { vmaxv_f32(a) } + } + #[doc = "See [`arch::vmaxvq_f32`]."] + #[inline(always)] + pub fn vmaxvq_f32(self, a: float32x4_t) -> f32 { + unsafe { vmaxvq_f32(a) } + } + #[doc = "See [`arch::vmaxvq_f64`]."] + #[inline(always)] + pub fn vmaxvq_f64(self, a: float64x2_t) -> f64 { + unsafe { vmaxvq_f64(a) } + } + #[doc = "See [`arch::vmaxv_s8`]."] + #[inline(always)] + pub fn vmaxv_s8(self, a: int8x8_t) -> i8 { + unsafe { vmaxv_s8(a) } + } + #[doc = "See [`arch::vmaxvq_s8`]."] + #[inline(always)] + pub fn vmaxvq_s8(self, a: int8x16_t) -> i8 { + unsafe { vmaxvq_s8(a) } + } + #[doc = "See [`arch::vmaxv_s16`]."] + #[inline(always)] + pub fn vmaxv_s16(self, a: int16x4_t) -> i16 { + unsafe { vmaxv_s16(a) } + } + #[doc = "See [`arch::vmaxvq_s16`]."] + #[inline(always)] + pub fn vmaxvq_s16(self, a: int16x8_t) -> i16 { + unsafe { vmaxvq_s16(a) } + } + #[doc = "See [`arch::vmaxv_s32`]."] + #[inline(always)] + pub fn vmaxv_s32(self, a: int32x2_t) -> i32 { + unsafe { vmaxv_s32(a) } + } + #[doc = "See [`arch::vmaxvq_s32`]."] + #[inline(always)] + pub fn vmaxvq_s32(self, a: int32x4_t) -> i32 { + unsafe { vmaxvq_s32(a) } + } + #[doc = "See 
[`arch::vmaxv_u8`]."] + #[inline(always)] + pub fn vmaxv_u8(self, a: uint8x8_t) -> u8 { + unsafe { vmaxv_u8(a) } + } + #[doc = "See [`arch::vmaxvq_u8`]."] + #[inline(always)] + pub fn vmaxvq_u8(self, a: uint8x16_t) -> u8 { + unsafe { vmaxvq_u8(a) } + } + #[doc = "See [`arch::vmaxv_u16`]."] + #[inline(always)] + pub fn vmaxv_u16(self, a: uint16x4_t) -> u16 { + unsafe { vmaxv_u16(a) } + } + #[doc = "See [`arch::vmaxvq_u16`]."] + #[inline(always)] + pub fn vmaxvq_u16(self, a: uint16x8_t) -> u16 { + unsafe { vmaxvq_u16(a) } + } + #[doc = "See [`arch::vmaxv_u32`]."] + #[inline(always)] + pub fn vmaxv_u32(self, a: uint32x2_t) -> u32 { + unsafe { vmaxv_u32(a) } + } + #[doc = "See [`arch::vmaxvq_u32`]."] + #[inline(always)] + pub fn vmaxvq_u32(self, a: uint32x4_t) -> u32 { + unsafe { vmaxvq_u32(a) } + } + #[doc = "See [`arch::vmin_f64`]."] + #[inline(always)] + pub fn vmin_f64(self, a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vmin_f64(a, b) } + } + #[doc = "See [`arch::vminq_f64`]."] + #[inline(always)] + pub fn vminq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vminq_f64(a, b) } + } + #[doc = "See [`arch::vminnm_f64`]."] + #[inline(always)] + pub fn vminnm_f64(self, a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vminnm_f64(a, b) } + } + #[doc = "See [`arch::vminnmq_f64`]."] + #[inline(always)] + pub fn vminnmq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vminnmq_f64(a, b) } + } + #[doc = "See [`arch::vminnmv_f32`]."] + #[inline(always)] + pub fn vminnmv_f32(self, a: float32x2_t) -> f32 { + unsafe { vminnmv_f32(a) } + } + #[doc = "See [`arch::vminnmvq_f64`]."] + #[inline(always)] + pub fn vminnmvq_f64(self, a: float64x2_t) -> f64 { + unsafe { vminnmvq_f64(a) } + } + #[doc = "See [`arch::vminnmvq_f32`]."] + #[inline(always)] + pub fn vminnmvq_f32(self, a: float32x4_t) -> f32 { + unsafe { vminnmvq_f32(a) } + } + #[doc = "See [`arch::vminv_f32`]."] + #[inline(always)] + pub fn vminv_f32(self, a: 
float32x2_t) -> f32 { + unsafe { vminv_f32(a) } + } + #[doc = "See [`arch::vminvq_f32`]."] + #[inline(always)] + pub fn vminvq_f32(self, a: float32x4_t) -> f32 { + unsafe { vminvq_f32(a) } + } + #[doc = "See [`arch::vminvq_f64`]."] + #[inline(always)] + pub fn vminvq_f64(self, a: float64x2_t) -> f64 { + unsafe { vminvq_f64(a) } + } + #[doc = "See [`arch::vminv_s8`]."] + #[inline(always)] + pub fn vminv_s8(self, a: int8x8_t) -> i8 { + unsafe { vminv_s8(a) } + } + #[doc = "See [`arch::vminvq_s8`]."] + #[inline(always)] + pub fn vminvq_s8(self, a: int8x16_t) -> i8 { + unsafe { vminvq_s8(a) } + } + #[doc = "See [`arch::vminv_s16`]."] + #[inline(always)] + pub fn vminv_s16(self, a: int16x4_t) -> i16 { + unsafe { vminv_s16(a) } + } + #[doc = "See [`arch::vminvq_s16`]."] + #[inline(always)] + pub fn vminvq_s16(self, a: int16x8_t) -> i16 { + unsafe { vminvq_s16(a) } + } + #[doc = "See [`arch::vminv_s32`]."] + #[inline(always)] + pub fn vminv_s32(self, a: int32x2_t) -> i32 { + unsafe { vminv_s32(a) } + } + #[doc = "See [`arch::vminvq_s32`]."] + #[inline(always)] + pub fn vminvq_s32(self, a: int32x4_t) -> i32 { + unsafe { vminvq_s32(a) } + } + #[doc = "See [`arch::vminv_u8`]."] + #[inline(always)] + pub fn vminv_u8(self, a: uint8x8_t) -> u8 { + unsafe { vminv_u8(a) } + } + #[doc = "See [`arch::vminvq_u8`]."] + #[inline(always)] + pub fn vminvq_u8(self, a: uint8x16_t) -> u8 { + unsafe { vminvq_u8(a) } + } + #[doc = "See [`arch::vminv_u16`]."] + #[inline(always)] + pub fn vminv_u16(self, a: uint16x4_t) -> u16 { + unsafe { vminv_u16(a) } + } + #[doc = "See [`arch::vminvq_u16`]."] + #[inline(always)] + pub fn vminvq_u16(self, a: uint16x8_t) -> u16 { + unsafe { vminvq_u16(a) } + } + #[doc = "See [`arch::vminv_u32`]."] + #[inline(always)] + pub fn vminv_u32(self, a: uint32x2_t) -> u32 { + unsafe { vminv_u32(a) } + } + #[doc = "See [`arch::vminvq_u32`]."] + #[inline(always)] + pub fn vminvq_u32(self, a: uint32x4_t) -> u32 { + unsafe { vminvq_u32(a) } + } + #[doc = "See 
[`arch::vmla_f64`]."] + #[inline(always)] + pub fn vmla_f64(self, a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + unsafe { vmla_f64(a, b, c) } + } + #[doc = "See [`arch::vmlaq_f64`]."] + #[inline(always)] + pub fn vmlaq_f64(self, a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + unsafe { vmlaq_f64(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_lane_s16`]."] + #[inline(always)] + pub fn vmlal_high_lane_s16( + self, + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, + ) -> int32x4_t { + unsafe { vmlal_high_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_laneq_s16`]."] + #[inline(always)] + pub fn vmlal_high_laneq_s16( + self, + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, + ) -> int32x4_t { + unsafe { vmlal_high_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_lane_s32`]."] + #[inline(always)] + pub fn vmlal_high_lane_s32( + self, + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, + ) -> int64x2_t { + unsafe { vmlal_high_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_laneq_s32`]."] + #[inline(always)] + pub fn vmlal_high_laneq_s32( + self, + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, + ) -> int64x2_t { + unsafe { vmlal_high_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_lane_u16`]."] + #[inline(always)] + pub fn vmlal_high_lane_u16( + self, + a: uint32x4_t, + b: uint16x8_t, + c: uint16x4_t, + ) -> uint32x4_t { + unsafe { vmlal_high_lane_u16::(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_laneq_u16`]."] + #[inline(always)] + pub fn vmlal_high_laneq_u16( + self, + a: uint32x4_t, + b: uint16x8_t, + c: uint16x8_t, + ) -> uint32x4_t { + unsafe { vmlal_high_laneq_u16::(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_lane_u32`]."] + #[inline(always)] + pub fn vmlal_high_lane_u32( + self, + a: uint64x2_t, + b: uint32x4_t, + c: uint32x2_t, + ) -> uint64x2_t { + unsafe { vmlal_high_lane_u32::(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_laneq_u32`]."] + #[inline(always)] + pub fn 
vmlal_high_laneq_u32( + self, + a: uint64x2_t, + b: uint32x4_t, + c: uint32x4_t, + ) -> uint64x2_t { + unsafe { vmlal_high_laneq_u32::(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_n_s16`]."] + #[inline(always)] + pub fn vmlal_high_n_s16(self, a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + unsafe { vmlal_high_n_s16(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_n_s32`]."] + #[inline(always)] + pub fn vmlal_high_n_s32(self, a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + unsafe { vmlal_high_n_s32(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_n_u16`]."] + #[inline(always)] + pub fn vmlal_high_n_u16(self, a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { + unsafe { vmlal_high_n_u16(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_n_u32`]."] + #[inline(always)] + pub fn vmlal_high_n_u32(self, a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { + unsafe { vmlal_high_n_u32(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_s8`]."] + #[inline(always)] + pub fn vmlal_high_s8(self, a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { + unsafe { vmlal_high_s8(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_s16`]."] + #[inline(always)] + pub fn vmlal_high_s16(self, a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + unsafe { vmlal_high_s16(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_s32`]."] + #[inline(always)] + pub fn vmlal_high_s32(self, a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + unsafe { vmlal_high_s32(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_u8`]."] + #[inline(always)] + pub fn vmlal_high_u8(self, a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { + unsafe { vmlal_high_u8(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_u16`]."] + #[inline(always)] + pub fn vmlal_high_u16(self, a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { + unsafe { vmlal_high_u16(a, b, c) } + } + #[doc = "See [`arch::vmlal_high_u32`]."] + #[inline(always)] + pub fn vmlal_high_u32(self, a: uint64x2_t, b: uint32x4_t, 
c: uint32x4_t) -> uint64x2_t { + unsafe { vmlal_high_u32(a, b, c) } + } + #[doc = "See [`arch::vmls_f64`]."] + #[inline(always)] + pub fn vmls_f64(self, a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { + unsafe { vmls_f64(a, b, c) } + } + #[doc = "See [`arch::vmlsq_f64`]."] + #[inline(always)] + pub fn vmlsq_f64(self, a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t { + unsafe { vmlsq_f64(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_lane_s16`]."] + #[inline(always)] + pub fn vmlsl_high_lane_s16( + self, + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, + ) -> int32x4_t { + unsafe { vmlsl_high_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_laneq_s16`]."] + #[inline(always)] + pub fn vmlsl_high_laneq_s16( + self, + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, + ) -> int32x4_t { + unsafe { vmlsl_high_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_lane_s32`]."] + #[inline(always)] + pub fn vmlsl_high_lane_s32( + self, + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, + ) -> int64x2_t { + unsafe { vmlsl_high_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_laneq_s32`]."] + #[inline(always)] + pub fn vmlsl_high_laneq_s32( + self, + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, + ) -> int64x2_t { + unsafe { vmlsl_high_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_lane_u16`]."] + #[inline(always)] + pub fn vmlsl_high_lane_u16( + self, + a: uint32x4_t, + b: uint16x8_t, + c: uint16x4_t, + ) -> uint32x4_t { + unsafe { vmlsl_high_lane_u16::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_laneq_u16`]."] + #[inline(always)] + pub fn vmlsl_high_laneq_u16( + self, + a: uint32x4_t, + b: uint16x8_t, + c: uint16x8_t, + ) -> uint32x4_t { + unsafe { vmlsl_high_laneq_u16::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_lane_u32`]."] + #[inline(always)] + pub fn vmlsl_high_lane_u32( + self, + a: uint64x2_t, + b: uint32x4_t, + c: uint32x2_t, + ) -> uint64x2_t { + unsafe { vmlsl_high_lane_u32::(a, b, c) 
} + } + #[doc = "See [`arch::vmlsl_high_laneq_u32`]."] + #[inline(always)] + pub fn vmlsl_high_laneq_u32( + self, + a: uint64x2_t, + b: uint32x4_t, + c: uint32x4_t, + ) -> uint64x2_t { + unsafe { vmlsl_high_laneq_u32::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_n_s16`]."] + #[inline(always)] + pub fn vmlsl_high_n_s16(self, a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + unsafe { vmlsl_high_n_s16(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_n_s32`]."] + #[inline(always)] + pub fn vmlsl_high_n_s32(self, a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + unsafe { vmlsl_high_n_s32(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_n_u16`]."] + #[inline(always)] + pub fn vmlsl_high_n_u16(self, a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { + unsafe { vmlsl_high_n_u16(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_n_u32`]."] + #[inline(always)] + pub fn vmlsl_high_n_u32(self, a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { + unsafe { vmlsl_high_n_u32(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_s8`]."] + #[inline(always)] + pub fn vmlsl_high_s8(self, a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { + unsafe { vmlsl_high_s8(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_s16`]."] + #[inline(always)] + pub fn vmlsl_high_s16(self, a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + unsafe { vmlsl_high_s16(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_s32`]."] + #[inline(always)] + pub fn vmlsl_high_s32(self, a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + unsafe { vmlsl_high_s32(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_u8`]."] + #[inline(always)] + pub fn vmlsl_high_u8(self, a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t { + unsafe { vmlsl_high_u8(a, b, c) } + } + #[doc = "See [`arch::vmlsl_high_u16`]."] + #[inline(always)] + pub fn vmlsl_high_u16(self, a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t { + unsafe { vmlsl_high_u16(a, b, c) } + } + #[doc = "See 
[`arch::vmlsl_high_u32`]."] + #[inline(always)] + pub fn vmlsl_high_u32(self, a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t { + unsafe { vmlsl_high_u32(a, b, c) } + } + #[doc = "See [`arch::vmovl_high_s8`]."] + #[inline(always)] + pub fn vmovl_high_s8(self, a: int8x16_t) -> int16x8_t { + unsafe { vmovl_high_s8(a) } + } + #[doc = "See [`arch::vmovl_high_s16`]."] + #[inline(always)] + pub fn vmovl_high_s16(self, a: int16x8_t) -> int32x4_t { + unsafe { vmovl_high_s16(a) } + } + #[doc = "See [`arch::vmovl_high_s32`]."] + #[inline(always)] + pub fn vmovl_high_s32(self, a: int32x4_t) -> int64x2_t { + unsafe { vmovl_high_s32(a) } + } + #[doc = "See [`arch::vmovl_high_u8`]."] + #[inline(always)] + pub fn vmovl_high_u8(self, a: uint8x16_t) -> uint16x8_t { + unsafe { vmovl_high_u8(a) } + } + #[doc = "See [`arch::vmovl_high_u16`]."] + #[inline(always)] + pub fn vmovl_high_u16(self, a: uint16x8_t) -> uint32x4_t { + unsafe { vmovl_high_u16(a) } + } + #[doc = "See [`arch::vmovl_high_u32`]."] + #[inline(always)] + pub fn vmovl_high_u32(self, a: uint32x4_t) -> uint64x2_t { + unsafe { vmovl_high_u32(a) } + } + #[doc = "See [`arch::vmovn_high_s16`]."] + #[inline(always)] + pub fn vmovn_high_s16(self, a: int8x8_t, b: int16x8_t) -> int8x16_t { + unsafe { vmovn_high_s16(a, b) } + } + #[doc = "See [`arch::vmovn_high_s32`]."] + #[inline(always)] + pub fn vmovn_high_s32(self, a: int16x4_t, b: int32x4_t) -> int16x8_t { + unsafe { vmovn_high_s32(a, b) } + } + #[doc = "See [`arch::vmovn_high_s64`]."] + #[inline(always)] + pub fn vmovn_high_s64(self, a: int32x2_t, b: int64x2_t) -> int32x4_t { + unsafe { vmovn_high_s64(a, b) } + } + #[doc = "See [`arch::vmovn_high_u16`]."] + #[inline(always)] + pub fn vmovn_high_u16(self, a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + unsafe { vmovn_high_u16(a, b) } + } + #[doc = "See [`arch::vmovn_high_u32`]."] + #[inline(always)] + pub fn vmovn_high_u32(self, a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + unsafe { vmovn_high_u32(a, b) } + } + 
#[doc = "See [`arch::vmovn_high_u64`]."] + #[inline(always)] + pub fn vmovn_high_u64(self, a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + unsafe { vmovn_high_u64(a, b) } + } + #[doc = "See [`arch::vmul_f64`]."] + #[inline(always)] + pub fn vmul_f64(self, a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vmul_f64(a, b) } + } + #[doc = "See [`arch::vmulq_f64`]."] + #[inline(always)] + pub fn vmulq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vmulq_f64(a, b) } + } + #[doc = "See [`arch::vmul_lane_f64`]."] + #[inline(always)] + pub fn vmul_lane_f64(self, a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vmul_lane_f64::(a, b) } + } + #[doc = "See [`arch::vmul_laneq_f64`]."] + #[inline(always)] + pub fn vmul_laneq_f64(self, a: float64x1_t, b: float64x2_t) -> float64x1_t { + unsafe { vmul_laneq_f64::(a, b) } + } + #[doc = "See [`arch::vmul_n_f64`]."] + #[inline(always)] + pub fn vmul_n_f64(self, a: float64x1_t, b: f64) -> float64x1_t { + unsafe { vmul_n_f64(a, b) } + } + #[doc = "See [`arch::vmulq_n_f64`]."] + #[inline(always)] + pub fn vmulq_n_f64(self, a: float64x2_t, b: f64) -> float64x2_t { + unsafe { vmulq_n_f64(a, b) } + } + #[doc = "See [`arch::vmuld_lane_f64`]."] + #[inline(always)] + pub fn vmuld_lane_f64(self, a: f64, b: float64x1_t) -> f64 { + unsafe { vmuld_lane_f64::(a, b) } + } + #[doc = "See [`arch::vmull_high_lane_s16`]."] + #[inline(always)] + pub fn vmull_high_lane_s16(self, a: int16x8_t, b: int16x4_t) -> int32x4_t { + unsafe { vmull_high_lane_s16::(a, b) } + } + #[doc = "See [`arch::vmull_high_laneq_s16`]."] + #[inline(always)] + pub fn vmull_high_laneq_s16(self, a: int16x8_t, b: int16x8_t) -> int32x4_t { + unsafe { vmull_high_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vmull_high_lane_s32`]."] + #[inline(always)] + pub fn vmull_high_lane_s32(self, a: int32x4_t, b: int32x2_t) -> int64x2_t { + unsafe { vmull_high_lane_s32::(a, b) } + } + #[doc = "See [`arch::vmull_high_laneq_s32`]."] + #[inline(always)] + 
pub fn vmull_high_laneq_s32(self, a: int32x4_t, b: int32x4_t) -> int64x2_t { + unsafe { vmull_high_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vmull_high_lane_u16`]."] + #[inline(always)] + pub fn vmull_high_lane_u16(self, a: uint16x8_t, b: uint16x4_t) -> uint32x4_t { + unsafe { vmull_high_lane_u16::(a, b) } + } + #[doc = "See [`arch::vmull_high_laneq_u16`]."] + #[inline(always)] + pub fn vmull_high_laneq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + unsafe { vmull_high_laneq_u16::(a, b) } + } + #[doc = "See [`arch::vmull_high_lane_u32`]."] + #[inline(always)] + pub fn vmull_high_lane_u32(self, a: uint32x4_t, b: uint32x2_t) -> uint64x2_t { + unsafe { vmull_high_lane_u32::(a, b) } + } + #[doc = "See [`arch::vmull_high_laneq_u32`]."] + #[inline(always)] + pub fn vmull_high_laneq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + unsafe { vmull_high_laneq_u32::(a, b) } + } + #[doc = "See [`arch::vmull_high_n_s16`]."] + #[inline(always)] + pub fn vmull_high_n_s16(self, a: int16x8_t, b: i16) -> int32x4_t { + unsafe { vmull_high_n_s16(a, b) } + } + #[doc = "See [`arch::vmull_high_n_s32`]."] + #[inline(always)] + pub fn vmull_high_n_s32(self, a: int32x4_t, b: i32) -> int64x2_t { + unsafe { vmull_high_n_s32(a, b) } + } + #[doc = "See [`arch::vmull_high_n_u16`]."] + #[inline(always)] + pub fn vmull_high_n_u16(self, a: uint16x8_t, b: u16) -> uint32x4_t { + unsafe { vmull_high_n_u16(a, b) } + } + #[doc = "See [`arch::vmull_high_n_u32`]."] + #[inline(always)] + pub fn vmull_high_n_u32(self, a: uint32x4_t, b: u32) -> uint64x2_t { + unsafe { vmull_high_n_u32(a, b) } + } + #[doc = "See [`arch::vmull_high_p64`]."] + #[inline(always)] + pub fn vmull_high_p64(self, a: poly64x2_t, b: poly64x2_t) -> p128 { + unsafe { vmull_high_p64(a, b) } + } + #[doc = "See [`arch::vmull_high_p8`]."] + #[inline(always)] + pub fn vmull_high_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { + unsafe { vmull_high_p8(a, b) } + } + #[doc = "See [`arch::vmull_high_s8`]."] + 
#[inline(always)] + pub fn vmull_high_s8(self, a: int8x16_t, b: int8x16_t) -> int16x8_t { + unsafe { vmull_high_s8(a, b) } + } + #[doc = "See [`arch::vmull_high_s16`]."] + #[inline(always)] + pub fn vmull_high_s16(self, a: int16x8_t, b: int16x8_t) -> int32x4_t { + unsafe { vmull_high_s16(a, b) } + } + #[doc = "See [`arch::vmull_high_s32`]."] + #[inline(always)] + pub fn vmull_high_s32(self, a: int32x4_t, b: int32x4_t) -> int64x2_t { + unsafe { vmull_high_s32(a, b) } + } + #[doc = "See [`arch::vmull_high_u8`]."] + #[inline(always)] + pub fn vmull_high_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + unsafe { vmull_high_u8(a, b) } + } + #[doc = "See [`arch::vmull_high_u16`]."] + #[inline(always)] + pub fn vmull_high_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + unsafe { vmull_high_u16(a, b) } + } + #[doc = "See [`arch::vmull_high_u32`]."] + #[inline(always)] + pub fn vmull_high_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + unsafe { vmull_high_u32(a, b) } + } + #[doc = "See [`arch::vmull_p64`]."] + #[inline(always)] + pub fn vmull_p64(self, a: p64, b: p64) -> p128 { + unsafe { vmull_p64(a, b) } + } + #[doc = "See [`arch::vmulq_lane_f64`]."] + #[inline(always)] + pub fn vmulq_lane_f64(self, a: float64x2_t, b: float64x1_t) -> float64x2_t { + unsafe { vmulq_lane_f64::(a, b) } + } + #[doc = "See [`arch::vmulq_laneq_f64`]."] + #[inline(always)] + pub fn vmulq_laneq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vmulq_laneq_f64::(a, b) } + } + #[doc = "See [`arch::vmuls_lane_f32`]."] + #[inline(always)] + pub fn vmuls_lane_f32(self, a: f32, b: float32x2_t) -> f32 { + unsafe { vmuls_lane_f32::(a, b) } + } + #[doc = "See [`arch::vmuls_laneq_f32`]."] + #[inline(always)] + pub fn vmuls_laneq_f32(self, a: f32, b: float32x4_t) -> f32 { + unsafe { vmuls_laneq_f32::(a, b) } + } + #[doc = "See [`arch::vmuld_laneq_f64`]."] + #[inline(always)] + pub fn vmuld_laneq_f64(self, a: f64, b: float64x2_t) -> f64 { + unsafe { 
vmuld_laneq_f64::(a, b) } + } + #[doc = "See [`arch::vmulx_f32`]."] + #[inline(always)] + pub fn vmulx_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vmulx_f32(a, b) } + } + #[doc = "See [`arch::vmulxq_f32`]."] + #[inline(always)] + pub fn vmulxq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vmulxq_f32(a, b) } + } + #[doc = "See [`arch::vmulx_f64`]."] + #[inline(always)] + pub fn vmulx_f64(self, a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vmulx_f64(a, b) } + } + #[doc = "See [`arch::vmulxq_f64`]."] + #[inline(always)] + pub fn vmulxq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vmulxq_f64(a, b) } + } + #[doc = "See [`arch::vmulx_lane_f32`]."] + #[inline(always)] + pub fn vmulx_lane_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vmulx_lane_f32::(a, b) } + } + #[doc = "See [`arch::vmulx_laneq_f32`]."] + #[inline(always)] + pub fn vmulx_laneq_f32(self, a: float32x2_t, b: float32x4_t) -> float32x2_t { + unsafe { vmulx_laneq_f32::(a, b) } + } + #[doc = "See [`arch::vmulxq_lane_f32`]."] + #[inline(always)] + pub fn vmulxq_lane_f32(self, a: float32x4_t, b: float32x2_t) -> float32x4_t { + unsafe { vmulxq_lane_f32::(a, b) } + } + #[doc = "See [`arch::vmulxq_laneq_f32`]."] + #[inline(always)] + pub fn vmulxq_laneq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vmulxq_laneq_f32::(a, b) } + } + #[doc = "See [`arch::vmulxq_laneq_f64`]."] + #[inline(always)] + pub fn vmulxq_laneq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vmulxq_laneq_f64::(a, b) } + } + #[doc = "See [`arch::vmulx_lane_f64`]."] + #[inline(always)] + pub fn vmulx_lane_f64(self, a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vmulx_lane_f64::(a, b) } + } + #[doc = "See [`arch::vmulx_laneq_f64`]."] + #[inline(always)] + pub fn vmulx_laneq_f64(self, a: float64x1_t, b: float64x2_t) -> float64x1_t { + unsafe { vmulx_laneq_f64::(a, b) } + } + #[doc 
= "See [`arch::vmulxd_f64`]."] + #[inline(always)] + pub fn vmulxd_f64(self, a: f64, b: f64) -> f64 { + unsafe { vmulxd_f64(a, b) } + } + #[doc = "See [`arch::vmulxs_f32`]."] + #[inline(always)] + pub fn vmulxs_f32(self, a: f32, b: f32) -> f32 { + unsafe { vmulxs_f32(a, b) } + } + #[doc = "See [`arch::vmulxd_lane_f64`]."] + #[inline(always)] + pub fn vmulxd_lane_f64(self, a: f64, b: float64x1_t) -> f64 { + unsafe { vmulxd_lane_f64::(a, b) } + } + #[doc = "See [`arch::vmulxd_laneq_f64`]."] + #[inline(always)] + pub fn vmulxd_laneq_f64(self, a: f64, b: float64x2_t) -> f64 { + unsafe { vmulxd_laneq_f64::(a, b) } + } + #[doc = "See [`arch::vmulxs_lane_f32`]."] + #[inline(always)] + pub fn vmulxs_lane_f32(self, a: f32, b: float32x2_t) -> f32 { + unsafe { vmulxs_lane_f32::(a, b) } + } + #[doc = "See [`arch::vmulxs_laneq_f32`]."] + #[inline(always)] + pub fn vmulxs_laneq_f32(self, a: f32, b: float32x4_t) -> f32 { + unsafe { vmulxs_laneq_f32::(a, b) } + } + #[doc = "See [`arch::vmulxq_lane_f64`]."] + #[inline(always)] + pub fn vmulxq_lane_f64(self, a: float64x2_t, b: float64x1_t) -> float64x2_t { + unsafe { vmulxq_lane_f64::(a, b) } + } + #[doc = "See [`arch::vneg_f64`]."] + #[inline(always)] + pub fn vneg_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vneg_f64(a) } + } + #[doc = "See [`arch::vnegq_f64`]."] + #[inline(always)] + pub fn vnegq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vnegq_f64(a) } + } + #[doc = "See [`arch::vneg_s64`]."] + #[inline(always)] + pub fn vneg_s64(self, a: int64x1_t) -> int64x1_t { + unsafe { vneg_s64(a) } + } + #[doc = "See [`arch::vnegq_s64`]."] + #[inline(always)] + pub fn vnegq_s64(self, a: int64x2_t) -> int64x2_t { + unsafe { vnegq_s64(a) } + } + #[doc = "See [`arch::vnegd_s64`]."] + #[inline(always)] + pub fn vnegd_s64(self, a: i64) -> i64 { + unsafe { vnegd_s64(a) } + } + #[doc = "See [`arch::vpaddd_f64`]."] + #[inline(always)] + pub fn vpaddd_f64(self, a: float64x2_t) -> f64 { + unsafe { vpaddd_f64(a) } + } + #[doc = 
"See [`arch::vpadds_f32`]."] + #[inline(always)] + pub fn vpadds_f32(self, a: float32x2_t) -> f32 { + unsafe { vpadds_f32(a) } + } + #[doc = "See [`arch::vpaddd_s64`]."] + #[inline(always)] + pub fn vpaddd_s64(self, a: int64x2_t) -> i64 { + unsafe { vpaddd_s64(a) } + } + #[doc = "See [`arch::vpaddd_u64`]."] + #[inline(always)] + pub fn vpaddd_u64(self, a: uint64x2_t) -> u64 { + unsafe { vpaddd_u64(a) } + } + #[doc = "See [`arch::vpaddq_f32`]."] + #[inline(always)] + pub fn vpaddq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vpaddq_f32(a, b) } + } + #[doc = "See [`arch::vpaddq_f64`]."] + #[inline(always)] + pub fn vpaddq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vpaddq_f64(a, b) } + } + #[doc = "See [`arch::vpaddq_s8`]."] + #[inline(always)] + pub fn vpaddq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vpaddq_s8(a, b) } + } + #[doc = "See [`arch::vpaddq_s16`]."] + #[inline(always)] + pub fn vpaddq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vpaddq_s16(a, b) } + } + #[doc = "See [`arch::vpaddq_s32`]."] + #[inline(always)] + pub fn vpaddq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vpaddq_s32(a, b) } + } + #[doc = "See [`arch::vpaddq_s64`]."] + #[inline(always)] + pub fn vpaddq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vpaddq_s64(a, b) } + } + #[doc = "See [`arch::vpaddq_u8`]."] + #[inline(always)] + pub fn vpaddq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vpaddq_u8(a, b) } + } + #[doc = "See [`arch::vpaddq_u16`]."] + #[inline(always)] + pub fn vpaddq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vpaddq_u16(a, b) } + } + #[doc = "See [`arch::vpaddq_u32`]."] + #[inline(always)] + pub fn vpaddq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vpaddq_u32(a, b) } + } + #[doc = "See [`arch::vpaddq_u64`]."] + #[inline(always)] + pub fn vpaddq_u64(self, a: uint64x2_t, b: uint64x2_t) 
-> uint64x2_t { + unsafe { vpaddq_u64(a, b) } + } + #[doc = "See [`arch::vpmaxnm_f32`]."] + #[inline(always)] + pub fn vpmaxnm_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vpmaxnm_f32(a, b) } + } + #[doc = "See [`arch::vpmaxnmq_f32`]."] + #[inline(always)] + pub fn vpmaxnmq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vpmaxnmq_f32(a, b) } + } + #[doc = "See [`arch::vpmaxnmq_f64`]."] + #[inline(always)] + pub fn vpmaxnmq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vpmaxnmq_f64(a, b) } + } + #[doc = "See [`arch::vpmaxnmqd_f64`]."] + #[inline(always)] + pub fn vpmaxnmqd_f64(self, a: float64x2_t) -> f64 { + unsafe { vpmaxnmqd_f64(a) } + } + #[doc = "See [`arch::vpmaxnms_f32`]."] + #[inline(always)] + pub fn vpmaxnms_f32(self, a: float32x2_t) -> f32 { + unsafe { vpmaxnms_f32(a) } + } + #[doc = "See [`arch::vpmaxq_f32`]."] + #[inline(always)] + pub fn vpmaxq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vpmaxq_f32(a, b) } + } + #[doc = "See [`arch::vpmaxq_f64`]."] + #[inline(always)] + pub fn vpmaxq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vpmaxq_f64(a, b) } + } + #[doc = "See [`arch::vpmaxq_s8`]."] + #[inline(always)] + pub fn vpmaxq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vpmaxq_s8(a, b) } + } + #[doc = "See [`arch::vpmaxq_s16`]."] + #[inline(always)] + pub fn vpmaxq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vpmaxq_s16(a, b) } + } + #[doc = "See [`arch::vpmaxq_s32`]."] + #[inline(always)] + pub fn vpmaxq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vpmaxq_s32(a, b) } + } + #[doc = "See [`arch::vpmaxq_u8`]."] + #[inline(always)] + pub fn vpmaxq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vpmaxq_u8(a, b) } + } + #[doc = "See [`arch::vpmaxq_u16`]."] + #[inline(always)] + pub fn vpmaxq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { 
vpmaxq_u16(a, b) } + } + #[doc = "See [`arch::vpmaxq_u32`]."] + #[inline(always)] + pub fn vpmaxq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vpmaxq_u32(a, b) } + } + #[doc = "See [`arch::vpmaxqd_f64`]."] + #[inline(always)] + pub fn vpmaxqd_f64(self, a: float64x2_t) -> f64 { + unsafe { vpmaxqd_f64(a) } + } + #[doc = "See [`arch::vpmaxs_f32`]."] + #[inline(always)] + pub fn vpmaxs_f32(self, a: float32x2_t) -> f32 { + unsafe { vpmaxs_f32(a) } + } + #[doc = "See [`arch::vpminnm_f32`]."] + #[inline(always)] + pub fn vpminnm_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vpminnm_f32(a, b) } + } + #[doc = "See [`arch::vpminnmq_f32`]."] + #[inline(always)] + pub fn vpminnmq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vpminnmq_f32(a, b) } + } + #[doc = "See [`arch::vpminnmq_f64`]."] + #[inline(always)] + pub fn vpminnmq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vpminnmq_f64(a, b) } + } + #[doc = "See [`arch::vpminnmqd_f64`]."] + #[inline(always)] + pub fn vpminnmqd_f64(self, a: float64x2_t) -> f64 { + unsafe { vpminnmqd_f64(a) } + } + #[doc = "See [`arch::vpminnms_f32`]."] + #[inline(always)] + pub fn vpminnms_f32(self, a: float32x2_t) -> f32 { + unsafe { vpminnms_f32(a) } + } + #[doc = "See [`arch::vpminq_f32`]."] + #[inline(always)] + pub fn vpminq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vpminq_f32(a, b) } + } + #[doc = "See [`arch::vpminq_f64`]."] + #[inline(always)] + pub fn vpminq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vpminq_f64(a, b) } + } + #[doc = "See [`arch::vpminq_s8`]."] + #[inline(always)] + pub fn vpminq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vpminq_s8(a, b) } + } + #[doc = "See [`arch::vpminq_s16`]."] + #[inline(always)] + pub fn vpminq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vpminq_s16(a, b) } + } + #[doc = "See [`arch::vpminq_s32`]."] + 
#[inline(always)] + pub fn vpminq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vpminq_s32(a, b) } + } + #[doc = "See [`arch::vpminq_u8`]."] + #[inline(always)] + pub fn vpminq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vpminq_u8(a, b) } + } + #[doc = "See [`arch::vpminq_u16`]."] + #[inline(always)] + pub fn vpminq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vpminq_u16(a, b) } + } + #[doc = "See [`arch::vpminq_u32`]."] + #[inline(always)] + pub fn vpminq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vpminq_u32(a, b) } + } + #[doc = "See [`arch::vpminqd_f64`]."] + #[inline(always)] + pub fn vpminqd_f64(self, a: float64x2_t) -> f64 { + unsafe { vpminqd_f64(a) } + } + #[doc = "See [`arch::vpmins_f32`]."] + #[inline(always)] + pub fn vpmins_f32(self, a: float32x2_t) -> f32 { + unsafe { vpmins_f32(a) } + } + #[doc = "See [`arch::vqabs_s64`]."] + #[inline(always)] + pub fn vqabs_s64(self, a: int64x1_t) -> int64x1_t { + unsafe { vqabs_s64(a) } + } + #[doc = "See [`arch::vqabsq_s64`]."] + #[inline(always)] + pub fn vqabsq_s64(self, a: int64x2_t) -> int64x2_t { + unsafe { vqabsq_s64(a) } + } + #[doc = "See [`arch::vqabsb_s8`]."] + #[inline(always)] + pub fn vqabsb_s8(self, a: i8) -> i8 { + unsafe { vqabsb_s8(a) } + } + #[doc = "See [`arch::vqabsh_s16`]."] + #[inline(always)] + pub fn vqabsh_s16(self, a: i16) -> i16 { + unsafe { vqabsh_s16(a) } + } + #[doc = "See [`arch::vqabss_s32`]."] + #[inline(always)] + pub fn vqabss_s32(self, a: i32) -> i32 { + unsafe { vqabss_s32(a) } + } + #[doc = "See [`arch::vqabsd_s64`]."] + #[inline(always)] + pub fn vqabsd_s64(self, a: i64) -> i64 { + unsafe { vqabsd_s64(a) } + } + #[doc = "See [`arch::vqaddb_s8`]."] + #[inline(always)] + pub fn vqaddb_s8(self, a: i8, b: i8) -> i8 { + unsafe { vqaddb_s8(a, b) } + } + #[doc = "See [`arch::vqaddh_s16`]."] + #[inline(always)] + pub fn vqaddh_s16(self, a: i16, b: i16) -> i16 { + unsafe { vqaddh_s16(a, b) } + } + 
#[doc = "See [`arch::vqaddb_u8`]."] + #[inline(always)] + pub fn vqaddb_u8(self, a: u8, b: u8) -> u8 { + unsafe { vqaddb_u8(a, b) } + } + #[doc = "See [`arch::vqaddh_u16`]."] + #[inline(always)] + pub fn vqaddh_u16(self, a: u16, b: u16) -> u16 { + unsafe { vqaddh_u16(a, b) } + } + #[doc = "See [`arch::vqadds_s32`]."] + #[inline(always)] + pub fn vqadds_s32(self, a: i32, b: i32) -> i32 { + unsafe { vqadds_s32(a, b) } + } + #[doc = "See [`arch::vqaddd_s64`]."] + #[inline(always)] + pub fn vqaddd_s64(self, a: i64, b: i64) -> i64 { + unsafe { vqaddd_s64(a, b) } + } + #[doc = "See [`arch::vqadds_u32`]."] + #[inline(always)] + pub fn vqadds_u32(self, a: u32, b: u32) -> u32 { + unsafe { vqadds_u32(a, b) } + } + #[doc = "See [`arch::vqaddd_u64`]."] + #[inline(always)] + pub fn vqaddd_u64(self, a: u64, b: u64) -> u64 { + unsafe { vqaddd_u64(a, b) } + } + #[doc = "See [`arch::vqdmlal_high_lane_s16`]."] + #[inline(always)] + pub fn vqdmlal_high_lane_s16( + self, + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, + ) -> int32x4_t { + unsafe { vqdmlal_high_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_high_laneq_s16`]."] + #[inline(always)] + pub fn vqdmlal_high_laneq_s16( + self, + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, + ) -> int32x4_t { + unsafe { vqdmlal_high_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_high_lane_s32`]."] + #[inline(always)] + pub fn vqdmlal_high_lane_s32( + self, + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, + ) -> int64x2_t { + unsafe { vqdmlal_high_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_high_laneq_s32`]."] + #[inline(always)] + pub fn vqdmlal_high_laneq_s32( + self, + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, + ) -> int64x2_t { + unsafe { vqdmlal_high_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_high_n_s16`]."] + #[inline(always)] + pub fn vqdmlal_high_n_s16(self, a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + unsafe { vqdmlal_high_n_s16(a, b, c) } + } + #[doc = "See 
[`arch::vqdmlal_high_s16`]."] + #[inline(always)] + pub fn vqdmlal_high_s16(self, a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + unsafe { vqdmlal_high_s16(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_high_n_s32`]."] + #[inline(always)] + pub fn vqdmlal_high_n_s32(self, a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + unsafe { vqdmlal_high_n_s32(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_high_s32`]."] + #[inline(always)] + pub fn vqdmlal_high_s32(self, a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + unsafe { vqdmlal_high_s32(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_laneq_s16`]."] + #[inline(always)] + pub fn vqdmlal_laneq_s16( + self, + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, + ) -> int32x4_t { + unsafe { vqdmlal_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_laneq_s32`]."] + #[inline(always)] + pub fn vqdmlal_laneq_s32( + self, + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, + ) -> int64x2_t { + unsafe { vqdmlal_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlalh_lane_s16`]."] + #[inline(always)] + pub fn vqdmlalh_lane_s16(self, a: i32, b: i16, c: int16x4_t) -> i32 { + unsafe { vqdmlalh_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vqdmlalh_laneq_s16`]."] + #[inline(always)] + pub fn vqdmlalh_laneq_s16(self, a: i32, b: i16, c: int16x8_t) -> i32 { + unsafe { vqdmlalh_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vqdmlals_lane_s32`]."] + #[inline(always)] + pub fn vqdmlals_lane_s32(self, a: i64, b: i32, c: int32x2_t) -> i64 { + unsafe { vqdmlals_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlals_laneq_s32`]."] + #[inline(always)] + pub fn vqdmlals_laneq_s32(self, a: i64, b: i32, c: int32x4_t) -> i64 { + unsafe { vqdmlals_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlalh_s16`]."] + #[inline(always)] + pub fn vqdmlalh_s16(self, a: i32, b: i16, c: i16) -> i32 { + unsafe { vqdmlalh_s16(a, b, c) } + } + #[doc = "See [`arch::vqdmlals_s32`]."] + #[inline(always)] + pub fn vqdmlals_s32(self, 
a: i64, b: i32, c: i32) -> i64 { + unsafe { vqdmlals_s32(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_high_lane_s16`]."] + #[inline(always)] + pub fn vqdmlsl_high_lane_s16( + self, + a: int32x4_t, + b: int16x8_t, + c: int16x4_t, + ) -> int32x4_t { + unsafe { vqdmlsl_high_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_high_laneq_s16`]."] + #[inline(always)] + pub fn vqdmlsl_high_laneq_s16( + self, + a: int32x4_t, + b: int16x8_t, + c: int16x8_t, + ) -> int32x4_t { + unsafe { vqdmlsl_high_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_high_lane_s32`]."] + #[inline(always)] + pub fn vqdmlsl_high_lane_s32( + self, + a: int64x2_t, + b: int32x4_t, + c: int32x2_t, + ) -> int64x2_t { + unsafe { vqdmlsl_high_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_high_laneq_s32`]."] + #[inline(always)] + pub fn vqdmlsl_high_laneq_s32( + self, + a: int64x2_t, + b: int32x4_t, + c: int32x4_t, + ) -> int64x2_t { + unsafe { vqdmlsl_high_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_high_n_s16`]."] + #[inline(always)] + pub fn vqdmlsl_high_n_s16(self, a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { + unsafe { vqdmlsl_high_n_s16(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_high_s16`]."] + #[inline(always)] + pub fn vqdmlsl_high_s16(self, a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { + unsafe { vqdmlsl_high_s16(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_high_n_s32`]."] + #[inline(always)] + pub fn vqdmlsl_high_n_s32(self, a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { + unsafe { vqdmlsl_high_n_s32(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_high_s32`]."] + #[inline(always)] + pub fn vqdmlsl_high_s32(self, a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { + unsafe { vqdmlsl_high_s32(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_laneq_s16`]."] + #[inline(always)] + pub fn vqdmlsl_laneq_s16( + self, + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, + ) -> int32x4_t { + unsafe { vqdmlsl_laneq_s16::(a, b, c) } + } + #[doc = 
"See [`arch::vqdmlsl_laneq_s32`]."] + #[inline(always)] + pub fn vqdmlsl_laneq_s32( + self, + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, + ) -> int64x2_t { + unsafe { vqdmlsl_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlslh_lane_s16`]."] + #[inline(always)] + pub fn vqdmlslh_lane_s16(self, a: i32, b: i16, c: int16x4_t) -> i32 { + unsafe { vqdmlslh_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vqdmlslh_laneq_s16`]."] + #[inline(always)] + pub fn vqdmlslh_laneq_s16(self, a: i32, b: i16, c: int16x8_t) -> i32 { + unsafe { vqdmlslh_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vqdmlsls_lane_s32`]."] + #[inline(always)] + pub fn vqdmlsls_lane_s32(self, a: i64, b: i32, c: int32x2_t) -> i64 { + unsafe { vqdmlsls_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlsls_laneq_s32`]."] + #[inline(always)] + pub fn vqdmlsls_laneq_s32(self, a: i64, b: i32, c: int32x4_t) -> i64 { + unsafe { vqdmlsls_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlslh_s16`]."] + #[inline(always)] + pub fn vqdmlslh_s16(self, a: i32, b: i16, c: i16) -> i32 { + unsafe { vqdmlslh_s16(a, b, c) } + } + #[doc = "See [`arch::vqdmlsls_s32`]."] + #[inline(always)] + pub fn vqdmlsls_s32(self, a: i64, b: i32, c: i32) -> i64 { + unsafe { vqdmlsls_s32(a, b, c) } + } + #[doc = "See [`arch::vqdmulh_lane_s16`]."] + #[inline(always)] + pub fn vqdmulh_lane_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vqdmulh_lane_s16::(a, b) } + } + #[doc = "See [`arch::vqdmulhq_lane_s16`]."] + #[inline(always)] + pub fn vqdmulhq_lane_s16(self, a: int16x8_t, b: int16x4_t) -> int16x8_t { + unsafe { vqdmulhq_lane_s16::(a, b) } + } + #[doc = "See [`arch::vqdmulh_lane_s32`]."] + #[inline(always)] + pub fn vqdmulh_lane_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vqdmulh_lane_s32::(a, b) } + } + #[doc = "See [`arch::vqdmulhq_lane_s32`]."] + #[inline(always)] + pub fn vqdmulhq_lane_s32(self, a: int32x4_t, b: int32x2_t) -> int32x4_t { + unsafe { vqdmulhq_lane_s32::(a, b) } 
+ } + #[doc = "See [`arch::vqdmulhh_lane_s16`]."] + #[inline(always)] + pub fn vqdmulhh_lane_s16(self, a: i16, b: int16x4_t) -> i16 { + unsafe { vqdmulhh_lane_s16::(a, b) } + } + #[doc = "See [`arch::vqdmulhh_laneq_s16`]."] + #[inline(always)] + pub fn vqdmulhh_laneq_s16(self, a: i16, b: int16x8_t) -> i16 { + unsafe { vqdmulhh_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vqdmulhh_s16`]."] + #[inline(always)] + pub fn vqdmulhh_s16(self, a: i16, b: i16) -> i16 { + unsafe { vqdmulhh_s16(a, b) } + } + #[doc = "See [`arch::vqdmulhs_s32`]."] + #[inline(always)] + pub fn vqdmulhs_s32(self, a: i32, b: i32) -> i32 { + unsafe { vqdmulhs_s32(a, b) } + } + #[doc = "See [`arch::vqdmulhs_lane_s32`]."] + #[inline(always)] + pub fn vqdmulhs_lane_s32(self, a: i32, b: int32x2_t) -> i32 { + unsafe { vqdmulhs_lane_s32::(a, b) } + } + #[doc = "See [`arch::vqdmulhs_laneq_s32`]."] + #[inline(always)] + pub fn vqdmulhs_laneq_s32(self, a: i32, b: int32x4_t) -> i32 { + unsafe { vqdmulhs_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vqdmull_high_lane_s16`]."] + #[inline(always)] + pub fn vqdmull_high_lane_s16(self, a: int16x8_t, b: int16x4_t) -> int32x4_t { + unsafe { vqdmull_high_lane_s16::(a, b) } + } + #[doc = "See [`arch::vqdmull_high_laneq_s32`]."] + #[inline(always)] + pub fn vqdmull_high_laneq_s32(self, a: int32x4_t, b: int32x4_t) -> int64x2_t { + unsafe { vqdmull_high_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vqdmull_high_lane_s32`]."] + #[inline(always)] + pub fn vqdmull_high_lane_s32(self, a: int32x4_t, b: int32x2_t) -> int64x2_t { + unsafe { vqdmull_high_lane_s32::(a, b) } + } + #[doc = "See [`arch::vqdmull_high_laneq_s16`]."] + #[inline(always)] + pub fn vqdmull_high_laneq_s16(self, a: int16x8_t, b: int16x8_t) -> int32x4_t { + unsafe { vqdmull_high_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vqdmull_high_n_s16`]."] + #[inline(always)] + pub fn vqdmull_high_n_s16(self, a: int16x8_t, b: i16) -> int32x4_t { + unsafe { vqdmull_high_n_s16(a, b) } + } + #[doc = "See 
[`arch::vqdmull_high_n_s32`]."] + #[inline(always)] + pub fn vqdmull_high_n_s32(self, a: int32x4_t, b: i32) -> int64x2_t { + unsafe { vqdmull_high_n_s32(a, b) } + } + #[doc = "See [`arch::vqdmull_high_s16`]."] + #[inline(always)] + pub fn vqdmull_high_s16(self, a: int16x8_t, b: int16x8_t) -> int32x4_t { + unsafe { vqdmull_high_s16(a, b) } + } + #[doc = "See [`arch::vqdmull_high_s32`]."] + #[inline(always)] + pub fn vqdmull_high_s32(self, a: int32x4_t, b: int32x4_t) -> int64x2_t { + unsafe { vqdmull_high_s32(a, b) } + } + #[doc = "See [`arch::vqdmull_laneq_s16`]."] + #[inline(always)] + pub fn vqdmull_laneq_s16(self, a: int16x4_t, b: int16x8_t) -> int32x4_t { + unsafe { vqdmull_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vqdmull_laneq_s32`]."] + #[inline(always)] + pub fn vqdmull_laneq_s32(self, a: int32x2_t, b: int32x4_t) -> int64x2_t { + unsafe { vqdmull_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vqdmullh_lane_s16`]."] + #[inline(always)] + pub fn vqdmullh_lane_s16(self, a: i16, b: int16x4_t) -> i32 { + unsafe { vqdmullh_lane_s16::(a, b) } + } + #[doc = "See [`arch::vqdmulls_laneq_s32`]."] + #[inline(always)] + pub fn vqdmulls_laneq_s32(self, a: i32, b: int32x4_t) -> i64 { + unsafe { vqdmulls_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vqdmullh_laneq_s16`]."] + #[inline(always)] + pub fn vqdmullh_laneq_s16(self, a: i16, b: int16x8_t) -> i32 { + unsafe { vqdmullh_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vqdmullh_s16`]."] + #[inline(always)] + pub fn vqdmullh_s16(self, a: i16, b: i16) -> i32 { + unsafe { vqdmullh_s16(a, b) } + } + #[doc = "See [`arch::vqdmulls_lane_s32`]."] + #[inline(always)] + pub fn vqdmulls_lane_s32(self, a: i32, b: int32x2_t) -> i64 { + unsafe { vqdmulls_lane_s32::(a, b) } + } + #[doc = "See [`arch::vqdmulls_s32`]."] + #[inline(always)] + pub fn vqdmulls_s32(self, a: i32, b: i32) -> i64 { + unsafe { vqdmulls_s32(a, b) } + } + #[doc = "See [`arch::vqmovn_high_s16`]."] + #[inline(always)] + pub fn vqmovn_high_s16(self, a: 
int8x8_t, b: int16x8_t) -> int8x16_t { + unsafe { vqmovn_high_s16(a, b) } + } + #[doc = "See [`arch::vqmovn_high_s32`]."] + #[inline(always)] + pub fn vqmovn_high_s32(self, a: int16x4_t, b: int32x4_t) -> int16x8_t { + unsafe { vqmovn_high_s32(a, b) } + } + #[doc = "See [`arch::vqmovn_high_s64`]."] + #[inline(always)] + pub fn vqmovn_high_s64(self, a: int32x2_t, b: int64x2_t) -> int32x4_t { + unsafe { vqmovn_high_s64(a, b) } + } + #[doc = "See [`arch::vqmovn_high_u16`]."] + #[inline(always)] + pub fn vqmovn_high_u16(self, a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + unsafe { vqmovn_high_u16(a, b) } + } + #[doc = "See [`arch::vqmovn_high_u32`]."] + #[inline(always)] + pub fn vqmovn_high_u32(self, a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + unsafe { vqmovn_high_u32(a, b) } + } + #[doc = "See [`arch::vqmovn_high_u64`]."] + #[inline(always)] + pub fn vqmovn_high_u64(self, a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + unsafe { vqmovn_high_u64(a, b) } + } + #[doc = "See [`arch::vqmovnd_s64`]."] + #[inline(always)] + pub fn vqmovnd_s64(self, a: i64) -> i32 { + unsafe { vqmovnd_s64(a) } + } + #[doc = "See [`arch::vqmovnd_u64`]."] + #[inline(always)] + pub fn vqmovnd_u64(self, a: u64) -> u32 { + unsafe { vqmovnd_u64(a) } + } + #[doc = "See [`arch::vqmovnh_s16`]."] + #[inline(always)] + pub fn vqmovnh_s16(self, a: i16) -> i8 { + unsafe { vqmovnh_s16(a) } + } + #[doc = "See [`arch::vqmovns_s32`]."] + #[inline(always)] + pub fn vqmovns_s32(self, a: i32) -> i16 { + unsafe { vqmovns_s32(a) } + } + #[doc = "See [`arch::vqmovnh_u16`]."] + #[inline(always)] + pub fn vqmovnh_u16(self, a: u16) -> u8 { + unsafe { vqmovnh_u16(a) } + } + #[doc = "See [`arch::vqmovns_u32`]."] + #[inline(always)] + pub fn vqmovns_u32(self, a: u32) -> u16 { + unsafe { vqmovns_u32(a) } + } + #[doc = "See [`arch::vqmovun_high_s16`]."] + #[inline(always)] + pub fn vqmovun_high_s16(self, a: uint8x8_t, b: int16x8_t) -> uint8x16_t { + unsafe { vqmovun_high_s16(a, b) } + } + #[doc = "See 
[`arch::vqmovun_high_s32`]."] + #[inline(always)] + pub fn vqmovun_high_s32(self, a: uint16x4_t, b: int32x4_t) -> uint16x8_t { + unsafe { vqmovun_high_s32(a, b) } + } + #[doc = "See [`arch::vqmovun_high_s64`]."] + #[inline(always)] + pub fn vqmovun_high_s64(self, a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + unsafe { vqmovun_high_s64(a, b) } + } + #[doc = "See [`arch::vqmovunh_s16`]."] + #[inline(always)] + pub fn vqmovunh_s16(self, a: i16) -> u8 { + unsafe { vqmovunh_s16(a) } + } + #[doc = "See [`arch::vqmovuns_s32`]."] + #[inline(always)] + pub fn vqmovuns_s32(self, a: i32) -> u16 { + unsafe { vqmovuns_s32(a) } + } + #[doc = "See [`arch::vqmovund_s64`]."] + #[inline(always)] + pub fn vqmovund_s64(self, a: i64) -> u32 { + unsafe { vqmovund_s64(a) } + } + #[doc = "See [`arch::vqneg_s64`]."] + #[inline(always)] + pub fn vqneg_s64(self, a: int64x1_t) -> int64x1_t { + unsafe { vqneg_s64(a) } + } + #[doc = "See [`arch::vqnegq_s64`]."] + #[inline(always)] + pub fn vqnegq_s64(self, a: int64x2_t) -> int64x2_t { + unsafe { vqnegq_s64(a) } + } + #[doc = "See [`arch::vqnegb_s8`]."] + #[inline(always)] + pub fn vqnegb_s8(self, a: i8) -> i8 { + unsafe { vqnegb_s8(a) } + } + #[doc = "See [`arch::vqnegh_s16`]."] + #[inline(always)] + pub fn vqnegh_s16(self, a: i16) -> i16 { + unsafe { vqnegh_s16(a) } + } + #[doc = "See [`arch::vqnegs_s32`]."] + #[inline(always)] + pub fn vqnegs_s32(self, a: i32) -> i32 { + unsafe { vqnegs_s32(a) } + } + #[doc = "See [`arch::vqnegd_s64`]."] + #[inline(always)] + pub fn vqnegd_s64(self, a: i64) -> i64 { + unsafe { vqnegd_s64(a) } + } + #[doc = "See [`arch::vqrdmulhh_lane_s16`]."] + #[inline(always)] + pub fn vqrdmulhh_lane_s16(self, a: i16, b: int16x4_t) -> i16 { + unsafe { vqrdmulhh_lane_s16::(a, b) } + } + #[doc = "See [`arch::vqrdmulhh_laneq_s16`]."] + #[inline(always)] + pub fn vqrdmulhh_laneq_s16(self, a: i16, b: int16x8_t) -> i16 { + unsafe { vqrdmulhh_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vqrdmulhs_lane_s32`]."] + 
#[inline(always)] + pub fn vqrdmulhs_lane_s32(self, a: i32, b: int32x2_t) -> i32 { + unsafe { vqrdmulhs_lane_s32::(a, b) } + } + #[doc = "See [`arch::vqrdmulhs_laneq_s32`]."] + #[inline(always)] + pub fn vqrdmulhs_laneq_s32(self, a: i32, b: int32x4_t) -> i32 { + unsafe { vqrdmulhs_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vqrdmulhh_s16`]."] + #[inline(always)] + pub fn vqrdmulhh_s16(self, a: i16, b: i16) -> i16 { + unsafe { vqrdmulhh_s16(a, b) } + } + #[doc = "See [`arch::vqrdmulhs_s32`]."] + #[inline(always)] + pub fn vqrdmulhs_s32(self, a: i32, b: i32) -> i32 { + unsafe { vqrdmulhs_s32(a, b) } + } + #[doc = "See [`arch::vqrshlb_s8`]."] + #[inline(always)] + pub fn vqrshlb_s8(self, a: i8, b: i8) -> i8 { + unsafe { vqrshlb_s8(a, b) } + } + #[doc = "See [`arch::vqrshlh_s16`]."] + #[inline(always)] + pub fn vqrshlh_s16(self, a: i16, b: i16) -> i16 { + unsafe { vqrshlh_s16(a, b) } + } + #[doc = "See [`arch::vqrshlb_u8`]."] + #[inline(always)] + pub fn vqrshlb_u8(self, a: u8, b: i8) -> u8 { + unsafe { vqrshlb_u8(a, b) } + } + #[doc = "See [`arch::vqrshlh_u16`]."] + #[inline(always)] + pub fn vqrshlh_u16(self, a: u16, b: i16) -> u16 { + unsafe { vqrshlh_u16(a, b) } + } + #[doc = "See [`arch::vqrshld_s64`]."] + #[inline(always)] + pub fn vqrshld_s64(self, a: i64, b: i64) -> i64 { + unsafe { vqrshld_s64(a, b) } + } + #[doc = "See [`arch::vqrshls_s32`]."] + #[inline(always)] + pub fn vqrshls_s32(self, a: i32, b: i32) -> i32 { + unsafe { vqrshls_s32(a, b) } + } + #[doc = "See [`arch::vqrshls_u32`]."] + #[inline(always)] + pub fn vqrshls_u32(self, a: u32, b: i32) -> u32 { + unsafe { vqrshls_u32(a, b) } + } + #[doc = "See [`arch::vqrshld_u64`]."] + #[inline(always)] + pub fn vqrshld_u64(self, a: u64, b: i64) -> u64 { + unsafe { vqrshld_u64(a, b) } + } + #[doc = "See [`arch::vqrshrn_high_n_s16`]."] + #[inline(always)] + pub fn vqrshrn_high_n_s16(self, a: int8x8_t, b: int16x8_t) -> int8x16_t { + unsafe { vqrshrn_high_n_s16::(a, b) } + } + #[doc = "See 
[`arch::vqrshrn_high_n_s32`]."] + #[inline(always)] + pub fn vqrshrn_high_n_s32(self, a: int16x4_t, b: int32x4_t) -> int16x8_t { + unsafe { vqrshrn_high_n_s32::(a, b) } + } + #[doc = "See [`arch::vqrshrn_high_n_s64`]."] + #[inline(always)] + pub fn vqrshrn_high_n_s64(self, a: int32x2_t, b: int64x2_t) -> int32x4_t { + unsafe { vqrshrn_high_n_s64::(a, b) } + } + #[doc = "See [`arch::vqrshrn_high_n_u16`]."] + #[inline(always)] + pub fn vqrshrn_high_n_u16(self, a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + unsafe { vqrshrn_high_n_u16::(a, b) } + } + #[doc = "See [`arch::vqrshrn_high_n_u32`]."] + #[inline(always)] + pub fn vqrshrn_high_n_u32(self, a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + unsafe { vqrshrn_high_n_u32::(a, b) } + } + #[doc = "See [`arch::vqrshrn_high_n_u64`]."] + #[inline(always)] + pub fn vqrshrn_high_n_u64(self, a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + unsafe { vqrshrn_high_n_u64::(a, b) } + } + #[doc = "See [`arch::vqrshrnd_n_u64`]."] + #[inline(always)] + pub fn vqrshrnd_n_u64(self, a: u64) -> u32 { + unsafe { vqrshrnd_n_u64::(a) } + } + #[doc = "See [`arch::vqrshrnh_n_u16`]."] + #[inline(always)] + pub fn vqrshrnh_n_u16(self, a: u16) -> u8 { + unsafe { vqrshrnh_n_u16::(a) } + } + #[doc = "See [`arch::vqrshrns_n_u32`]."] + #[inline(always)] + pub fn vqrshrns_n_u32(self, a: u32) -> u16 { + unsafe { vqrshrns_n_u32::(a) } + } + #[doc = "See [`arch::vqrshrnh_n_s16`]."] + #[inline(always)] + pub fn vqrshrnh_n_s16(self, a: i16) -> i8 { + unsafe { vqrshrnh_n_s16::(a) } + } + #[doc = "See [`arch::vqrshrns_n_s32`]."] + #[inline(always)] + pub fn vqrshrns_n_s32(self, a: i32) -> i16 { + unsafe { vqrshrns_n_s32::(a) } + } + #[doc = "See [`arch::vqrshrnd_n_s64`]."] + #[inline(always)] + pub fn vqrshrnd_n_s64(self, a: i64) -> i32 { + unsafe { vqrshrnd_n_s64::(a) } + } + #[doc = "See [`arch::vqrshrun_high_n_s16`]."] + #[inline(always)] + pub fn vqrshrun_high_n_s16(self, a: uint8x8_t, b: int16x8_t) -> uint8x16_t { + unsafe { vqrshrun_high_n_s16::(a, b) 
} + } + #[doc = "See [`arch::vqrshrun_high_n_s32`]."] + #[inline(always)] + pub fn vqrshrun_high_n_s32(self, a: uint16x4_t, b: int32x4_t) -> uint16x8_t { + unsafe { vqrshrun_high_n_s32::(a, b) } + } + #[doc = "See [`arch::vqrshrun_high_n_s64`]."] + #[inline(always)] + pub fn vqrshrun_high_n_s64(self, a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + unsafe { vqrshrun_high_n_s64::(a, b) } + } + #[doc = "See [`arch::vqrshrund_n_s64`]."] + #[inline(always)] + pub fn vqrshrund_n_s64(self, a: i64) -> u32 { + unsafe { vqrshrund_n_s64::(a) } + } + #[doc = "See [`arch::vqrshrunh_n_s16`]."] + #[inline(always)] + pub fn vqrshrunh_n_s16(self, a: i16) -> u8 { + unsafe { vqrshrunh_n_s16::(a) } + } + #[doc = "See [`arch::vqrshruns_n_s32`]."] + #[inline(always)] + pub fn vqrshruns_n_s32(self, a: i32) -> u16 { + unsafe { vqrshruns_n_s32::(a) } + } + #[doc = "See [`arch::vqshlb_n_s8`]."] + #[inline(always)] + pub fn vqshlb_n_s8(self, a: i8) -> i8 { + unsafe { vqshlb_n_s8::(a) } + } + #[doc = "See [`arch::vqshld_n_s64`]."] + #[inline(always)] + pub fn vqshld_n_s64(self, a: i64) -> i64 { + unsafe { vqshld_n_s64::(a) } + } + #[doc = "See [`arch::vqshlh_n_s16`]."] + #[inline(always)] + pub fn vqshlh_n_s16(self, a: i16) -> i16 { + unsafe { vqshlh_n_s16::(a) } + } + #[doc = "See [`arch::vqshls_n_s32`]."] + #[inline(always)] + pub fn vqshls_n_s32(self, a: i32) -> i32 { + unsafe { vqshls_n_s32::(a) } + } + #[doc = "See [`arch::vqshlb_n_u8`]."] + #[inline(always)] + pub fn vqshlb_n_u8(self, a: u8) -> u8 { + unsafe { vqshlb_n_u8::(a) } + } + #[doc = "See [`arch::vqshld_n_u64`]."] + #[inline(always)] + pub fn vqshld_n_u64(self, a: u64) -> u64 { + unsafe { vqshld_n_u64::(a) } + } + #[doc = "See [`arch::vqshlh_n_u16`]."] + #[inline(always)] + pub fn vqshlh_n_u16(self, a: u16) -> u16 { + unsafe { vqshlh_n_u16::(a) } + } + #[doc = "See [`arch::vqshls_n_u32`]."] + #[inline(always)] + pub fn vqshls_n_u32(self, a: u32) -> u32 { + unsafe { vqshls_n_u32::(a) } + } + #[doc = "See [`arch::vqshlb_s8`]."] + 
#[inline(always)] + pub fn vqshlb_s8(self, a: i8, b: i8) -> i8 { + unsafe { vqshlb_s8(a, b) } + } + #[doc = "See [`arch::vqshlh_s16`]."] + #[inline(always)] + pub fn vqshlh_s16(self, a: i16, b: i16) -> i16 { + unsafe { vqshlh_s16(a, b) } + } + #[doc = "See [`arch::vqshls_s32`]."] + #[inline(always)] + pub fn vqshls_s32(self, a: i32, b: i32) -> i32 { + unsafe { vqshls_s32(a, b) } + } + #[doc = "See [`arch::vqshlb_u8`]."] + #[inline(always)] + pub fn vqshlb_u8(self, a: u8, b: i8) -> u8 { + unsafe { vqshlb_u8(a, b) } + } + #[doc = "See [`arch::vqshlh_u16`]."] + #[inline(always)] + pub fn vqshlh_u16(self, a: u16, b: i16) -> u16 { + unsafe { vqshlh_u16(a, b) } + } + #[doc = "See [`arch::vqshls_u32`]."] + #[inline(always)] + pub fn vqshls_u32(self, a: u32, b: i32) -> u32 { + unsafe { vqshls_u32(a, b) } + } + #[doc = "See [`arch::vqshld_s64`]."] + #[inline(always)] + pub fn vqshld_s64(self, a: i64, b: i64) -> i64 { + unsafe { vqshld_s64(a, b) } + } + #[doc = "See [`arch::vqshld_u64`]."] + #[inline(always)] + pub fn vqshld_u64(self, a: u64, b: i64) -> u64 { + unsafe { vqshld_u64(a, b) } + } + #[doc = "See [`arch::vqshlub_n_s8`]."] + #[inline(always)] + pub fn vqshlub_n_s8(self, a: i8) -> u8 { + unsafe { vqshlub_n_s8::(a) } + } + #[doc = "See [`arch::vqshlud_n_s64`]."] + #[inline(always)] + pub fn vqshlud_n_s64(self, a: i64) -> u64 { + unsafe { vqshlud_n_s64::(a) } + } + #[doc = "See [`arch::vqshluh_n_s16`]."] + #[inline(always)] + pub fn vqshluh_n_s16(self, a: i16) -> u16 { + unsafe { vqshluh_n_s16::(a) } + } + #[doc = "See [`arch::vqshlus_n_s32`]."] + #[inline(always)] + pub fn vqshlus_n_s32(self, a: i32) -> u32 { + unsafe { vqshlus_n_s32::(a) } + } + #[doc = "See [`arch::vqshrn_high_n_s16`]."] + #[inline(always)] + pub fn vqshrn_high_n_s16(self, a: int8x8_t, b: int16x8_t) -> int8x16_t { + unsafe { vqshrn_high_n_s16::(a, b) } + } + #[doc = "See [`arch::vqshrn_high_n_s32`]."] + #[inline(always)] + pub fn vqshrn_high_n_s32(self, a: int16x4_t, b: int32x4_t) -> int16x8_t { + 
unsafe { vqshrn_high_n_s32::(a, b) } + } + #[doc = "See [`arch::vqshrn_high_n_s64`]."] + #[inline(always)] + pub fn vqshrn_high_n_s64(self, a: int32x2_t, b: int64x2_t) -> int32x4_t { + unsafe { vqshrn_high_n_s64::(a, b) } + } + #[doc = "See [`arch::vqshrn_high_n_u16`]."] + #[inline(always)] + pub fn vqshrn_high_n_u16(self, a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + unsafe { vqshrn_high_n_u16::(a, b) } + } + #[doc = "See [`arch::vqshrn_high_n_u32`]."] + #[inline(always)] + pub fn vqshrn_high_n_u32(self, a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + unsafe { vqshrn_high_n_u32::(a, b) } + } + #[doc = "See [`arch::vqshrn_high_n_u64`]."] + #[inline(always)] + pub fn vqshrn_high_n_u64(self, a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + unsafe { vqshrn_high_n_u64::(a, b) } + } + #[doc = "See [`arch::vqshrnd_n_s64`]."] + #[inline(always)] + pub fn vqshrnd_n_s64(self, a: i64) -> i32 { + unsafe { vqshrnd_n_s64::(a) } + } + #[doc = "See [`arch::vqshrnd_n_u64`]."] + #[inline(always)] + pub fn vqshrnd_n_u64(self, a: u64) -> u32 { + unsafe { vqshrnd_n_u64::(a) } + } + #[doc = "See [`arch::vqshrnh_n_s16`]."] + #[inline(always)] + pub fn vqshrnh_n_s16(self, a: i16) -> i8 { + unsafe { vqshrnh_n_s16::(a) } + } + #[doc = "See [`arch::vqshrns_n_s32`]."] + #[inline(always)] + pub fn vqshrns_n_s32(self, a: i32) -> i16 { + unsafe { vqshrns_n_s32::(a) } + } + #[doc = "See [`arch::vqshrnh_n_u16`]."] + #[inline(always)] + pub fn vqshrnh_n_u16(self, a: u16) -> u8 { + unsafe { vqshrnh_n_u16::(a) } + } + #[doc = "See [`arch::vqshrns_n_u32`]."] + #[inline(always)] + pub fn vqshrns_n_u32(self, a: u32) -> u16 { + unsafe { vqshrns_n_u32::(a) } + } + #[doc = "See [`arch::vqshrun_high_n_s16`]."] + #[inline(always)] + pub fn vqshrun_high_n_s16(self, a: uint8x8_t, b: int16x8_t) -> uint8x16_t { + unsafe { vqshrun_high_n_s16::(a, b) } + } + #[doc = "See [`arch::vqshrun_high_n_s32`]."] + #[inline(always)] + pub fn vqshrun_high_n_s32(self, a: uint16x4_t, b: int32x4_t) -> uint16x8_t { + unsafe { 
vqshrun_high_n_s32::(a, b) } + } + #[doc = "See [`arch::vqshrun_high_n_s64`]."] + #[inline(always)] + pub fn vqshrun_high_n_s64(self, a: uint32x2_t, b: int64x2_t) -> uint32x4_t { + unsafe { vqshrun_high_n_s64::(a, b) } + } + #[doc = "See [`arch::vqshrund_n_s64`]."] + #[inline(always)] + pub fn vqshrund_n_s64(self, a: i64) -> u32 { + unsafe { vqshrund_n_s64::(a) } + } + #[doc = "See [`arch::vqshrunh_n_s16`]."] + #[inline(always)] + pub fn vqshrunh_n_s16(self, a: i16) -> u8 { + unsafe { vqshrunh_n_s16::(a) } + } + #[doc = "See [`arch::vqshruns_n_s32`]."] + #[inline(always)] + pub fn vqshruns_n_s32(self, a: i32) -> u16 { + unsafe { vqshruns_n_s32::(a) } + } + #[doc = "See [`arch::vqsubb_s8`]."] + #[inline(always)] + pub fn vqsubb_s8(self, a: i8, b: i8) -> i8 { + unsafe { vqsubb_s8(a, b) } + } + #[doc = "See [`arch::vqsubh_s16`]."] + #[inline(always)] + pub fn vqsubh_s16(self, a: i16, b: i16) -> i16 { + unsafe { vqsubh_s16(a, b) } + } + #[doc = "See [`arch::vqsubb_u8`]."] + #[inline(always)] + pub fn vqsubb_u8(self, a: u8, b: u8) -> u8 { + unsafe { vqsubb_u8(a, b) } + } + #[doc = "See [`arch::vqsubh_u16`]."] + #[inline(always)] + pub fn vqsubh_u16(self, a: u16, b: u16) -> u16 { + unsafe { vqsubh_u16(a, b) } + } + #[doc = "See [`arch::vqsubs_s32`]."] + #[inline(always)] + pub fn vqsubs_s32(self, a: i32, b: i32) -> i32 { + unsafe { vqsubs_s32(a, b) } + } + #[doc = "See [`arch::vqsubd_s64`]."] + #[inline(always)] + pub fn vqsubd_s64(self, a: i64, b: i64) -> i64 { + unsafe { vqsubd_s64(a, b) } + } + #[doc = "See [`arch::vqsubs_u32`]."] + #[inline(always)] + pub fn vqsubs_u32(self, a: u32, b: u32) -> u32 { + unsafe { vqsubs_u32(a, b) } + } + #[doc = "See [`arch::vqsubd_u64`]."] + #[inline(always)] + pub fn vqsubd_u64(self, a: u64, b: u64) -> u64 { + unsafe { vqsubd_u64(a, b) } + } + #[doc = "See [`arch::vqtbl1_s8`]."] + #[inline(always)] + pub fn vqtbl1_s8(self, a: int8x16_t, b: uint8x8_t) -> int8x8_t { + unsafe { vqtbl1_s8(a, b) } + } + #[doc = "See [`arch::vqtbl1q_s8`]."] 
+ #[inline(always)] + pub fn vqtbl1q_s8(self, a: int8x16_t, b: uint8x16_t) -> int8x16_t { + unsafe { vqtbl1q_s8(a, b) } + } + #[doc = "See [`arch::vqtbl1_u8`]."] + #[inline(always)] + pub fn vqtbl1_u8(self, a: uint8x16_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vqtbl1_u8(a, b) } + } + #[doc = "See [`arch::vqtbl1q_u8`]."] + #[inline(always)] + pub fn vqtbl1q_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vqtbl1q_u8(a, b) } + } + #[doc = "See [`arch::vqtbl1_p8`]."] + #[inline(always)] + pub fn vqtbl1_p8(self, a: poly8x16_t, b: uint8x8_t) -> poly8x8_t { + unsafe { vqtbl1_p8(a, b) } + } + #[doc = "See [`arch::vqtbl1q_p8`]."] + #[inline(always)] + pub fn vqtbl1q_p8(self, a: poly8x16_t, b: uint8x16_t) -> poly8x16_t { + unsafe { vqtbl1q_p8(a, b) } + } + #[doc = "See [`arch::vqtbl2_s8`]."] + #[inline(always)] + pub fn vqtbl2_s8(self, a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { + unsafe { vqtbl2_s8(a, b) } + } + #[doc = "See [`arch::vqtbl2q_s8`]."] + #[inline(always)] + pub fn vqtbl2q_s8(self, a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { + unsafe { vqtbl2q_s8(a, b) } + } + #[doc = "See [`arch::vqtbl2_u8`]."] + #[inline(always)] + pub fn vqtbl2_u8(self, a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vqtbl2_u8(a, b) } + } + #[doc = "See [`arch::vqtbl2q_u8`]."] + #[inline(always)] + pub fn vqtbl2q_u8(self, a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vqtbl2q_u8(a, b) } + } + #[doc = "See [`arch::vqtbl2_p8`]."] + #[inline(always)] + pub fn vqtbl2_p8(self, a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { + unsafe { vqtbl2_p8(a, b) } + } + #[doc = "See [`arch::vqtbl2q_p8`]."] + #[inline(always)] + pub fn vqtbl2q_p8(self, a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { + unsafe { vqtbl2q_p8(a, b) } + } + #[doc = "See [`arch::vqtbl3_s8`]."] + #[inline(always)] + pub fn vqtbl3_s8(self, a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { + unsafe { vqtbl3_s8(a, b) } + } + #[doc = "See [`arch::vqtbl3q_s8`]."] + #[inline(always)] + pub fn 
vqtbl3q_s8(self, a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { + unsafe { vqtbl3q_s8(a, b) } + } + #[doc = "See [`arch::vqtbl3_u8`]."] + #[inline(always)] + pub fn vqtbl3_u8(self, a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vqtbl3_u8(a, b) } + } + #[doc = "See [`arch::vqtbl3q_u8`]."] + #[inline(always)] + pub fn vqtbl3q_u8(self, a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vqtbl3q_u8(a, b) } + } + #[doc = "See [`arch::vqtbl3_p8`]."] + #[inline(always)] + pub fn vqtbl3_p8(self, a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { + unsafe { vqtbl3_p8(a, b) } + } + #[doc = "See [`arch::vqtbl3q_p8`]."] + #[inline(always)] + pub fn vqtbl3q_p8(self, a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { + unsafe { vqtbl3q_p8(a, b) } + } + #[doc = "See [`arch::vqtbl4_s8`]."] + #[inline(always)] + pub fn vqtbl4_s8(self, a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { + unsafe { vqtbl4_s8(a, b) } + } + #[doc = "See [`arch::vqtbl4q_s8`]."] + #[inline(always)] + pub fn vqtbl4q_s8(self, a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { + unsafe { vqtbl4q_s8(a, b) } + } + #[doc = "See [`arch::vqtbl4_u8`]."] + #[inline(always)] + pub fn vqtbl4_u8(self, a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vqtbl4_u8(a, b) } + } + #[doc = "See [`arch::vqtbl4q_u8`]."] + #[inline(always)] + pub fn vqtbl4q_u8(self, a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vqtbl4q_u8(a, b) } + } + #[doc = "See [`arch::vqtbl4_p8`]."] + #[inline(always)] + pub fn vqtbl4_p8(self, a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { + unsafe { vqtbl4_p8(a, b) } + } + #[doc = "See [`arch::vqtbl4q_p8`]."] + #[inline(always)] + pub fn vqtbl4q_p8(self, a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { + unsafe { vqtbl4q_p8(a, b) } + } + #[doc = "See [`arch::vqtbx1_s8`]."] + #[inline(always)] + pub fn vqtbx1_s8(self, a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { + unsafe { vqtbx1_s8(a, b, c) } + } + #[doc = "See [`arch::vqtbx1q_s8`]."] + #[inline(always)] + pub fn vqtbx1q_s8(self, a: 
int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { + unsafe { vqtbx1q_s8(a, b, c) } + } + #[doc = "See [`arch::vqtbx1_u8`]."] + #[inline(always)] + pub fn vqtbx1_u8(self, a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vqtbx1_u8(a, b, c) } + } + #[doc = "See [`arch::vqtbx1q_u8`]."] + #[inline(always)] + pub fn vqtbx1q_u8(self, a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { vqtbx1q_u8(a, b, c) } + } + #[doc = "See [`arch::vqtbx1_p8`]."] + #[inline(always)] + pub fn vqtbx1_p8(self, a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t { + unsafe { vqtbx1_p8(a, b, c) } + } + #[doc = "See [`arch::vqtbx1q_p8`]."] + #[inline(always)] + pub fn vqtbx1q_p8(self, a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t { + unsafe { vqtbx1q_p8(a, b, c) } + } + #[doc = "See [`arch::vqtbx2_s8`]."] + #[inline(always)] + pub fn vqtbx2_s8(self, a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t { + unsafe { vqtbx2_s8(a, b, c) } + } + #[doc = "See [`arch::vqtbx2q_s8`]."] + #[inline(always)] + pub fn vqtbx2q_s8(self, a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { + unsafe { vqtbx2q_s8(a, b, c) } + } + #[doc = "See [`arch::vqtbx2_u8`]."] + #[inline(always)] + pub fn vqtbx2_u8(self, a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vqtbx2_u8(a, b, c) } + } + #[doc = "See [`arch::vqtbx2q_u8`]."] + #[inline(always)] + pub fn vqtbx2q_u8(self, a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { + unsafe { vqtbx2q_u8(a, b, c) } + } + #[doc = "See [`arch::vqtbx2_p8`]."] + #[inline(always)] + pub fn vqtbx2_p8(self, a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { + unsafe { vqtbx2_p8(a, b, c) } + } + #[doc = "See [`arch::vqtbx2q_p8`]."] + #[inline(always)] + pub fn vqtbx2q_p8(self, a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { + unsafe { vqtbx2q_p8(a, b, c) } + } + #[doc = "See [`arch::vqtbx3_s8`]."] + #[inline(always)] + pub fn vqtbx3_s8(self, a: int8x8_t, b: 
int8x16x3_t, c: uint8x8_t) -> int8x8_t { + unsafe { vqtbx3_s8(a, b, c) } + } + #[doc = "See [`arch::vqtbx3q_s8`]."] + #[inline(always)] + pub fn vqtbx3q_s8(self, a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t { + unsafe { vqtbx3q_s8(a, b, c) } + } + #[doc = "See [`arch::vqtbx3_u8`]."] + #[inline(always)] + pub fn vqtbx3_u8(self, a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vqtbx3_u8(a, b, c) } + } + #[doc = "See [`arch::vqtbx3q_u8`]."] + #[inline(always)] + pub fn vqtbx3q_u8(self, a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { + unsafe { vqtbx3q_u8(a, b, c) } + } + #[doc = "See [`arch::vqtbx3_p8`]."] + #[inline(always)] + pub fn vqtbx3_p8(self, a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { + unsafe { vqtbx3_p8(a, b, c) } + } + #[doc = "See [`arch::vqtbx3q_p8`]."] + #[inline(always)] + pub fn vqtbx3q_p8(self, a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { + unsafe { vqtbx3q_p8(a, b, c) } + } + #[doc = "See [`arch::vqtbx4_s8`]."] + #[inline(always)] + pub fn vqtbx4_s8(self, a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { + unsafe { vqtbx4_s8(a, b, c) } + } + #[doc = "See [`arch::vqtbx4q_s8`]."] + #[inline(always)] + pub fn vqtbx4q_s8(self, a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { + unsafe { vqtbx4q_s8(a, b, c) } + } + #[doc = "See [`arch::vqtbx4_u8`]."] + #[inline(always)] + pub fn vqtbx4_u8(self, a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vqtbx4_u8(a, b, c) } + } + #[doc = "See [`arch::vqtbx4q_u8`]."] + #[inline(always)] + pub fn vqtbx4q_u8(self, a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { + unsafe { vqtbx4q_u8(a, b, c) } + } + #[doc = "See [`arch::vqtbx4_p8`]."] + #[inline(always)] + pub fn vqtbx4_p8(self, a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { + unsafe { vqtbx4_p8(a, b, c) } + } + #[doc = "See [`arch::vqtbx4q_p8`]."] + #[inline(always)] + pub fn vqtbx4q_p8(self, a: poly8x16_t, b: 
poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { + unsafe { vqtbx4q_p8(a, b, c) } + } + #[doc = "See [`arch::vrax1q_u64`]."] + #[inline(always)] + pub fn vrax1q_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vrax1q_u64(a, b) } + } + #[doc = "See [`arch::vrbit_s8`]."] + #[inline(always)] + pub fn vrbit_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vrbit_s8(a) } + } + #[doc = "See [`arch::vrbitq_s8`]."] + #[inline(always)] + pub fn vrbitq_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vrbitq_s8(a) } + } + #[doc = "See [`arch::vrbit_u8`]."] + #[inline(always)] + pub fn vrbit_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vrbit_u8(a) } + } + #[doc = "See [`arch::vrbitq_u8`]."] + #[inline(always)] + pub fn vrbitq_u8(self, a: uint8x16_t) -> uint8x16_t { + unsafe { vrbitq_u8(a) } + } + #[doc = "See [`arch::vrbit_p8`]."] + #[inline(always)] + pub fn vrbit_p8(self, a: poly8x8_t) -> poly8x8_t { + unsafe { vrbit_p8(a) } + } + #[doc = "See [`arch::vrbitq_p8`]."] + #[inline(always)] + pub fn vrbitq_p8(self, a: poly8x16_t) -> poly8x16_t { + unsafe { vrbitq_p8(a) } + } + #[doc = "See [`arch::vrecpe_f64`]."] + #[inline(always)] + pub fn vrecpe_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vrecpe_f64(a) } + } + #[doc = "See [`arch::vrecpeq_f64`]."] + #[inline(always)] + pub fn vrecpeq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vrecpeq_f64(a) } + } + #[doc = "See [`arch::vrecped_f64`]."] + #[inline(always)] + pub fn vrecped_f64(self, a: f64) -> f64 { + unsafe { vrecped_f64(a) } + } + #[doc = "See [`arch::vrecpes_f32`]."] + #[inline(always)] + pub fn vrecpes_f32(self, a: f32) -> f32 { + unsafe { vrecpes_f32(a) } + } + #[doc = "See [`arch::vrecps_f64`]."] + #[inline(always)] + pub fn vrecps_f64(self, a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vrecps_f64(a, b) } + } + #[doc = "See [`arch::vrecpsq_f64`]."] + #[inline(always)] + pub fn vrecpsq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vrecpsq_f64(a, b) } 
+ } + #[doc = "See [`arch::vrecpsd_f64`]."] + #[inline(always)] + pub fn vrecpsd_f64(self, a: f64, b: f64) -> f64 { + unsafe { vrecpsd_f64(a, b) } + } + #[doc = "See [`arch::vrecpss_f32`]."] + #[inline(always)] + pub fn vrecpss_f32(self, a: f32, b: f32) -> f32 { + unsafe { vrecpss_f32(a, b) } + } + #[doc = "See [`arch::vrecpxd_f64`]."] + #[inline(always)] + pub fn vrecpxd_f64(self, a: f64) -> f64 { + unsafe { vrecpxd_f64(a) } + } + #[doc = "See [`arch::vrecpxs_f32`]."] + #[inline(always)] + pub fn vrecpxs_f32(self, a: f32) -> f32 { + unsafe { vrecpxs_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_p128`]."] + #[inline(always)] + pub fn vreinterpretq_f64_p128(self, a: p128) -> float64x2_t { + unsafe { vreinterpretq_f64_p128(a) } + } + #[doc = "See [`arch::vreinterpret_f64_f32`]."] + #[inline(always)] + pub fn vreinterpret_f64_f32(self, a: float32x2_t) -> float64x1_t { + unsafe { vreinterpret_f64_f32(a) } + } + #[doc = "See [`arch::vreinterpret_p64_f32`]."] + #[inline(always)] + pub fn vreinterpret_p64_f32(self, a: float32x2_t) -> poly64x1_t { + unsafe { vreinterpret_p64_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_f32`]."] + #[inline(always)] + pub fn vreinterpretq_f64_f32(self, a: float32x4_t) -> float64x2_t { + unsafe { vreinterpretq_f64_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_f32`]."] + #[inline(always)] + pub fn vreinterpretq_p64_f32(self, a: float32x4_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_f32(a) } + } + #[doc = "See [`arch::vreinterpret_f32_f64`]."] + #[inline(always)] + pub fn vreinterpret_f32_f64(self, a: float64x1_t) -> float32x2_t { + unsafe { vreinterpret_f32_f64(a) } + } + #[doc = "See [`arch::vreinterpret_s8_f64`]."] + #[inline(always)] + pub fn vreinterpret_s8_f64(self, a: float64x1_t) -> int8x8_t { + unsafe { vreinterpret_s8_f64(a) } + } + #[doc = "See [`arch::vreinterpret_s16_f64`]."] + #[inline(always)] + pub fn vreinterpret_s16_f64(self, a: float64x1_t) -> int16x4_t { + unsafe { vreinterpret_s16_f64(a) } + } + 
#[doc = "See [`arch::vreinterpret_s32_f64`]."] + #[inline(always)] + pub fn vreinterpret_s32_f64(self, a: float64x1_t) -> int32x2_t { + unsafe { vreinterpret_s32_f64(a) } + } + #[doc = "See [`arch::vreinterpret_s64_f64`]."] + #[inline(always)] + pub fn vreinterpret_s64_f64(self, a: float64x1_t) -> int64x1_t { + unsafe { vreinterpret_s64_f64(a) } + } + #[doc = "See [`arch::vreinterpret_u8_f64`]."] + #[inline(always)] + pub fn vreinterpret_u8_f64(self, a: float64x1_t) -> uint8x8_t { + unsafe { vreinterpret_u8_f64(a) } + } + #[doc = "See [`arch::vreinterpret_u16_f64`]."] + #[inline(always)] + pub fn vreinterpret_u16_f64(self, a: float64x1_t) -> uint16x4_t { + unsafe { vreinterpret_u16_f64(a) } + } + #[doc = "See [`arch::vreinterpret_u32_f64`]."] + #[inline(always)] + pub fn vreinterpret_u32_f64(self, a: float64x1_t) -> uint32x2_t { + unsafe { vreinterpret_u32_f64(a) } + } + #[doc = "See [`arch::vreinterpret_u64_f64`]."] + #[inline(always)] + pub fn vreinterpret_u64_f64(self, a: float64x1_t) -> uint64x1_t { + unsafe { vreinterpret_u64_f64(a) } + } + #[doc = "See [`arch::vreinterpret_p8_f64`]."] + #[inline(always)] + pub fn vreinterpret_p8_f64(self, a: float64x1_t) -> poly8x8_t { + unsafe { vreinterpret_p8_f64(a) } + } + #[doc = "See [`arch::vreinterpret_p16_f64`]."] + #[inline(always)] + pub fn vreinterpret_p16_f64(self, a: float64x1_t) -> poly16x4_t { + unsafe { vreinterpret_p16_f64(a) } + } + #[doc = "See [`arch::vreinterpret_p64_f64`]."] + #[inline(always)] + pub fn vreinterpret_p64_f64(self, a: float64x1_t) -> poly64x1_t { + unsafe { vreinterpret_p64_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_f64`]."] + #[inline(always)] + pub fn vreinterpretq_p128_f64(self, a: float64x2_t) -> p128 { + unsafe { vreinterpretq_p128_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_f64`]."] + #[inline(always)] + pub fn vreinterpretq_f32_f64(self, a: float64x2_t) -> float32x4_t { + unsafe { vreinterpretq_f32_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_s8_f64`]."] 
+ #[inline(always)] + pub fn vreinterpretq_s8_f64(self, a: float64x2_t) -> int8x16_t { + unsafe { vreinterpretq_s8_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_f64`]."] + #[inline(always)] + pub fn vreinterpretq_s16_f64(self, a: float64x2_t) -> int16x8_t { + unsafe { vreinterpretq_s16_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_f64`]."] + #[inline(always)] + pub fn vreinterpretq_s32_f64(self, a: float64x2_t) -> int32x4_t { + unsafe { vreinterpretq_s32_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_f64`]."] + #[inline(always)] + pub fn vreinterpretq_s64_f64(self, a: float64x2_t) -> int64x2_t { + unsafe { vreinterpretq_s64_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_f64`]."] + #[inline(always)] + pub fn vreinterpretq_u8_f64(self, a: float64x2_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_f64`]."] + #[inline(always)] + pub fn vreinterpretq_u16_f64(self, a: float64x2_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_f64`]."] + #[inline(always)] + pub fn vreinterpretq_u32_f64(self, a: float64x2_t) -> uint32x4_t { + unsafe { vreinterpretq_u32_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_f64`]."] + #[inline(always)] + pub fn vreinterpretq_u64_f64(self, a: float64x2_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_f64`]."] + #[inline(always)] + pub fn vreinterpretq_p8_f64(self, a: float64x2_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_f64`]."] + #[inline(always)] + pub fn vreinterpretq_p16_f64(self, a: float64x2_t) -> poly16x8_t { + unsafe { vreinterpretq_p16_f64(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_f64`]."] + #[inline(always)] + pub fn vreinterpretq_p64_f64(self, a: float64x2_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_f64(a) } + } + #[doc = "See [`arch::vreinterpret_f64_s8`]."] + #[inline(always)] + 
pub fn vreinterpret_f64_s8(self, a: int8x8_t) -> float64x1_t { + unsafe { vreinterpret_f64_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_s8`]."] + #[inline(always)] + pub fn vreinterpretq_f64_s8(self, a: int8x16_t) -> float64x2_t { + unsafe { vreinterpretq_f64_s8(a) } + } + #[doc = "See [`arch::vreinterpret_f64_s16`]."] + #[inline(always)] + pub fn vreinterpret_f64_s16(self, a: int16x4_t) -> float64x1_t { + unsafe { vreinterpret_f64_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_s16`]."] + #[inline(always)] + pub fn vreinterpretq_f64_s16(self, a: int16x8_t) -> float64x2_t { + unsafe { vreinterpretq_f64_s16(a) } + } + #[doc = "See [`arch::vreinterpret_f64_s32`]."] + #[inline(always)] + pub fn vreinterpret_f64_s32(self, a: int32x2_t) -> float64x1_t { + unsafe { vreinterpret_f64_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_s32`]."] + #[inline(always)] + pub fn vreinterpretq_f64_s32(self, a: int32x4_t) -> float64x2_t { + unsafe { vreinterpretq_f64_s32(a) } + } + #[doc = "See [`arch::vreinterpret_f64_s64`]."] + #[inline(always)] + pub fn vreinterpret_f64_s64(self, a: int64x1_t) -> float64x1_t { + unsafe { vreinterpret_f64_s64(a) } + } + #[doc = "See [`arch::vreinterpret_p64_s64`]."] + #[inline(always)] + pub fn vreinterpret_p64_s64(self, a: int64x1_t) -> poly64x1_t { + unsafe { vreinterpret_p64_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_s64`]."] + #[inline(always)] + pub fn vreinterpretq_f64_s64(self, a: int64x2_t) -> float64x2_t { + unsafe { vreinterpretq_f64_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_s64`]."] + #[inline(always)] + pub fn vreinterpretq_p64_s64(self, a: int64x2_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_s64(a) } + } + #[doc = "See [`arch::vreinterpret_f64_u8`]."] + #[inline(always)] + pub fn vreinterpret_f64_u8(self, a: uint8x8_t) -> float64x1_t { + unsafe { vreinterpret_f64_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_u8`]."] + #[inline(always)] + pub fn vreinterpretq_f64_u8(self, a: uint8x16_t) 
-> float64x2_t { + unsafe { vreinterpretq_f64_u8(a) } + } + #[doc = "See [`arch::vreinterpret_f64_u16`]."] + #[inline(always)] + pub fn vreinterpret_f64_u16(self, a: uint16x4_t) -> float64x1_t { + unsafe { vreinterpret_f64_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_u16`]."] + #[inline(always)] + pub fn vreinterpretq_f64_u16(self, a: uint16x8_t) -> float64x2_t { + unsafe { vreinterpretq_f64_u16(a) } + } + #[doc = "See [`arch::vreinterpret_f64_u32`]."] + #[inline(always)] + pub fn vreinterpret_f64_u32(self, a: uint32x2_t) -> float64x1_t { + unsafe { vreinterpret_f64_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_u32`]."] + #[inline(always)] + pub fn vreinterpretq_f64_u32(self, a: uint32x4_t) -> float64x2_t { + unsafe { vreinterpretq_f64_u32(a) } + } + #[doc = "See [`arch::vreinterpret_f64_u64`]."] + #[inline(always)] + pub fn vreinterpret_f64_u64(self, a: uint64x1_t) -> float64x1_t { + unsafe { vreinterpret_f64_u64(a) } + } + #[doc = "See [`arch::vreinterpret_p64_u64`]."] + #[inline(always)] + pub fn vreinterpret_p64_u64(self, a: uint64x1_t) -> poly64x1_t { + unsafe { vreinterpret_p64_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_u64`]."] + #[inline(always)] + pub fn vreinterpretq_f64_u64(self, a: uint64x2_t) -> float64x2_t { + unsafe { vreinterpretq_f64_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_u64`]."] + #[inline(always)] + pub fn vreinterpretq_p64_u64(self, a: uint64x2_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_u64(a) } + } + #[doc = "See [`arch::vreinterpret_f64_p8`]."] + #[inline(always)] + pub fn vreinterpret_f64_p8(self, a: poly8x8_t) -> float64x1_t { + unsafe { vreinterpret_f64_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_p8`]."] + #[inline(always)] + pub fn vreinterpretq_f64_p8(self, a: poly8x16_t) -> float64x2_t { + unsafe { vreinterpretq_f64_p8(a) } + } + #[doc = "See [`arch::vreinterpret_f64_p16`]."] + #[inline(always)] + pub fn vreinterpret_f64_p16(self, a: poly16x4_t) -> float64x1_t { + unsafe { 
vreinterpret_f64_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_p16`]."] + #[inline(always)] + pub fn vreinterpretq_f64_p16(self, a: poly16x8_t) -> float64x2_t { + unsafe { vreinterpretq_f64_p16(a) } + } + #[doc = "See [`arch::vreinterpret_f32_p64`]."] + #[inline(always)] + pub fn vreinterpret_f32_p64(self, a: poly64x1_t) -> float32x2_t { + unsafe { vreinterpret_f32_p64(a) } + } + #[doc = "See [`arch::vreinterpret_f64_p64`]."] + #[inline(always)] + pub fn vreinterpret_f64_p64(self, a: poly64x1_t) -> float64x1_t { + unsafe { vreinterpret_f64_p64(a) } + } + #[doc = "See [`arch::vreinterpret_s64_p64`]."] + #[inline(always)] + pub fn vreinterpret_s64_p64(self, a: poly64x1_t) -> int64x1_t { + unsafe { vreinterpret_s64_p64(a) } + } + #[doc = "See [`arch::vreinterpret_u64_p64`]."] + #[inline(always)] + pub fn vreinterpret_u64_p64(self, a: poly64x1_t) -> uint64x1_t { + unsafe { vreinterpret_u64_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_p64`]."] + #[inline(always)] + pub fn vreinterpretq_f32_p64(self, a: poly64x2_t) -> float32x4_t { + unsafe { vreinterpretq_f32_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_f64_p64`]."] + #[inline(always)] + pub fn vreinterpretq_f64_p64(self, a: poly64x2_t) -> float64x2_t { + unsafe { vreinterpretq_f64_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_p64`]."] + #[inline(always)] + pub fn vreinterpretq_s64_p64(self, a: poly64x2_t) -> int64x2_t { + unsafe { vreinterpretq_s64_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_p64`]."] + #[inline(always)] + pub fn vreinterpretq_u64_p64(self, a: poly64x2_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_p64(a) } + } + #[doc = "See [`arch::vrnd_f32`]."] + #[inline(always)] + pub fn vrnd_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vrnd_f32(a) } + } + #[doc = "See [`arch::vrndq_f32`]."] + #[inline(always)] + pub fn vrndq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vrndq_f32(a) } + } + #[doc = "See [`arch::vrnd_f64`]."] + #[inline(always)] + pub fn 
vrnd_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vrnd_f64(a) } + } + #[doc = "See [`arch::vrndq_f64`]."] + #[inline(always)] + pub fn vrndq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vrndq_f64(a) } + } + #[doc = "See [`arch::vrnda_f32`]."] + #[inline(always)] + pub fn vrnda_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vrnda_f32(a) } + } + #[doc = "See [`arch::vrndaq_f32`]."] + #[inline(always)] + pub fn vrndaq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vrndaq_f32(a) } + } + #[doc = "See [`arch::vrnda_f64`]."] + #[inline(always)] + pub fn vrnda_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vrnda_f64(a) } + } + #[doc = "See [`arch::vrndaq_f64`]."] + #[inline(always)] + pub fn vrndaq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vrndaq_f64(a) } + } + #[doc = "See [`arch::vrndi_f32`]."] + #[inline(always)] + pub fn vrndi_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vrndi_f32(a) } + } + #[doc = "See [`arch::vrndiq_f32`]."] + #[inline(always)] + pub fn vrndiq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vrndiq_f32(a) } + } + #[doc = "See [`arch::vrndi_f64`]."] + #[inline(always)] + pub fn vrndi_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vrndi_f64(a) } + } + #[doc = "See [`arch::vrndiq_f64`]."] + #[inline(always)] + pub fn vrndiq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vrndiq_f64(a) } + } + #[doc = "See [`arch::vrndm_f32`]."] + #[inline(always)] + pub fn vrndm_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vrndm_f32(a) } + } + #[doc = "See [`arch::vrndmq_f32`]."] + #[inline(always)] + pub fn vrndmq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vrndmq_f32(a) } + } + #[doc = "See [`arch::vrndm_f64`]."] + #[inline(always)] + pub fn vrndm_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vrndm_f64(a) } + } + #[doc = "See [`arch::vrndmq_f64`]."] + #[inline(always)] + pub fn vrndmq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vrndmq_f64(a) } + } + 
#[doc = "See [`arch::vrndn_f64`]."] + #[inline(always)] + pub fn vrndn_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vrndn_f64(a) } + } + #[doc = "See [`arch::vrndnq_f64`]."] + #[inline(always)] + pub fn vrndnq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vrndnq_f64(a) } + } + #[doc = "See [`arch::vrndns_f32`]."] + #[inline(always)] + pub fn vrndns_f32(self, a: f32) -> f32 { + unsafe { vrndns_f32(a) } + } + #[doc = "See [`arch::vrndp_f32`]."] + #[inline(always)] + pub fn vrndp_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vrndp_f32(a) } + } + #[doc = "See [`arch::vrndpq_f32`]."] + #[inline(always)] + pub fn vrndpq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vrndpq_f32(a) } + } + #[doc = "See [`arch::vrndp_f64`]."] + #[inline(always)] + pub fn vrndp_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vrndp_f64(a) } + } + #[doc = "See [`arch::vrndpq_f64`]."] + #[inline(always)] + pub fn vrndpq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vrndpq_f64(a) } + } + #[doc = "See [`arch::vrndx_f32`]."] + #[inline(always)] + pub fn vrndx_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vrndx_f32(a) } + } + #[doc = "See [`arch::vrndxq_f32`]."] + #[inline(always)] + pub fn vrndxq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vrndxq_f32(a) } + } + #[doc = "See [`arch::vrndx_f64`]."] + #[inline(always)] + pub fn vrndx_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vrndx_f64(a) } + } + #[doc = "See [`arch::vrndxq_f64`]."] + #[inline(always)] + pub fn vrndxq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vrndxq_f64(a) } + } + #[doc = "See [`arch::vrshld_s64`]."] + #[inline(always)] + pub fn vrshld_s64(self, a: i64, b: i64) -> i64 { + unsafe { vrshld_s64(a, b) } + } + #[doc = "See [`arch::vrshld_u64`]."] + #[inline(always)] + pub fn vrshld_u64(self, a: u64, b: i64) -> u64 { + unsafe { vrshld_u64(a, b) } + } + #[doc = "See [`arch::vrshrd_n_s64`]."] + #[inline(always)] + pub fn vrshrd_n_s64(self, a: i64) -> i64 { 
+ unsafe { vrshrd_n_s64::(a) } + } + #[doc = "See [`arch::vrshrd_n_u64`]."] + #[inline(always)] + pub fn vrshrd_n_u64(self, a: u64) -> u64 { + unsafe { vrshrd_n_u64::(a) } + } + #[doc = "See [`arch::vrshrn_high_n_s16`]."] + #[inline(always)] + pub fn vrshrn_high_n_s16(self, a: int8x8_t, b: int16x8_t) -> int8x16_t { + unsafe { vrshrn_high_n_s16::(a, b) } + } + #[doc = "See [`arch::vrshrn_high_n_s32`]."] + #[inline(always)] + pub fn vrshrn_high_n_s32(self, a: int16x4_t, b: int32x4_t) -> int16x8_t { + unsafe { vrshrn_high_n_s32::(a, b) } + } + #[doc = "See [`arch::vrshrn_high_n_s64`]."] + #[inline(always)] + pub fn vrshrn_high_n_s64(self, a: int32x2_t, b: int64x2_t) -> int32x4_t { + unsafe { vrshrn_high_n_s64::(a, b) } + } + #[doc = "See [`arch::vrshrn_high_n_u16`]."] + #[inline(always)] + pub fn vrshrn_high_n_u16(self, a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + unsafe { vrshrn_high_n_u16::(a, b) } + } + #[doc = "See [`arch::vrshrn_high_n_u32`]."] + #[inline(always)] + pub fn vrshrn_high_n_u32(self, a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + unsafe { vrshrn_high_n_u32::(a, b) } + } + #[doc = "See [`arch::vrshrn_high_n_u64`]."] + #[inline(always)] + pub fn vrshrn_high_n_u64(self, a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + unsafe { vrshrn_high_n_u64::(a, b) } + } + #[doc = "See [`arch::vrsqrte_f64`]."] + #[inline(always)] + pub fn vrsqrte_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vrsqrte_f64(a) } + } + #[doc = "See [`arch::vrsqrteq_f64`]."] + #[inline(always)] + pub fn vrsqrteq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vrsqrteq_f64(a) } + } + #[doc = "See [`arch::vrsqrted_f64`]."] + #[inline(always)] + pub fn vrsqrted_f64(self, a: f64) -> f64 { + unsafe { vrsqrted_f64(a) } + } + #[doc = "See [`arch::vrsqrtes_f32`]."] + #[inline(always)] + pub fn vrsqrtes_f32(self, a: f32) -> f32 { + unsafe { vrsqrtes_f32(a) } + } + #[doc = "See [`arch::vrsqrts_f64`]."] + #[inline(always)] + pub fn vrsqrts_f64(self, a: float64x1_t, b: float64x1_t) -> 
float64x1_t { + unsafe { vrsqrts_f64(a, b) } + } + #[doc = "See [`arch::vrsqrtsq_f64`]."] + #[inline(always)] + pub fn vrsqrtsq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vrsqrtsq_f64(a, b) } + } + #[doc = "See [`arch::vrsqrtsd_f64`]."] + #[inline(always)] + pub fn vrsqrtsd_f64(self, a: f64, b: f64) -> f64 { + unsafe { vrsqrtsd_f64(a, b) } + } + #[doc = "See [`arch::vrsqrtss_f32`]."] + #[inline(always)] + pub fn vrsqrtss_f32(self, a: f32, b: f32) -> f32 { + unsafe { vrsqrtss_f32(a, b) } + } + #[doc = "See [`arch::vrsrad_n_s64`]."] + #[inline(always)] + pub fn vrsrad_n_s64(self, a: i64, b: i64) -> i64 { + unsafe { vrsrad_n_s64::(a, b) } + } + #[doc = "See [`arch::vrsrad_n_u64`]."] + #[inline(always)] + pub fn vrsrad_n_u64(self, a: u64, b: u64) -> u64 { + unsafe { vrsrad_n_u64::(a, b) } + } + #[doc = "See [`arch::vrsubhn_high_s16`]."] + #[inline(always)] + pub fn vrsubhn_high_s16(self, a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + unsafe { vrsubhn_high_s16(a, b, c) } + } + #[doc = "See [`arch::vrsubhn_high_s32`]."] + #[inline(always)] + pub fn vrsubhn_high_s32(self, a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + unsafe { vrsubhn_high_s32(a, b, c) } + } + #[doc = "See [`arch::vrsubhn_high_s64`]."] + #[inline(always)] + pub fn vrsubhn_high_s64(self, a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + unsafe { vrsubhn_high_s64(a, b, c) } + } + #[doc = "See [`arch::vrsubhn_high_u16`]."] + #[inline(always)] + pub fn vrsubhn_high_u16(self, a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + unsafe { vrsubhn_high_u16(a, b, c) } + } + #[doc = "See [`arch::vrsubhn_high_u32`]."] + #[inline(always)] + pub fn vrsubhn_high_u32(self, a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + unsafe { vrsubhn_high_u32(a, b, c) } + } + #[doc = "See [`arch::vrsubhn_high_u64`]."] + #[inline(always)] + pub fn vrsubhn_high_u64(self, a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + unsafe { 
vrsubhn_high_u64(a, b, c) } + } + #[doc = "See [`arch::vset_lane_f64`]."] + #[inline(always)] + pub fn vset_lane_f64(self, a: f64, b: float64x1_t) -> float64x1_t { + unsafe { vset_lane_f64::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_f64`]."] + #[inline(always)] + pub fn vsetq_lane_f64(self, a: f64, b: float64x2_t) -> float64x2_t { + unsafe { vsetq_lane_f64::(a, b) } + } + #[doc = "See [`arch::vsha512h2q_u64`]."] + #[inline(always)] + pub fn vsha512h2q_u64(self, a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + unsafe { vsha512h2q_u64(a, b, c) } + } + #[doc = "See [`arch::vsha512hq_u64`]."] + #[inline(always)] + pub fn vsha512hq_u64(self, a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + unsafe { vsha512hq_u64(a, b, c) } + } + #[doc = "See [`arch::vsha512su0q_u64`]."] + #[inline(always)] + pub fn vsha512su0q_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vsha512su0q_u64(a, b) } + } + #[doc = "See [`arch::vsha512su1q_u64`]."] + #[inline(always)] + pub fn vsha512su1q_u64(self, a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + unsafe { vsha512su1q_u64(a, b, c) } + } + #[doc = "See [`arch::vshld_s64`]."] + #[inline(always)] + pub fn vshld_s64(self, a: i64, b: i64) -> i64 { + unsafe { vshld_s64(a, b) } + } + #[doc = "See [`arch::vshld_u64`]."] + #[inline(always)] + pub fn vshld_u64(self, a: u64, b: i64) -> u64 { + unsafe { vshld_u64(a, b) } + } + #[doc = "See [`arch::vshll_high_n_s8`]."] + #[inline(always)] + pub fn vshll_high_n_s8(self, a: int8x16_t) -> int16x8_t { + unsafe { vshll_high_n_s8::(a) } + } + #[doc = "See [`arch::vshll_high_n_s16`]."] + #[inline(always)] + pub fn vshll_high_n_s16(self, a: int16x8_t) -> int32x4_t { + unsafe { vshll_high_n_s16::(a) } + } + #[doc = "See [`arch::vshll_high_n_s32`]."] + #[inline(always)] + pub fn vshll_high_n_s32(self, a: int32x4_t) -> int64x2_t { + unsafe { vshll_high_n_s32::(a) } + } + #[doc = "See [`arch::vshll_high_n_u8`]."] + #[inline(always)] + pub fn 
vshll_high_n_u8(self, a: uint8x16_t) -> uint16x8_t { + unsafe { vshll_high_n_u8::(a) } + } + #[doc = "See [`arch::vshll_high_n_u16`]."] + #[inline(always)] + pub fn vshll_high_n_u16(self, a: uint16x8_t) -> uint32x4_t { + unsafe { vshll_high_n_u16::(a) } + } + #[doc = "See [`arch::vshll_high_n_u32`]."] + #[inline(always)] + pub fn vshll_high_n_u32(self, a: uint32x4_t) -> uint64x2_t { + unsafe { vshll_high_n_u32::(a) } + } + #[doc = "See [`arch::vshrn_high_n_s16`]."] + #[inline(always)] + pub fn vshrn_high_n_s16(self, a: int8x8_t, b: int16x8_t) -> int8x16_t { + unsafe { vshrn_high_n_s16::(a, b) } + } + #[doc = "See [`arch::vshrn_high_n_s32`]."] + #[inline(always)] + pub fn vshrn_high_n_s32(self, a: int16x4_t, b: int32x4_t) -> int16x8_t { + unsafe { vshrn_high_n_s32::(a, b) } + } + #[doc = "See [`arch::vshrn_high_n_s64`]."] + #[inline(always)] + pub fn vshrn_high_n_s64(self, a: int32x2_t, b: int64x2_t) -> int32x4_t { + unsafe { vshrn_high_n_s64::(a, b) } + } + #[doc = "See [`arch::vshrn_high_n_u16`]."] + #[inline(always)] + pub fn vshrn_high_n_u16(self, a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { + unsafe { vshrn_high_n_u16::(a, b) } + } + #[doc = "See [`arch::vshrn_high_n_u32`]."] + #[inline(always)] + pub fn vshrn_high_n_u32(self, a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { + unsafe { vshrn_high_n_u32::(a, b) } + } + #[doc = "See [`arch::vshrn_high_n_u64`]."] + #[inline(always)] + pub fn vshrn_high_n_u64(self, a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { + unsafe { vshrn_high_n_u64::(a, b) } + } + #[doc = "See [`arch::vsli_n_s8`]."] + #[inline(always)] + pub fn vsli_n_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vsli_n_s8::(a, b) } + } + #[doc = "See [`arch::vsliq_n_s8`]."] + #[inline(always)] + pub fn vsliq_n_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vsliq_n_s8::(a, b) } + } + #[doc = "See [`arch::vsli_n_s16`]."] + #[inline(always)] + pub fn vsli_n_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { 
vsli_n_s16::(a, b) } + } + #[doc = "See [`arch::vsliq_n_s16`]."] + #[inline(always)] + pub fn vsliq_n_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vsliq_n_s16::(a, b) } + } + #[doc = "See [`arch::vsli_n_s32`]."] + #[inline(always)] + pub fn vsli_n_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vsli_n_s32::(a, b) } + } + #[doc = "See [`arch::vsliq_n_s32`]."] + #[inline(always)] + pub fn vsliq_n_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vsliq_n_s32::(a, b) } + } + #[doc = "See [`arch::vsli_n_s64`]."] + #[inline(always)] + pub fn vsli_n_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vsli_n_s64::(a, b) } + } + #[doc = "See [`arch::vsliq_n_s64`]."] + #[inline(always)] + pub fn vsliq_n_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vsliq_n_s64::(a, b) } + } + #[doc = "See [`arch::vsli_n_u8`]."] + #[inline(always)] + pub fn vsli_n_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vsli_n_u8::(a, b) } + } + #[doc = "See [`arch::vsliq_n_u8`]."] + #[inline(always)] + pub fn vsliq_n_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vsliq_n_u8::(a, b) } + } + #[doc = "See [`arch::vsli_n_u16`]."] + #[inline(always)] + pub fn vsli_n_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vsli_n_u16::(a, b) } + } + #[doc = "See [`arch::vsliq_n_u16`]."] + #[inline(always)] + pub fn vsliq_n_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vsliq_n_u16::(a, b) } + } + #[doc = "See [`arch::vsli_n_u32`]."] + #[inline(always)] + pub fn vsli_n_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vsli_n_u32::(a, b) } + } + #[doc = "See [`arch::vsliq_n_u32`]."] + #[inline(always)] + pub fn vsliq_n_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vsliq_n_u32::(a, b) } + } + #[doc = "See [`arch::vsli_n_u64`]."] + #[inline(always)] + pub fn vsli_n_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { 
vsli_n_u64::(a, b) } + } + #[doc = "See [`arch::vsliq_n_u64`]."] + #[inline(always)] + pub fn vsliq_n_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vsliq_n_u64::(a, b) } + } + #[doc = "See [`arch::vsli_n_p8`]."] + #[inline(always)] + pub fn vsli_n_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { vsli_n_p8::(a, b) } + } + #[doc = "See [`arch::vsliq_n_p8`]."] + #[inline(always)] + pub fn vsliq_n_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { vsliq_n_p8::(a, b) } + } + #[doc = "See [`arch::vsli_n_p16`]."] + #[inline(always)] + pub fn vsli_n_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { vsli_n_p16::(a, b) } + } + #[doc = "See [`arch::vsliq_n_p16`]."] + #[inline(always)] + pub fn vsliq_n_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { vsliq_n_p16::(a, b) } + } + #[doc = "See [`arch::vsli_n_p64`]."] + #[inline(always)] + pub fn vsli_n_p64(self, a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + unsafe { vsli_n_p64::(a, b) } + } + #[doc = "See [`arch::vsliq_n_p64`]."] + #[inline(always)] + pub fn vsliq_n_p64(self, a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { vsliq_n_p64::(a, b) } + } + #[doc = "See [`arch::vslid_n_s64`]."] + #[inline(always)] + pub fn vslid_n_s64(self, a: i64, b: i64) -> i64 { + unsafe { vslid_n_s64::(a, b) } + } + #[doc = "See [`arch::vslid_n_u64`]."] + #[inline(always)] + pub fn vslid_n_u64(self, a: u64, b: u64) -> u64 { + unsafe { vslid_n_u64::(a, b) } + } + #[doc = "See [`arch::vsqadd_u8`]."] + #[inline(always)] + pub fn vsqadd_u8(self, a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { vsqadd_u8(a, b) } + } + #[doc = "See [`arch::vsqaddq_u8`]."] + #[inline(always)] + pub fn vsqaddq_u8(self, a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { vsqaddq_u8(a, b) } + } + #[doc = "See [`arch::vsqadd_u16`]."] + #[inline(always)] + pub fn vsqadd_u16(self, a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { vsqadd_u16(a, b) } + } + #[doc = "See 
[`arch::vsqaddq_u16`]."] + #[inline(always)] + pub fn vsqaddq_u16(self, a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { vsqaddq_u16(a, b) } + } + #[doc = "See [`arch::vsqadd_u32`]."] + #[inline(always)] + pub fn vsqadd_u32(self, a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { vsqadd_u32(a, b) } + } + #[doc = "See [`arch::vsqaddq_u32`]."] + #[inline(always)] + pub fn vsqaddq_u32(self, a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { vsqaddq_u32(a, b) } + } + #[doc = "See [`arch::vsqadd_u64`]."] + #[inline(always)] + pub fn vsqadd_u64(self, a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { vsqadd_u64(a, b) } + } + #[doc = "See [`arch::vsqaddq_u64`]."] + #[inline(always)] + pub fn vsqaddq_u64(self, a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { vsqaddq_u64(a, b) } + } + #[doc = "See [`arch::vsqaddb_u8`]."] + #[inline(always)] + pub fn vsqaddb_u8(self, a: u8, b: i8) -> u8 { + unsafe { vsqaddb_u8(a, b) } + } + #[doc = "See [`arch::vsqaddh_u16`]."] + #[inline(always)] + pub fn vsqaddh_u16(self, a: u16, b: i16) -> u16 { + unsafe { vsqaddh_u16(a, b) } + } + #[doc = "See [`arch::vsqaddd_u64`]."] + #[inline(always)] + pub fn vsqaddd_u64(self, a: u64, b: i64) -> u64 { + unsafe { vsqaddd_u64(a, b) } + } + #[doc = "See [`arch::vsqadds_u32`]."] + #[inline(always)] + pub fn vsqadds_u32(self, a: u32, b: i32) -> u32 { + unsafe { vsqadds_u32(a, b) } + } + #[doc = "See [`arch::vsqrt_f32`]."] + #[inline(always)] + pub fn vsqrt_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vsqrt_f32(a) } + } + #[doc = "See [`arch::vsqrtq_f32`]."] + #[inline(always)] + pub fn vsqrtq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vsqrtq_f32(a) } + } + #[doc = "See [`arch::vsqrt_f64`]."] + #[inline(always)] + pub fn vsqrt_f64(self, a: float64x1_t) -> float64x1_t { + unsafe { vsqrt_f64(a) } + } + #[doc = "See [`arch::vsqrtq_f64`]."] + #[inline(always)] + pub fn vsqrtq_f64(self, a: float64x2_t) -> float64x2_t { + unsafe { vsqrtq_f64(a) } + } + #[doc = 
"See [`arch::vsri_n_s8`]."] + #[inline(always)] + pub fn vsri_n_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vsri_n_s8::(a, b) } + } + #[doc = "See [`arch::vsriq_n_s8`]."] + #[inline(always)] + pub fn vsriq_n_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vsriq_n_s8::(a, b) } + } + #[doc = "See [`arch::vsri_n_s16`]."] + #[inline(always)] + pub fn vsri_n_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vsri_n_s16::(a, b) } + } + #[doc = "See [`arch::vsriq_n_s16`]."] + #[inline(always)] + pub fn vsriq_n_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vsriq_n_s16::(a, b) } + } + #[doc = "See [`arch::vsri_n_s32`]."] + #[inline(always)] + pub fn vsri_n_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vsri_n_s32::(a, b) } + } + #[doc = "See [`arch::vsriq_n_s32`]."] + #[inline(always)] + pub fn vsriq_n_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vsriq_n_s32::(a, b) } + } + #[doc = "See [`arch::vsri_n_s64`]."] + #[inline(always)] + pub fn vsri_n_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vsri_n_s64::(a, b) } + } + #[doc = "See [`arch::vsriq_n_s64`]."] + #[inline(always)] + pub fn vsriq_n_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vsriq_n_s64::(a, b) } + } + #[doc = "See [`arch::vsri_n_u8`]."] + #[inline(always)] + pub fn vsri_n_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vsri_n_u8::(a, b) } + } + #[doc = "See [`arch::vsriq_n_u8`]."] + #[inline(always)] + pub fn vsriq_n_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vsriq_n_u8::(a, b) } + } + #[doc = "See [`arch::vsri_n_u16`]."] + #[inline(always)] + pub fn vsri_n_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vsri_n_u16::(a, b) } + } + #[doc = "See [`arch::vsriq_n_u16`]."] + #[inline(always)] + pub fn vsriq_n_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vsriq_n_u16::(a, b) } + } + #[doc = "See 
[`arch::vsri_n_u32`]."] + #[inline(always)] + pub fn vsri_n_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vsri_n_u32::(a, b) } + } + #[doc = "See [`arch::vsriq_n_u32`]."] + #[inline(always)] + pub fn vsriq_n_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vsriq_n_u32::(a, b) } + } + #[doc = "See [`arch::vsri_n_u64`]."] + #[inline(always)] + pub fn vsri_n_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vsri_n_u64::(a, b) } + } + #[doc = "See [`arch::vsriq_n_u64`]."] + #[inline(always)] + pub fn vsriq_n_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vsriq_n_u64::(a, b) } + } + #[doc = "See [`arch::vsri_n_p8`]."] + #[inline(always)] + pub fn vsri_n_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { vsri_n_p8::(a, b) } + } + #[doc = "See [`arch::vsriq_n_p8`]."] + #[inline(always)] + pub fn vsriq_n_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { vsriq_n_p8::(a, b) } + } + #[doc = "See [`arch::vsri_n_p16`]."] + #[inline(always)] + pub fn vsri_n_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { vsri_n_p16::(a, b) } + } + #[doc = "See [`arch::vsriq_n_p16`]."] + #[inline(always)] + pub fn vsriq_n_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { vsriq_n_p16::(a, b) } + } + #[doc = "See [`arch::vsri_n_p64`]."] + #[inline(always)] + pub fn vsri_n_p64(self, a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + unsafe { vsri_n_p64::(a, b) } + } + #[doc = "See [`arch::vsriq_n_p64`]."] + #[inline(always)] + pub fn vsriq_n_p64(self, a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { vsriq_n_p64::(a, b) } + } + #[doc = "See [`arch::vsrid_n_s64`]."] + #[inline(always)] + pub fn vsrid_n_s64(self, a: i64, b: i64) -> i64 { + unsafe { vsrid_n_s64::(a, b) } + } + #[doc = "See [`arch::vsrid_n_u64`]."] + #[inline(always)] + pub fn vsrid_n_u64(self, a: u64, b: u64) -> u64 { + unsafe { vsrid_n_u64::(a, b) } + } + #[doc = "See [`arch::vst1_f32`]."] + 
#[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_f32(self, ptr: *mut f32, a: float32x2_t) { + unsafe { vst1_f32(ptr, a) } + } + #[doc = "See [`arch::vst1q_f32`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_f32(self, ptr: *mut f32, a: float32x4_t) { + unsafe { vst1q_f32(ptr, a) } + } + #[doc = "See [`arch::vst1_f64`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_f64(self, ptr: *mut f64, a: float64x1_t) { + unsafe { vst1_f64(ptr, a) } + } + #[doc = "See [`arch::vst1q_f64`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_f64(self, ptr: *mut f64, a: float64x2_t) { + unsafe { vst1q_f64(ptr, a) } + } + #[doc = "See [`arch::vst1_s8`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_s8(self, ptr: *mut i8, a: int8x8_t) { + unsafe { vst1_s8(ptr, a) } + } + #[doc = "See [`arch::vst1q_s8`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_s8(self, ptr: *mut i8, a: int8x16_t) { + unsafe { vst1q_s8(ptr, a) } + } + #[doc = "See [`arch::vst1_s16`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_s16(self, ptr: *mut i16, a: int16x4_t) { + unsafe { vst1_s16(ptr, a) } + } + #[doc = "See [`arch::vst1q_s16`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_s16(self, ptr: *mut i16, a: int16x8_t) { + unsafe { vst1q_s16(ptr, a) } + } + #[doc = "See [`arch::vst1_s32`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_s32(self, ptr: *mut i32, a: int32x2_t) { + unsafe { vst1_s32(ptr, a) } + } + #[doc = "See [`arch::vst1q_s32`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_s32(self, ptr: *mut i32, a: int32x4_t) { + unsafe { vst1q_s32(ptr, a) } + } + #[doc = "See [`arch::vst1_s64`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub 
unsafe fn vst1_s64(self, ptr: *mut i64, a: int64x1_t) { + unsafe { vst1_s64(ptr, a) } + } + #[doc = "See [`arch::vst1q_s64`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_s64(self, ptr: *mut i64, a: int64x2_t) { + unsafe { vst1q_s64(ptr, a) } + } + #[doc = "See [`arch::vst1_u8`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_u8(self, ptr: *mut u8, a: uint8x8_t) { + unsafe { vst1_u8(ptr, a) } + } + #[doc = "See [`arch::vst1q_u8`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_u8(self, ptr: *mut u8, a: uint8x16_t) { + unsafe { vst1q_u8(ptr, a) } + } + #[doc = "See [`arch::vst1_u16`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_u16(self, ptr: *mut u16, a: uint16x4_t) { + unsafe { vst1_u16(ptr, a) } + } + #[doc = "See [`arch::vst1q_u16`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_u16(self, ptr: *mut u16, a: uint16x8_t) { + unsafe { vst1q_u16(ptr, a) } + } + #[doc = "See [`arch::vst1_u32`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_u32(self, ptr: *mut u32, a: uint32x2_t) { + unsafe { vst1_u32(ptr, a) } + } + #[doc = "See [`arch::vst1q_u32`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_u32(self, ptr: *mut u32, a: uint32x4_t) { + unsafe { vst1q_u32(ptr, a) } + } + #[doc = "See [`arch::vst1_u64`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_u64(self, ptr: *mut u64, a: uint64x1_t) { + unsafe { vst1_u64(ptr, a) } + } + #[doc = "See [`arch::vst1q_u64`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_u64(self, ptr: *mut u64, a: uint64x2_t) { + unsafe { vst1q_u64(ptr, a) } + } + #[doc = "See [`arch::vst1_p8`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_p8(self, ptr: *mut p8, a: poly8x8_t) { + unsafe { 
vst1_p8(ptr, a) } + } + #[doc = "See [`arch::vst1q_p8`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_p8(self, ptr: *mut p8, a: poly8x16_t) { + unsafe { vst1q_p8(ptr, a) } + } + #[doc = "See [`arch::vst1_p16`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_p16(self, ptr: *mut p16, a: poly16x4_t) { + unsafe { vst1_p16(ptr, a) } + } + #[doc = "See [`arch::vst1q_p16`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_p16(self, ptr: *mut p16, a: poly16x8_t) { + unsafe { vst1q_p16(ptr, a) } + } + #[doc = "See [`arch::vst1_p64`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1_p64(self, ptr: *mut p64, a: poly64x1_t) { + unsafe { vst1_p64(ptr, a) } + } + #[doc = "See [`arch::vst1q_p64`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn vst1q_p64(self, ptr: *mut p64, a: poly64x2_t) { + unsafe { vst1q_p64(ptr, a) } + } + #[doc = "See [`arch::vst1_f64_x2`]."] + #[inline(always)] + pub unsafe fn vst1_f64_x2(self, a: *mut f64, b: float64x1x2_t) { + unsafe { vst1_f64_x2(a, b) } + } + #[doc = "See [`arch::vst1q_f64_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_f64_x2(self, a: *mut f64, b: float64x2x2_t) { + unsafe { vst1q_f64_x2(a, b) } + } + #[doc = "See [`arch::vst1_f64_x3`]."] + #[inline(always)] + pub unsafe fn vst1_f64_x3(self, a: *mut f64, b: float64x1x3_t) { + unsafe { vst1_f64_x3(a, b) } + } + #[doc = "See [`arch::vst1q_f64_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_f64_x3(self, a: *mut f64, b: float64x2x3_t) { + unsafe { vst1q_f64_x3(a, b) } + } + #[doc = "See [`arch::vst1_f64_x4`]."] + #[inline(always)] + pub unsafe fn vst1_f64_x4(self, a: *mut f64, b: float64x1x4_t) { + unsafe { vst1_f64_x4(a, b) } + } + #[doc = "See [`arch::vst1q_f64_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_f64_x4(self, a: *mut f64, b: float64x2x4_t) { + unsafe { vst1q_f64_x4(a, b) } + } + #[doc = "See 
[`arch::vst1_lane_f64`]."] + #[inline(always)] + pub unsafe fn vst1_lane_f64(self, a: *mut f64, b: float64x1_t) { + unsafe { vst1_lane_f64::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_f64`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_f64(self, a: *mut f64, b: float64x2_t) { + unsafe { vst1q_lane_f64::(a, b) } + } + #[doc = "See [`arch::vst2_f64`]."] + #[inline(always)] + pub unsafe fn vst2_f64(self, a: *mut f64, b: float64x1x2_t) { + unsafe { vst2_f64(a, b) } + } + #[doc = "See [`arch::vst2_lane_f64`]."] + #[inline(always)] + pub unsafe fn vst2_lane_f64(self, a: *mut f64, b: float64x1x2_t) { + unsafe { vst2_lane_f64::(a, b) } + } + #[doc = "See [`arch::vst2_lane_s64`]."] + #[inline(always)] + pub unsafe fn vst2_lane_s64(self, a: *mut i64, b: int64x1x2_t) { + unsafe { vst2_lane_s64::(a, b) } + } + #[doc = "See [`arch::vst2_lane_p64`]."] + #[inline(always)] + pub unsafe fn vst2_lane_p64(self, a: *mut p64, b: poly64x1x2_t) { + unsafe { vst2_lane_p64::(a, b) } + } + #[doc = "See [`arch::vst2_lane_u64`]."] + #[inline(always)] + pub unsafe fn vst2_lane_u64(self, a: *mut u64, b: uint64x1x2_t) { + unsafe { vst2_lane_u64::(a, b) } + } + #[doc = "See [`arch::vst2q_f64`]."] + #[inline(always)] + pub unsafe fn vst2q_f64(self, a: *mut f64, b: float64x2x2_t) { + unsafe { vst2q_f64(a, b) } + } + #[doc = "See [`arch::vst2q_s64`]."] + #[inline(always)] + pub unsafe fn vst2q_s64(self, a: *mut i64, b: int64x2x2_t) { + unsafe { vst2q_s64(a, b) } + } + #[doc = "See [`arch::vst2q_lane_f64`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_f64(self, a: *mut f64, b: float64x2x2_t) { + unsafe { vst2q_lane_f64::(a, b) } + } + #[doc = "See [`arch::vst2q_lane_s8`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_s8(self, a: *mut i8, b: int8x16x2_t) { + unsafe { vst2q_lane_s8::(a, b) } + } + #[doc = "See [`arch::vst2q_lane_s64`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_s64(self, a: *mut i64, b: int64x2x2_t) { + unsafe { vst2q_lane_s64::(a, b) } + } + #[doc = "See 
[`arch::vst2q_lane_p64`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_p64(self, a: *mut p64, b: poly64x2x2_t) { + unsafe { vst2q_lane_p64::(a, b) } + } + #[doc = "See [`arch::vst2q_lane_u8`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_u8(self, a: *mut u8, b: uint8x16x2_t) { + unsafe { vst2q_lane_u8::(a, b) } + } + #[doc = "See [`arch::vst2q_lane_u64`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_u64(self, a: *mut u64, b: uint64x2x2_t) { + unsafe { vst2q_lane_u64::(a, b) } + } + #[doc = "See [`arch::vst2q_lane_p8`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_p8(self, a: *mut p8, b: poly8x16x2_t) { + unsafe { vst2q_lane_p8::(a, b) } + } + #[doc = "See [`arch::vst2q_p64`]."] + #[inline(always)] + pub unsafe fn vst2q_p64(self, a: *mut p64, b: poly64x2x2_t) { + unsafe { vst2q_p64(a, b) } + } + #[doc = "See [`arch::vst2q_u64`]."] + #[inline(always)] + pub unsafe fn vst2q_u64(self, a: *mut u64, b: uint64x2x2_t) { + unsafe { vst2q_u64(a, b) } + } + #[doc = "See [`arch::vst3_f64`]."] + #[inline(always)] + pub unsafe fn vst3_f64(self, a: *mut f64, b: float64x1x3_t) { + unsafe { vst3_f64(a, b) } + } + #[doc = "See [`arch::vst3_lane_f64`]."] + #[inline(always)] + pub unsafe fn vst3_lane_f64(self, a: *mut f64, b: float64x1x3_t) { + unsafe { vst3_lane_f64::(a, b) } + } + #[doc = "See [`arch::vst3_lane_s64`]."] + #[inline(always)] + pub unsafe fn vst3_lane_s64(self, a: *mut i64, b: int64x1x3_t) { + unsafe { vst3_lane_s64::(a, b) } + } + #[doc = "See [`arch::vst3_lane_p64`]."] + #[inline(always)] + pub unsafe fn vst3_lane_p64(self, a: *mut p64, b: poly64x1x3_t) { + unsafe { vst3_lane_p64::(a, b) } + } + #[doc = "See [`arch::vst3_lane_u64`]."] + #[inline(always)] + pub unsafe fn vst3_lane_u64(self, a: *mut u64, b: uint64x1x3_t) { + unsafe { vst3_lane_u64::(a, b) } + } + #[doc = "See [`arch::vst3q_f64`]."] + #[inline(always)] + pub unsafe fn vst3q_f64(self, a: *mut f64, b: float64x2x3_t) { + unsafe { vst3q_f64(a, b) } + } + #[doc = "See [`arch::vst3q_s64`]."] + 
#[inline(always)] + pub unsafe fn vst3q_s64(self, a: *mut i64, b: int64x2x3_t) { + unsafe { vst3q_s64(a, b) } + } + #[doc = "See [`arch::vst3q_lane_f64`]."] + #[inline(always)] + pub unsafe fn vst3q_lane_f64(self, a: *mut f64, b: float64x2x3_t) { + unsafe { vst3q_lane_f64::(a, b) } + } + #[doc = "See [`arch::vst3q_lane_s8`]."] + #[inline(always)] + pub unsafe fn vst3q_lane_s8(self, a: *mut i8, b: int8x16x3_t) { + unsafe { vst3q_lane_s8::(a, b) } + } + #[doc = "See [`arch::vst3q_lane_s64`]."] + #[inline(always)] + pub unsafe fn vst3q_lane_s64(self, a: *mut i64, b: int64x2x3_t) { + unsafe { vst3q_lane_s64::(a, b) } + } + #[doc = "See [`arch::vst3q_lane_p64`]."] + #[inline(always)] + pub unsafe fn vst3q_lane_p64(self, a: *mut p64, b: poly64x2x3_t) { + unsafe { vst3q_lane_p64::(a, b) } + } + #[doc = "See [`arch::vst3q_lane_u8`]."] + #[inline(always)] + pub unsafe fn vst3q_lane_u8(self, a: *mut u8, b: uint8x16x3_t) { + unsafe { vst3q_lane_u8::(a, b) } + } + #[doc = "See [`arch::vst3q_lane_u64`]."] + #[inline(always)] + pub unsafe fn vst3q_lane_u64(self, a: *mut u64, b: uint64x2x3_t) { + unsafe { vst3q_lane_u64::(a, b) } + } + #[doc = "See [`arch::vst3q_lane_p8`]."] + #[inline(always)] + pub unsafe fn vst3q_lane_p8(self, a: *mut p8, b: poly8x16x3_t) { + unsafe { vst3q_lane_p8::(a, b) } + } + #[doc = "See [`arch::vst3q_p64`]."] + #[inline(always)] + pub unsafe fn vst3q_p64(self, a: *mut p64, b: poly64x2x3_t) { + unsafe { vst3q_p64(a, b) } + } + #[doc = "See [`arch::vst3q_u64`]."] + #[inline(always)] + pub unsafe fn vst3q_u64(self, a: *mut u64, b: uint64x2x3_t) { + unsafe { vst3q_u64(a, b) } + } + #[doc = "See [`arch::vst4_f64`]."] + #[inline(always)] + pub unsafe fn vst4_f64(self, a: *mut f64, b: float64x1x4_t) { + unsafe { vst4_f64(a, b) } + } + #[doc = "See [`arch::vst4_lane_f64`]."] + #[inline(always)] + pub unsafe fn vst4_lane_f64(self, a: *mut f64, b: float64x1x4_t) { + unsafe { vst4_lane_f64::(a, b) } + } + #[doc = "See [`arch::vst4_lane_s64`]."] + #[inline(always)] 
+ pub unsafe fn vst4_lane_s64(self, a: *mut i64, b: int64x1x4_t) { + unsafe { vst4_lane_s64::(a, b) } + } + #[doc = "See [`arch::vst4_lane_p64`]."] + #[inline(always)] + pub unsafe fn vst4_lane_p64(self, a: *mut p64, b: poly64x1x4_t) { + unsafe { vst4_lane_p64::(a, b) } + } + #[doc = "See [`arch::vst4_lane_u64`]."] + #[inline(always)] + pub unsafe fn vst4_lane_u64(self, a: *mut u64, b: uint64x1x4_t) { + unsafe { vst4_lane_u64::(a, b) } + } + #[doc = "See [`arch::vst4q_f64`]."] + #[inline(always)] + pub unsafe fn vst4q_f64(self, a: *mut f64, b: float64x2x4_t) { + unsafe { vst4q_f64(a, b) } + } + #[doc = "See [`arch::vst4q_s64`]."] + #[inline(always)] + pub unsafe fn vst4q_s64(self, a: *mut i64, b: int64x2x4_t) { + unsafe { vst4q_s64(a, b) } + } + #[doc = "See [`arch::vst4q_lane_f64`]."] + #[inline(always)] + pub unsafe fn vst4q_lane_f64(self, a: *mut f64, b: float64x2x4_t) { + unsafe { vst4q_lane_f64::(a, b) } + } + #[doc = "See [`arch::vst4q_lane_s8`]."] + #[inline(always)] + pub unsafe fn vst4q_lane_s8(self, a: *mut i8, b: int8x16x4_t) { + unsafe { vst4q_lane_s8::(a, b) } + } + #[doc = "See [`arch::vst4q_lane_s64`]."] + #[inline(always)] + pub unsafe fn vst4q_lane_s64(self, a: *mut i64, b: int64x2x4_t) { + unsafe { vst4q_lane_s64::(a, b) } + } + #[doc = "See [`arch::vst4q_lane_p64`]."] + #[inline(always)] + pub unsafe fn vst4q_lane_p64(self, a: *mut p64, b: poly64x2x4_t) { + unsafe { vst4q_lane_p64::(a, b) } + } + #[doc = "See [`arch::vst4q_lane_u8`]."] + #[inline(always)] + pub unsafe fn vst4q_lane_u8(self, a: *mut u8, b: uint8x16x4_t) { + unsafe { vst4q_lane_u8::(a, b) } + } + #[doc = "See [`arch::vst4q_lane_u64`]."] + #[inline(always)] + pub unsafe fn vst4q_lane_u64(self, a: *mut u64, b: uint64x2x4_t) { + unsafe { vst4q_lane_u64::(a, b) } + } + #[doc = "See [`arch::vst4q_lane_p8`]."] + #[inline(always)] + pub unsafe fn vst4q_lane_p8(self, a: *mut p8, b: poly8x16x4_t) { + unsafe { vst4q_lane_p8::(a, b) } + } + #[doc = "See [`arch::vst4q_p64`]."] + 
#[inline(always)] + pub unsafe fn vst4q_p64(self, a: *mut p64, b: poly64x2x4_t) { + unsafe { vst4q_p64(a, b) } + } + #[doc = "See [`arch::vst4q_u64`]."] + #[inline(always)] + pub unsafe fn vst4q_u64(self, a: *mut u64, b: uint64x2x4_t) { + unsafe { vst4q_u64(a, b) } + } + #[doc = "See [`arch::vsub_f64`]."] + #[inline(always)] + pub fn vsub_f64(self, a: float64x1_t, b: float64x1_t) -> float64x1_t { + unsafe { vsub_f64(a, b) } + } + #[doc = "See [`arch::vsubq_f64`]."] + #[inline(always)] + pub fn vsubq_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vsubq_f64(a, b) } + } + #[doc = "See [`arch::vsubd_s64`]."] + #[inline(always)] + pub fn vsubd_s64(self, a: i64, b: i64) -> i64 { + unsafe { vsubd_s64(a, b) } + } + #[doc = "See [`arch::vsubd_u64`]."] + #[inline(always)] + pub fn vsubd_u64(self, a: u64, b: u64) -> u64 { + unsafe { vsubd_u64(a, b) } + } + #[doc = "See [`arch::vsubl_high_s8`]."] + #[inline(always)] + pub fn vsubl_high_s8(self, a: int8x16_t, b: int8x16_t) -> int16x8_t { + unsafe { vsubl_high_s8(a, b) } + } + #[doc = "See [`arch::vsubl_high_s16`]."] + #[inline(always)] + pub fn vsubl_high_s16(self, a: int16x8_t, b: int16x8_t) -> int32x4_t { + unsafe { vsubl_high_s16(a, b) } + } + #[doc = "See [`arch::vsubl_high_s32`]."] + #[inline(always)] + pub fn vsubl_high_s32(self, a: int32x4_t, b: int32x4_t) -> int64x2_t { + unsafe { vsubl_high_s32(a, b) } + } + #[doc = "See [`arch::vsubl_high_u8`]."] + #[inline(always)] + pub fn vsubl_high_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + unsafe { vsubl_high_u8(a, b) } + } + #[doc = "See [`arch::vsubl_high_u16`]."] + #[inline(always)] + pub fn vsubl_high_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + unsafe { vsubl_high_u16(a, b) } + } + #[doc = "See [`arch::vsubl_high_u32`]."] + #[inline(always)] + pub fn vsubl_high_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + unsafe { vsubl_high_u32(a, b) } + } + #[doc = "See [`arch::vsubw_high_s8`]."] + #[inline(always)] + pub fn 
vsubw_high_s8(self, a: int16x8_t, b: int8x16_t) -> int16x8_t { + unsafe { vsubw_high_s8(a, b) } + } + #[doc = "See [`arch::vsubw_high_s16`]."] + #[inline(always)] + pub fn vsubw_high_s16(self, a: int32x4_t, b: int16x8_t) -> int32x4_t { + unsafe { vsubw_high_s16(a, b) } + } + #[doc = "See [`arch::vsubw_high_s32`]."] + #[inline(always)] + pub fn vsubw_high_s32(self, a: int64x2_t, b: int32x4_t) -> int64x2_t { + unsafe { vsubw_high_s32(a, b) } + } + #[doc = "See [`arch::vsubw_high_u8`]."] + #[inline(always)] + pub fn vsubw_high_u8(self, a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + unsafe { vsubw_high_u8(a, b) } + } + #[doc = "See [`arch::vsubw_high_u16`]."] + #[inline(always)] + pub fn vsubw_high_u16(self, a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + unsafe { vsubw_high_u16(a, b) } + } + #[doc = "See [`arch::vsubw_high_u32`]."] + #[inline(always)] + pub fn vsubw_high_u32(self, a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + unsafe { vsubw_high_u32(a, b) } + } + #[doc = "See [`arch::vtbl1_s8`]."] + #[inline(always)] + pub fn vtbl1_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vtbl1_s8(a, b) } + } + #[doc = "See [`arch::vtbl1_u8`]."] + #[inline(always)] + pub fn vtbl1_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vtbl1_u8(a, b) } + } + #[doc = "See [`arch::vtbl1_p8`]."] + #[inline(always)] + pub fn vtbl1_p8(self, a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { + unsafe { vtbl1_p8(a, b) } + } + #[doc = "See [`arch::vtbl2_s8`]."] + #[inline(always)] + pub fn vtbl2_s8(self, a: int8x8x2_t, b: int8x8_t) -> int8x8_t { + unsafe { vtbl2_s8(a, b) } + } + #[doc = "See [`arch::vtbl2_u8`]."] + #[inline(always)] + pub fn vtbl2_u8(self, a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vtbl2_u8(a, b) } + } + #[doc = "See [`arch::vtbl2_p8`]."] + #[inline(always)] + pub fn vtbl2_p8(self, a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { + unsafe { vtbl2_p8(a, b) } + } + #[doc = "See [`arch::vtbl3_s8`]."] + #[inline(always)] + pub fn vtbl3_s8(self, a: 
int8x8x3_t, b: int8x8_t) -> int8x8_t { + unsafe { vtbl3_s8(a, b) } + } + #[doc = "See [`arch::vtbl3_u8`]."] + #[inline(always)] + pub fn vtbl3_u8(self, a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vtbl3_u8(a, b) } + } + #[doc = "See [`arch::vtbl3_p8`]."] + #[inline(always)] + pub fn vtbl3_p8(self, a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { + unsafe { vtbl3_p8(a, b) } + } + #[doc = "See [`arch::vtbl4_s8`]."] + #[inline(always)] + pub fn vtbl4_s8(self, a: int8x8x4_t, b: int8x8_t) -> int8x8_t { + unsafe { vtbl4_s8(a, b) } + } + #[doc = "See [`arch::vtbl4_u8`]."] + #[inline(always)] + pub fn vtbl4_u8(self, a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vtbl4_u8(a, b) } + } + #[doc = "See [`arch::vtbl4_p8`]."] + #[inline(always)] + pub fn vtbl4_p8(self, a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { + unsafe { vtbl4_p8(a, b) } + } + #[doc = "See [`arch::vtbx1_s8`]."] + #[inline(always)] + pub fn vtbx1_s8(self, a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + unsafe { vtbx1_s8(a, b, c) } + } + #[doc = "See [`arch::vtbx1_u8`]."] + #[inline(always)] + pub fn vtbx1_u8(self, a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vtbx1_u8(a, b, c) } + } + #[doc = "See [`arch::vtbx1_p8`]."] + #[inline(always)] + pub fn vtbx1_p8(self, a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { + unsafe { vtbx1_p8(a, b, c) } + } + #[doc = "See [`arch::vtbx2_s8`]."] + #[inline(always)] + pub fn vtbx2_s8(self, a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { + unsafe { vtbx2_s8(a, b, c) } + } + #[doc = "See [`arch::vtbx2_u8`]."] + #[inline(always)] + pub fn vtbx2_u8(self, a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vtbx2_u8(a, b, c) } + } + #[doc = "See [`arch::vtbx2_p8`]."] + #[inline(always)] + pub fn vtbx2_p8(self, a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { + unsafe { vtbx2_p8(a, b, c) } + } + #[doc = "See [`arch::vtbx3_s8`]."] + #[inline(always)] + pub fn vtbx3_s8(self, a: int8x8_t, b: 
int8x8x3_t, c: int8x8_t) -> int8x8_t { + unsafe { vtbx3_s8(a, b, c) } + } + #[doc = "See [`arch::vtbx3_u8`]."] + #[inline(always)] + pub fn vtbx3_u8(self, a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vtbx3_u8(a, b, c) } + } + #[doc = "See [`arch::vtbx3_p8`]."] + #[inline(always)] + pub fn vtbx3_p8(self, a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { + unsafe { vtbx3_p8(a, b, c) } + } + #[doc = "See [`arch::vtbx4_s8`]."] + #[inline(always)] + pub fn vtbx4_s8(self, a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { + unsafe { vtbx4_s8(a, b, c) } + } + #[doc = "See [`arch::vtbx4_u8`]."] + #[inline(always)] + pub fn vtbx4_u8(self, a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vtbx4_u8(a, b, c) } + } + #[doc = "See [`arch::vtbx4_p8`]."] + #[inline(always)] + pub fn vtbx4_p8(self, a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { + unsafe { vtbx4_p8(a, b, c) } + } + #[doc = "See [`arch::vtrn1_f32`]."] + #[inline(always)] + pub fn vtrn1_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vtrn1_f32(a, b) } + } + #[doc = "See [`arch::vtrn1q_f64`]."] + #[inline(always)] + pub fn vtrn1q_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vtrn1q_f64(a, b) } + } + #[doc = "See [`arch::vtrn1_s32`]."] + #[inline(always)] + pub fn vtrn1_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vtrn1_s32(a, b) } + } + #[doc = "See [`arch::vtrn1q_s64`]."] + #[inline(always)] + pub fn vtrn1q_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vtrn1q_s64(a, b) } + } + #[doc = "See [`arch::vtrn1_u32`]."] + #[inline(always)] + pub fn vtrn1_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vtrn1_u32(a, b) } + } + #[doc = "See [`arch::vtrn1q_u64`]."] + #[inline(always)] + pub fn vtrn1q_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vtrn1q_u64(a, b) } + } + #[doc = "See [`arch::vtrn1q_p64`]."] + #[inline(always)] + pub fn 
vtrn1q_p64(self, a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { vtrn1q_p64(a, b) } + } + #[doc = "See [`arch::vtrn1q_f32`]."] + #[inline(always)] + pub fn vtrn1q_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vtrn1q_f32(a, b) } + } + #[doc = "See [`arch::vtrn1_s8`]."] + #[inline(always)] + pub fn vtrn1_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vtrn1_s8(a, b) } + } + #[doc = "See [`arch::vtrn1q_s8`]."] + #[inline(always)] + pub fn vtrn1q_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vtrn1q_s8(a, b) } + } + #[doc = "See [`arch::vtrn1_s16`]."] + #[inline(always)] + pub fn vtrn1_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vtrn1_s16(a, b) } + } + #[doc = "See [`arch::vtrn1q_s16`]."] + #[inline(always)] + pub fn vtrn1q_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vtrn1q_s16(a, b) } + } + #[doc = "See [`arch::vtrn1q_s32`]."] + #[inline(always)] + pub fn vtrn1q_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vtrn1q_s32(a, b) } + } + #[doc = "See [`arch::vtrn1_u8`]."] + #[inline(always)] + pub fn vtrn1_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vtrn1_u8(a, b) } + } + #[doc = "See [`arch::vtrn1q_u8`]."] + #[inline(always)] + pub fn vtrn1q_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vtrn1q_u8(a, b) } + } + #[doc = "See [`arch::vtrn1_u16`]."] + #[inline(always)] + pub fn vtrn1_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vtrn1_u16(a, b) } + } + #[doc = "See [`arch::vtrn1q_u16`]."] + #[inline(always)] + pub fn vtrn1q_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vtrn1q_u16(a, b) } + } + #[doc = "See [`arch::vtrn1q_u32`]."] + #[inline(always)] + pub fn vtrn1q_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vtrn1q_u32(a, b) } + } + #[doc = "See [`arch::vtrn1_p8`]."] + #[inline(always)] + pub fn vtrn1_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + 
unsafe { vtrn1_p8(a, b) } + } + #[doc = "See [`arch::vtrn1q_p8`]."] + #[inline(always)] + pub fn vtrn1q_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { vtrn1q_p8(a, b) } + } + #[doc = "See [`arch::vtrn1_p16`]."] + #[inline(always)] + pub fn vtrn1_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { vtrn1_p16(a, b) } + } + #[doc = "See [`arch::vtrn1q_p16`]."] + #[inline(always)] + pub fn vtrn1q_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { vtrn1q_p16(a, b) } + } + #[doc = "See [`arch::vtrn2_f32`]."] + #[inline(always)] + pub fn vtrn2_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vtrn2_f32(a, b) } + } + #[doc = "See [`arch::vtrn2q_f64`]."] + #[inline(always)] + pub fn vtrn2q_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vtrn2q_f64(a, b) } + } + #[doc = "See [`arch::vtrn2_s32`]."] + #[inline(always)] + pub fn vtrn2_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vtrn2_s32(a, b) } + } + #[doc = "See [`arch::vtrn2q_s64`]."] + #[inline(always)] + pub fn vtrn2q_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vtrn2q_s64(a, b) } + } + #[doc = "See [`arch::vtrn2_u32`]."] + #[inline(always)] + pub fn vtrn2_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vtrn2_u32(a, b) } + } + #[doc = "See [`arch::vtrn2q_u64`]."] + #[inline(always)] + pub fn vtrn2q_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vtrn2q_u64(a, b) } + } + #[doc = "See [`arch::vtrn2q_p64`]."] + #[inline(always)] + pub fn vtrn2q_p64(self, a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { vtrn2q_p64(a, b) } + } + #[doc = "See [`arch::vtrn2q_f32`]."] + #[inline(always)] + pub fn vtrn2q_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vtrn2q_f32(a, b) } + } + #[doc = "See [`arch::vtrn2_s8`]."] + #[inline(always)] + pub fn vtrn2_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vtrn2_s8(a, b) } + } + #[doc = 
"See [`arch::vtrn2q_s8`]."] + #[inline(always)] + pub fn vtrn2q_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vtrn2q_s8(a, b) } + } + #[doc = "See [`arch::vtrn2_s16`]."] + #[inline(always)] + pub fn vtrn2_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vtrn2_s16(a, b) } + } + #[doc = "See [`arch::vtrn2q_s16`]."] + #[inline(always)] + pub fn vtrn2q_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vtrn2q_s16(a, b) } + } + #[doc = "See [`arch::vtrn2q_s32`]."] + #[inline(always)] + pub fn vtrn2q_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vtrn2q_s32(a, b) } + } + #[doc = "See [`arch::vtrn2_u8`]."] + #[inline(always)] + pub fn vtrn2_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vtrn2_u8(a, b) } + } + #[doc = "See [`arch::vtrn2q_u8`]."] + #[inline(always)] + pub fn vtrn2q_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vtrn2q_u8(a, b) } + } + #[doc = "See [`arch::vtrn2_u16`]."] + #[inline(always)] + pub fn vtrn2_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vtrn2_u16(a, b) } + } + #[doc = "See [`arch::vtrn2q_u16`]."] + #[inline(always)] + pub fn vtrn2q_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vtrn2q_u16(a, b) } + } + #[doc = "See [`arch::vtrn2q_u32`]."] + #[inline(always)] + pub fn vtrn2q_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vtrn2q_u32(a, b) } + } + #[doc = "See [`arch::vtrn2_p8`]."] + #[inline(always)] + pub fn vtrn2_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { vtrn2_p8(a, b) } + } + #[doc = "See [`arch::vtrn2q_p8`]."] + #[inline(always)] + pub fn vtrn2q_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { vtrn2q_p8(a, b) } + } + #[doc = "See [`arch::vtrn2_p16`]."] + #[inline(always)] + pub fn vtrn2_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { vtrn2_p16(a, b) } + } + #[doc = "See [`arch::vtrn2q_p16`]."] + #[inline(always)] + pub fn 
vtrn2q_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { vtrn2q_p16(a, b) } + } + #[doc = "See [`arch::vtst_s64`]."] + #[inline(always)] + pub fn vtst_s64(self, a: int64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { vtst_s64(a, b) } + } + #[doc = "See [`arch::vtstq_s64`]."] + #[inline(always)] + pub fn vtstq_s64(self, a: int64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { vtstq_s64(a, b) } + } + #[doc = "See [`arch::vtst_p64`]."] + #[inline(always)] + pub fn vtst_p64(self, a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { + unsafe { vtst_p64(a, b) } + } + #[doc = "See [`arch::vtstq_p64`]."] + #[inline(always)] + pub fn vtstq_p64(self, a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { + unsafe { vtstq_p64(a, b) } + } + #[doc = "See [`arch::vtst_u64`]."] + #[inline(always)] + pub fn vtst_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vtst_u64(a, b) } + } + #[doc = "See [`arch::vtstq_u64`]."] + #[inline(always)] + pub fn vtstq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vtstq_u64(a, b) } + } + #[doc = "See [`arch::vtstd_s64`]."] + #[inline(always)] + pub fn vtstd_s64(self, a: i64, b: i64) -> u64 { + unsafe { vtstd_s64(a, b) } + } + #[doc = "See [`arch::vtstd_u64`]."] + #[inline(always)] + pub fn vtstd_u64(self, a: u64, b: u64) -> u64 { + unsafe { vtstd_u64(a, b) } + } + #[doc = "See [`arch::vuqadd_s8`]."] + #[inline(always)] + pub fn vuqadd_s8(self, a: int8x8_t, b: uint8x8_t) -> int8x8_t { + unsafe { vuqadd_s8(a, b) } + } + #[doc = "See [`arch::vuqaddq_s8`]."] + #[inline(always)] + pub fn vuqaddq_s8(self, a: int8x16_t, b: uint8x16_t) -> int8x16_t { + unsafe { vuqaddq_s8(a, b) } + } + #[doc = "See [`arch::vuqadd_s16`]."] + #[inline(always)] + pub fn vuqadd_s16(self, a: int16x4_t, b: uint16x4_t) -> int16x4_t { + unsafe { vuqadd_s16(a, b) } + } + #[doc = "See [`arch::vuqaddq_s16`]."] + #[inline(always)] + pub fn vuqaddq_s16(self, a: int16x8_t, b: uint16x8_t) -> int16x8_t { + unsafe { vuqaddq_s16(a, b) } + } + #[doc = 
"See [`arch::vuqadd_s32`]."] + #[inline(always)] + pub fn vuqadd_s32(self, a: int32x2_t, b: uint32x2_t) -> int32x2_t { + unsafe { vuqadd_s32(a, b) } + } + #[doc = "See [`arch::vuqaddq_s32`]."] + #[inline(always)] + pub fn vuqaddq_s32(self, a: int32x4_t, b: uint32x4_t) -> int32x4_t { + unsafe { vuqaddq_s32(a, b) } + } + #[doc = "See [`arch::vuqadd_s64`]."] + #[inline(always)] + pub fn vuqadd_s64(self, a: int64x1_t, b: uint64x1_t) -> int64x1_t { + unsafe { vuqadd_s64(a, b) } + } + #[doc = "See [`arch::vuqaddq_s64`]."] + #[inline(always)] + pub fn vuqaddq_s64(self, a: int64x2_t, b: uint64x2_t) -> int64x2_t { + unsafe { vuqaddq_s64(a, b) } + } + #[doc = "See [`arch::vuqaddb_s8`]."] + #[inline(always)] + pub fn vuqaddb_s8(self, a: i8, b: u8) -> i8 { + unsafe { vuqaddb_s8(a, b) } + } + #[doc = "See [`arch::vuqaddh_s16`]."] + #[inline(always)] + pub fn vuqaddh_s16(self, a: i16, b: u16) -> i16 { + unsafe { vuqaddh_s16(a, b) } + } + #[doc = "See [`arch::vuqaddd_s64`]."] + #[inline(always)] + pub fn vuqaddd_s64(self, a: i64, b: u64) -> i64 { + unsafe { vuqaddd_s64(a, b) } + } + #[doc = "See [`arch::vuqadds_s32`]."] + #[inline(always)] + pub fn vuqadds_s32(self, a: i32, b: u32) -> i32 { + unsafe { vuqadds_s32(a, b) } + } + #[doc = "See [`arch::vuzp1_f32`]."] + #[inline(always)] + pub fn vuzp1_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vuzp1_f32(a, b) } + } + #[doc = "See [`arch::vuzp1q_f64`]."] + #[inline(always)] + pub fn vuzp1q_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vuzp1q_f64(a, b) } + } + #[doc = "See [`arch::vuzp1_s32`]."] + #[inline(always)] + pub fn vuzp1_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vuzp1_s32(a, b) } + } + #[doc = "See [`arch::vuzp1q_s64`]."] + #[inline(always)] + pub fn vuzp1q_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vuzp1q_s64(a, b) } + } + #[doc = "See [`arch::vuzp1_u32`]."] + #[inline(always)] + pub fn vuzp1_u32(self, a: uint32x2_t, b: uint32x2_t) -> 
uint32x2_t { + unsafe { vuzp1_u32(a, b) } + } + #[doc = "See [`arch::vuzp1q_u64`]."] + #[inline(always)] + pub fn vuzp1q_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vuzp1q_u64(a, b) } + } + #[doc = "See [`arch::vuzp1q_p64`]."] + #[inline(always)] + pub fn vuzp1q_p64(self, a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { vuzp1q_p64(a, b) } + } + #[doc = "See [`arch::vuzp1q_f32`]."] + #[inline(always)] + pub fn vuzp1q_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vuzp1q_f32(a, b) } + } + #[doc = "See [`arch::vuzp1_s8`]."] + #[inline(always)] + pub fn vuzp1_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vuzp1_s8(a, b) } + } + #[doc = "See [`arch::vuzp1q_s8`]."] + #[inline(always)] + pub fn vuzp1q_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vuzp1q_s8(a, b) } + } + #[doc = "See [`arch::vuzp1_s16`]."] + #[inline(always)] + pub fn vuzp1_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vuzp1_s16(a, b) } + } + #[doc = "See [`arch::vuzp1q_s16`]."] + #[inline(always)] + pub fn vuzp1q_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vuzp1q_s16(a, b) } + } + #[doc = "See [`arch::vuzp1q_s32`]."] + #[inline(always)] + pub fn vuzp1q_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vuzp1q_s32(a, b) } + } + #[doc = "See [`arch::vuzp1_u8`]."] + #[inline(always)] + pub fn vuzp1_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vuzp1_u8(a, b) } + } + #[doc = "See [`arch::vuzp1q_u8`]."] + #[inline(always)] + pub fn vuzp1q_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vuzp1q_u8(a, b) } + } + #[doc = "See [`arch::vuzp1_u16`]."] + #[inline(always)] + pub fn vuzp1_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vuzp1_u16(a, b) } + } + #[doc = "See [`arch::vuzp1q_u16`]."] + #[inline(always)] + pub fn vuzp1q_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vuzp1q_u16(a, b) } + } + #[doc = "See 
[`arch::vuzp1q_u32`]."] + #[inline(always)] + pub fn vuzp1q_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vuzp1q_u32(a, b) } + } + #[doc = "See [`arch::vuzp1_p8`]."] + #[inline(always)] + pub fn vuzp1_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { vuzp1_p8(a, b) } + } + #[doc = "See [`arch::vuzp1q_p8`]."] + #[inline(always)] + pub fn vuzp1q_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { vuzp1q_p8(a, b) } + } + #[doc = "See [`arch::vuzp1_p16`]."] + #[inline(always)] + pub fn vuzp1_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { vuzp1_p16(a, b) } + } + #[doc = "See [`arch::vuzp1q_p16`]."] + #[inline(always)] + pub fn vuzp1q_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { vuzp1q_p16(a, b) } + } + #[doc = "See [`arch::vuzp2_f32`]."] + #[inline(always)] + pub fn vuzp2_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vuzp2_f32(a, b) } + } + #[doc = "See [`arch::vuzp2q_f64`]."] + #[inline(always)] + pub fn vuzp2q_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vuzp2q_f64(a, b) } + } + #[doc = "See [`arch::vuzp2_s32`]."] + #[inline(always)] + pub fn vuzp2_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vuzp2_s32(a, b) } + } + #[doc = "See [`arch::vuzp2q_s64`]."] + #[inline(always)] + pub fn vuzp2q_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vuzp2q_s64(a, b) } + } + #[doc = "See [`arch::vuzp2_u32`]."] + #[inline(always)] + pub fn vuzp2_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vuzp2_u32(a, b) } + } + #[doc = "See [`arch::vuzp2q_u64`]."] + #[inline(always)] + pub fn vuzp2q_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vuzp2q_u64(a, b) } + } + #[doc = "See [`arch::vuzp2q_p64`]."] + #[inline(always)] + pub fn vuzp2q_p64(self, a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { vuzp2q_p64(a, b) } + } + #[doc = "See [`arch::vuzp2q_f32`]."] + #[inline(always)] 
+ pub fn vuzp2q_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vuzp2q_f32(a, b) } + } + #[doc = "See [`arch::vuzp2_s8`]."] + #[inline(always)] + pub fn vuzp2_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vuzp2_s8(a, b) } + } + #[doc = "See [`arch::vuzp2q_s8`]."] + #[inline(always)] + pub fn vuzp2q_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vuzp2q_s8(a, b) } + } + #[doc = "See [`arch::vuzp2_s16`]."] + #[inline(always)] + pub fn vuzp2_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vuzp2_s16(a, b) } + } + #[doc = "See [`arch::vuzp2q_s16`]."] + #[inline(always)] + pub fn vuzp2q_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vuzp2q_s16(a, b) } + } + #[doc = "See [`arch::vuzp2q_s32`]."] + #[inline(always)] + pub fn vuzp2q_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vuzp2q_s32(a, b) } + } + #[doc = "See [`arch::vuzp2_u8`]."] + #[inline(always)] + pub fn vuzp2_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vuzp2_u8(a, b) } + } + #[doc = "See [`arch::vuzp2q_u8`]."] + #[inline(always)] + pub fn vuzp2q_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vuzp2q_u8(a, b) } + } + #[doc = "See [`arch::vuzp2_u16`]."] + #[inline(always)] + pub fn vuzp2_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vuzp2_u16(a, b) } + } + #[doc = "See [`arch::vuzp2q_u16`]."] + #[inline(always)] + pub fn vuzp2q_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vuzp2q_u16(a, b) } + } + #[doc = "See [`arch::vuzp2q_u32`]."] + #[inline(always)] + pub fn vuzp2q_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vuzp2q_u32(a, b) } + } + #[doc = "See [`arch::vuzp2_p8`]."] + #[inline(always)] + pub fn vuzp2_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { vuzp2_p8(a, b) } + } + #[doc = "See [`arch::vuzp2q_p8`]."] + #[inline(always)] + pub fn vuzp2q_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { 
+ unsafe { vuzp2q_p8(a, b) } + } + #[doc = "See [`arch::vuzp2_p16`]."] + #[inline(always)] + pub fn vuzp2_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { vuzp2_p16(a, b) } + } + #[doc = "See [`arch::vuzp2q_p16`]."] + #[inline(always)] + pub fn vuzp2q_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { vuzp2q_p16(a, b) } + } + #[doc = "See [`arch::vxarq_u64`]."] + #[inline(always)] + pub fn vxarq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vxarq_u64::(a, b) } + } + #[doc = "See [`arch::vzip1_f32`]."] + #[inline(always)] + pub fn vzip1_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vzip1_f32(a, b) } + } + #[doc = "See [`arch::vzip1q_f32`]."] + #[inline(always)] + pub fn vzip1q_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vzip1q_f32(a, b) } + } + #[doc = "See [`arch::vzip1q_f64`]."] + #[inline(always)] + pub fn vzip1q_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vzip1q_f64(a, b) } + } + #[doc = "See [`arch::vzip1_s8`]."] + #[inline(always)] + pub fn vzip1_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vzip1_s8(a, b) } + } + #[doc = "See [`arch::vzip1q_s8`]."] + #[inline(always)] + pub fn vzip1q_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vzip1q_s8(a, b) } + } + #[doc = "See [`arch::vzip1_s16`]."] + #[inline(always)] + pub fn vzip1_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vzip1_s16(a, b) } + } + #[doc = "See [`arch::vzip1q_s16`]."] + #[inline(always)] + pub fn vzip1q_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vzip1q_s16(a, b) } + } + #[doc = "See [`arch::vzip1_s32`]."] + #[inline(always)] + pub fn vzip1_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vzip1_s32(a, b) } + } + #[doc = "See [`arch::vzip1q_s32`]."] + #[inline(always)] + pub fn vzip1q_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vzip1q_s32(a, b) } + } + #[doc = "See 
[`arch::vzip1q_s64`]."] + #[inline(always)] + pub fn vzip1q_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vzip1q_s64(a, b) } + } + #[doc = "See [`arch::vzip1_u8`]."] + #[inline(always)] + pub fn vzip1_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vzip1_u8(a, b) } + } + #[doc = "See [`arch::vzip1q_u8`]."] + #[inline(always)] + pub fn vzip1q_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vzip1q_u8(a, b) } + } + #[doc = "See [`arch::vzip1_u16`]."] + #[inline(always)] + pub fn vzip1_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vzip1_u16(a, b) } + } + #[doc = "See [`arch::vzip1q_u16`]."] + #[inline(always)] + pub fn vzip1q_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vzip1q_u16(a, b) } + } + #[doc = "See [`arch::vzip1_u32`]."] + #[inline(always)] + pub fn vzip1_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vzip1_u32(a, b) } + } + #[doc = "See [`arch::vzip1q_u32`]."] + #[inline(always)] + pub fn vzip1q_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vzip1q_u32(a, b) } + } + #[doc = "See [`arch::vzip1q_u64`]."] + #[inline(always)] + pub fn vzip1q_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vzip1q_u64(a, b) } + } + #[doc = "See [`arch::vzip1_p8`]."] + #[inline(always)] + pub fn vzip1_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { vzip1_p8(a, b) } + } + #[doc = "See [`arch::vzip1q_p8`]."] + #[inline(always)] + pub fn vzip1q_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { vzip1q_p8(a, b) } + } + #[doc = "See [`arch::vzip1_p16`]."] + #[inline(always)] + pub fn vzip1_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { vzip1_p16(a, b) } + } + #[doc = "See [`arch::vzip1q_p16`]."] + #[inline(always)] + pub fn vzip1q_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { vzip1q_p16(a, b) } + } + #[doc = "See [`arch::vzip1q_p64`]."] + #[inline(always)] + pub fn 
vzip1q_p64(self, a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { vzip1q_p64(a, b) } + } + #[doc = "See [`arch::vzip2_f32`]."] + #[inline(always)] + pub fn vzip2_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vzip2_f32(a, b) } + } + #[doc = "See [`arch::vzip2q_f32`]."] + #[inline(always)] + pub fn vzip2q_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vzip2q_f32(a, b) } + } + #[doc = "See [`arch::vzip2q_f64`]."] + #[inline(always)] + pub fn vzip2q_f64(self, a: float64x2_t, b: float64x2_t) -> float64x2_t { + unsafe { vzip2q_f64(a, b) } + } + #[doc = "See [`arch::vzip2_s8`]."] + #[inline(always)] + pub fn vzip2_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vzip2_s8(a, b) } + } + #[doc = "See [`arch::vzip2q_s8`]."] + #[inline(always)] + pub fn vzip2q_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vzip2q_s8(a, b) } + } + #[doc = "See [`arch::vzip2_s16`]."] + #[inline(always)] + pub fn vzip2_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vzip2_s16(a, b) } + } + #[doc = "See [`arch::vzip2q_s16`]."] + #[inline(always)] + pub fn vzip2q_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vzip2q_s16(a, b) } + } + #[doc = "See [`arch::vzip2_s32`]."] + #[inline(always)] + pub fn vzip2_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vzip2_s32(a, b) } + } + #[doc = "See [`arch::vzip2q_s32`]."] + #[inline(always)] + pub fn vzip2q_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vzip2q_s32(a, b) } + } + #[doc = "See [`arch::vzip2q_s64`]."] + #[inline(always)] + pub fn vzip2q_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vzip2q_s64(a, b) } + } + #[doc = "See [`arch::vzip2_u8`]."] + #[inline(always)] + pub fn vzip2_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vzip2_u8(a, b) } + } + #[doc = "See [`arch::vzip2q_u8`]."] + #[inline(always)] + pub fn vzip2q_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { 
+ unsafe { vzip2q_u8(a, b) } + } + #[doc = "See [`arch::vzip2_u16`]."] + #[inline(always)] + pub fn vzip2_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vzip2_u16(a, b) } + } + #[doc = "See [`arch::vzip2q_u16`]."] + #[inline(always)] + pub fn vzip2q_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vzip2q_u16(a, b) } + } + #[doc = "See [`arch::vzip2_u32`]."] + #[inline(always)] + pub fn vzip2_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vzip2_u32(a, b) } + } + #[doc = "See [`arch::vzip2q_u32`]."] + #[inline(always)] + pub fn vzip2q_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vzip2q_u32(a, b) } + } + #[doc = "See [`arch::vzip2q_u64`]."] + #[inline(always)] + pub fn vzip2q_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vzip2q_u64(a, b) } + } + #[doc = "See [`arch::vzip2_p8`]."] + #[inline(always)] + pub fn vzip2_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { vzip2_p8(a, b) } + } + #[doc = "See [`arch::vzip2q_p8`]."] + #[inline(always)] + pub fn vzip2q_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { vzip2q_p8(a, b) } + } + #[doc = "See [`arch::vzip2_p16`]."] + #[inline(always)] + pub fn vzip2_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { vzip2_p16(a, b) } + } + #[doc = "See [`arch::vzip2q_p16`]."] + #[inline(always)] + pub fn vzip2q_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { vzip2q_p16(a, b) } + } + #[doc = "See [`arch::vzip2q_p64`]."] + #[inline(always)] + pub fn vzip2q_p64(self, a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { vzip2q_p64(a, b) } + } + #[doc = "See [`arch::vaba_s16`]."] + #[inline(always)] + pub fn vaba_s16(self, a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + unsafe { vaba_s16(a, b, c) } + } + #[doc = "See [`arch::vaba_s32`]."] + #[inline(always)] + pub fn vaba_s32(self, a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + unsafe { vaba_s32(a, b, 
c) } + } + #[doc = "See [`arch::vaba_s8`]."] + #[inline(always)] + pub fn vaba_s8(self, a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + unsafe { vaba_s8(a, b, c) } + } + #[doc = "See [`arch::vaba_u16`]."] + #[inline(always)] + pub fn vaba_u16(self, a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + unsafe { vaba_u16(a, b, c) } + } + #[doc = "See [`arch::vaba_u32`]."] + #[inline(always)] + pub fn vaba_u32(self, a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + unsafe { vaba_u32(a, b, c) } + } + #[doc = "See [`arch::vaba_u8`]."] + #[inline(always)] + pub fn vaba_u8(self, a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vaba_u8(a, b, c) } + } + #[doc = "See [`arch::vabal_s8`]."] + #[inline(always)] + pub fn vabal_s8(self, a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + unsafe { vabal_s8(a, b, c) } + } + #[doc = "See [`arch::vabal_s16`]."] + #[inline(always)] + pub fn vabal_s16(self, a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + unsafe { vabal_s16(a, b, c) } + } + #[doc = "See [`arch::vabal_s32`]."] + #[inline(always)] + pub fn vabal_s32(self, a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + unsafe { vabal_s32(a, b, c) } + } + #[doc = "See [`arch::vabal_u8`]."] + #[inline(always)] + pub fn vabal_u8(self, a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + unsafe { vabal_u8(a, b, c) } + } + #[doc = "See [`arch::vabal_u16`]."] + #[inline(always)] + pub fn vabal_u16(self, a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + unsafe { vabal_u16(a, b, c) } + } + #[doc = "See [`arch::vabal_u32`]."] + #[inline(always)] + pub fn vabal_u32(self, a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + unsafe { vabal_u32(a, b, c) } + } + #[doc = "See [`arch::vabaq_s16`]."] + #[inline(always)] + pub fn vabaq_s16(self, a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + unsafe { vabaq_s16(a, b, c) } + } + #[doc = "See [`arch::vabaq_s32`]."] + #[inline(always)] + pub fn 
vabaq_s32(self, a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + unsafe { vabaq_s32(a, b, c) } + } + #[doc = "See [`arch::vabaq_s8`]."] + #[inline(always)] + pub fn vabaq_s8(self, a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + unsafe { vabaq_s8(a, b, c) } + } + #[doc = "See [`arch::vabaq_u16`]."] + #[inline(always)] + pub fn vabaq_u16(self, a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + unsafe { vabaq_u16(a, b, c) } + } + #[doc = "See [`arch::vabaq_u32`]."] + #[inline(always)] + pub fn vabaq_u32(self, a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe { vabaq_u32(a, b, c) } + } + #[doc = "See [`arch::vabaq_u8`]."] + #[inline(always)] + pub fn vabaq_u8(self, a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { vabaq_u8(a, b, c) } + } + #[doc = "See [`arch::vabd_f32`]."] + #[inline(always)] + pub fn vabd_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vabd_f32(a, b) } + } + #[doc = "See [`arch::vabdq_f32`]."] + #[inline(always)] + pub fn vabdq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vabdq_f32(a, b) } + } + #[doc = "See [`arch::vabd_s8`]."] + #[inline(always)] + pub fn vabd_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vabd_s8(a, b) } + } + #[doc = "See [`arch::vabdq_s8`]."] + #[inline(always)] + pub fn vabdq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vabdq_s8(a, b) } + } + #[doc = "See [`arch::vabd_s16`]."] + #[inline(always)] + pub fn vabd_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vabd_s16(a, b) } + } + #[doc = "See [`arch::vabdq_s16`]."] + #[inline(always)] + pub fn vabdq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vabdq_s16(a, b) } + } + #[doc = "See [`arch::vabd_s32`]."] + #[inline(always)] + pub fn vabd_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vabd_s32(a, b) } + } + #[doc = "See [`arch::vabdq_s32`]."] + #[inline(always)] + pub fn 
vabdq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vabdq_s32(a, b) } + } + #[doc = "See [`arch::vabd_u8`]."] + #[inline(always)] + pub fn vabd_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vabd_u8(a, b) } + } + #[doc = "See [`arch::vabdq_u8`]."] + #[inline(always)] + pub fn vabdq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vabdq_u8(a, b) } + } + #[doc = "See [`arch::vabd_u16`]."] + #[inline(always)] + pub fn vabd_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vabd_u16(a, b) } + } + #[doc = "See [`arch::vabdq_u16`]."] + #[inline(always)] + pub fn vabdq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vabdq_u16(a, b) } + } + #[doc = "See [`arch::vabd_u32`]."] + #[inline(always)] + pub fn vabd_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vabd_u32(a, b) } + } + #[doc = "See [`arch::vabdq_u32`]."] + #[inline(always)] + pub fn vabdq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vabdq_u32(a, b) } + } + #[doc = "See [`arch::vabdl_s8`]."] + #[inline(always)] + pub fn vabdl_s8(self, a: int8x8_t, b: int8x8_t) -> int16x8_t { + unsafe { vabdl_s8(a, b) } + } + #[doc = "See [`arch::vabdl_s16`]."] + #[inline(always)] + pub fn vabdl_s16(self, a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe { vabdl_s16(a, b) } + } + #[doc = "See [`arch::vabdl_s32`]."] + #[inline(always)] + pub fn vabdl_s32(self, a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe { vabdl_s32(a, b) } + } + #[doc = "See [`arch::vabdl_u8`]."] + #[inline(always)] + pub fn vabdl_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe { vabdl_u8(a, b) } + } + #[doc = "See [`arch::vabdl_u16`]."] + #[inline(always)] + pub fn vabdl_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { vabdl_u16(a, b) } + } + #[doc = "See [`arch::vabdl_u32`]."] + #[inline(always)] + pub fn vabdl_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { vabdl_u32(a, b) } + } 
+ #[doc = "See [`arch::vabs_f32`]."] + #[inline(always)] + pub fn vabs_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vabs_f32(a) } + } + #[doc = "See [`arch::vabsq_f32`]."] + #[inline(always)] + pub fn vabsq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vabsq_f32(a) } + } + #[doc = "See [`arch::vabs_s8`]."] + #[inline(always)] + pub fn vabs_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vabs_s8(a) } + } + #[doc = "See [`arch::vabsq_s8`]."] + #[inline(always)] + pub fn vabsq_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vabsq_s8(a) } + } + #[doc = "See [`arch::vabs_s16`]."] + #[inline(always)] + pub fn vabs_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vabs_s16(a) } + } + #[doc = "See [`arch::vabsq_s16`]."] + #[inline(always)] + pub fn vabsq_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vabsq_s16(a) } + } + #[doc = "See [`arch::vabs_s32`]."] + #[inline(always)] + pub fn vabs_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vabs_s32(a) } + } + #[doc = "See [`arch::vabsq_s32`]."] + #[inline(always)] + pub fn vabsq_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vabsq_s32(a) } + } + #[doc = "See [`arch::vadd_f32`]."] + #[inline(always)] + pub fn vadd_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vadd_f32(a, b) } + } + #[doc = "See [`arch::vadd_s16`]."] + #[inline(always)] + pub fn vadd_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vadd_s16(a, b) } + } + #[doc = "See [`arch::vadd_s32`]."] + #[inline(always)] + pub fn vadd_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vadd_s32(a, b) } + } + #[doc = "See [`arch::vadd_s8`]."] + #[inline(always)] + pub fn vadd_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vadd_s8(a, b) } + } + #[doc = "See [`arch::vadd_u16`]."] + #[inline(always)] + pub fn vadd_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vadd_u16(a, b) } + } + #[doc = "See [`arch::vadd_u32`]."] + #[inline(always)] + pub fn vadd_u32(self, a: 
uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vadd_u32(a, b) } + } + #[doc = "See [`arch::vadd_u8`]."] + #[inline(always)] + pub fn vadd_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vadd_u8(a, b) } + } + #[doc = "See [`arch::vaddq_f32`]."] + #[inline(always)] + pub fn vaddq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vaddq_f32(a, b) } + } + #[doc = "See [`arch::vaddq_s16`]."] + #[inline(always)] + pub fn vaddq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vaddq_s16(a, b) } + } + #[doc = "See [`arch::vaddq_s32`]."] + #[inline(always)] + pub fn vaddq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vaddq_s32(a, b) } + } + #[doc = "See [`arch::vaddq_s64`]."] + #[inline(always)] + pub fn vaddq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vaddq_s64(a, b) } + } + #[doc = "See [`arch::vaddq_s8`]."] + #[inline(always)] + pub fn vaddq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vaddq_s8(a, b) } + } + #[doc = "See [`arch::vaddq_u16`]."] + #[inline(always)] + pub fn vaddq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vaddq_u16(a, b) } + } + #[doc = "See [`arch::vaddq_u32`]."] + #[inline(always)] + pub fn vaddq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vaddq_u32(a, b) } + } + #[doc = "See [`arch::vaddq_u64`]."] + #[inline(always)] + pub fn vaddq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vaddq_u64(a, b) } + } + #[doc = "See [`arch::vaddq_u8`]."] + #[inline(always)] + pub fn vaddq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vaddq_u8(a, b) } + } + #[doc = "See [`arch::vadd_p8`]."] + #[inline(always)] + pub fn vadd_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { vadd_p8(a, b) } + } + #[doc = "See [`arch::vaddq_p8`]."] + #[inline(always)] + pub fn vaddq_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { vaddq_p8(a, b) } + } + #[doc = "See 
[`arch::vadd_p16`]."] + #[inline(always)] + pub fn vadd_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { vadd_p16(a, b) } + } + #[doc = "See [`arch::vaddq_p16`]."] + #[inline(always)] + pub fn vaddq_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { vaddq_p16(a, b) } + } + #[doc = "See [`arch::vadd_p64`]."] + #[inline(always)] + pub fn vadd_p64(self, a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { + unsafe { vadd_p64(a, b) } + } + #[doc = "See [`arch::vaddq_p64`]."] + #[inline(always)] + pub fn vaddq_p64(self, a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { + unsafe { vaddq_p64(a, b) } + } + #[doc = "See [`arch::vaddhn_high_s16`]."] + #[inline(always)] + pub fn vaddhn_high_s16(self, r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t { + unsafe { vaddhn_high_s16(r, a, b) } + } + #[doc = "See [`arch::vaddhn_high_s32`]."] + #[inline(always)] + pub fn vaddhn_high_s32(self, r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t { + unsafe { vaddhn_high_s32(r, a, b) } + } + #[doc = "See [`arch::vaddhn_high_s64`]."] + #[inline(always)] + pub fn vaddhn_high_s64(self, r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t { + unsafe { vaddhn_high_s64(r, a, b) } + } + #[doc = "See [`arch::vaddhn_high_u16`]."] + #[inline(always)] + pub fn vaddhn_high_u16(self, r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t { + unsafe { vaddhn_high_u16(r, a, b) } + } + #[doc = "See [`arch::vaddhn_high_u32`]."] + #[inline(always)] + pub fn vaddhn_high_u32(self, r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t { + unsafe { vaddhn_high_u32(r, a, b) } + } + #[doc = "See [`arch::vaddhn_high_u64`]."] + #[inline(always)] + pub fn vaddhn_high_u64(self, r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t { + unsafe { vaddhn_high_u64(r, a, b) } + } + #[doc = "See [`arch::vaddhn_s16`]."] + #[inline(always)] + pub fn vaddhn_s16(self, a: int16x8_t, b: int16x8_t) -> int8x8_t { + unsafe { vaddhn_s16(a, b) } + } + #[doc = "See 
[`arch::vaddhn_s32`]."] + #[inline(always)] + pub fn vaddhn_s32(self, a: int32x4_t, b: int32x4_t) -> int16x4_t { + unsafe { vaddhn_s32(a, b) } + } + #[doc = "See [`arch::vaddhn_s64`]."] + #[inline(always)] + pub fn vaddhn_s64(self, a: int64x2_t, b: int64x2_t) -> int32x2_t { + unsafe { vaddhn_s64(a, b) } + } + #[doc = "See [`arch::vaddhn_u16`]."] + #[inline(always)] + pub fn vaddhn_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + unsafe { vaddhn_u16(a, b) } + } + #[doc = "See [`arch::vaddhn_u32`]."] + #[inline(always)] + pub fn vaddhn_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + unsafe { vaddhn_u32(a, b) } + } + #[doc = "See [`arch::vaddhn_u64`]."] + #[inline(always)] + pub fn vaddhn_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + unsafe { vaddhn_u64(a, b) } + } + #[doc = "See [`arch::vaddl_high_s16`]."] + #[inline(always)] + pub fn vaddl_high_s16(self, a: int16x8_t, b: int16x8_t) -> int32x4_t { + unsafe { vaddl_high_s16(a, b) } + } + #[doc = "See [`arch::vaddl_high_s32`]."] + #[inline(always)] + pub fn vaddl_high_s32(self, a: int32x4_t, b: int32x4_t) -> int64x2_t { + unsafe { vaddl_high_s32(a, b) } + } + #[doc = "See [`arch::vaddl_high_s8`]."] + #[inline(always)] + pub fn vaddl_high_s8(self, a: int8x16_t, b: int8x16_t) -> int16x8_t { + unsafe { vaddl_high_s8(a, b) } + } + #[doc = "See [`arch::vaddl_high_u16`]."] + #[inline(always)] + pub fn vaddl_high_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { + unsafe { vaddl_high_u16(a, b) } + } + #[doc = "See [`arch::vaddl_high_u32`]."] + #[inline(always)] + pub fn vaddl_high_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { + unsafe { vaddl_high_u32(a, b) } + } + #[doc = "See [`arch::vaddl_high_u8`]."] + #[inline(always)] + pub fn vaddl_high_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { + unsafe { vaddl_high_u8(a, b) } + } + #[doc = "See [`arch::vaddl_s16`]."] + #[inline(always)] + pub fn vaddl_s16(self, a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe { vaddl_s16(a, 
b) } + } + #[doc = "See [`arch::vaddl_s32`]."] + #[inline(always)] + pub fn vaddl_s32(self, a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe { vaddl_s32(a, b) } + } + #[doc = "See [`arch::vaddl_s8`]."] + #[inline(always)] + pub fn vaddl_s8(self, a: int8x8_t, b: int8x8_t) -> int16x8_t { + unsafe { vaddl_s8(a, b) } + } + #[doc = "See [`arch::vaddl_u16`]."] + #[inline(always)] + pub fn vaddl_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { vaddl_u16(a, b) } + } + #[doc = "See [`arch::vaddl_u32`]."] + #[inline(always)] + pub fn vaddl_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { vaddl_u32(a, b) } + } + #[doc = "See [`arch::vaddl_u8`]."] + #[inline(always)] + pub fn vaddl_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe { vaddl_u8(a, b) } + } + #[doc = "See [`arch::vaddq_p128`]."] + #[inline(always)] + pub fn vaddq_p128(self, a: p128, b: p128) -> p128 { + unsafe { vaddq_p128(a, b) } + } + #[doc = "See [`arch::vaddw_high_s16`]."] + #[inline(always)] + pub fn vaddw_high_s16(self, a: int32x4_t, b: int16x8_t) -> int32x4_t { + unsafe { vaddw_high_s16(a, b) } + } + #[doc = "See [`arch::vaddw_high_s32`]."] + #[inline(always)] + pub fn vaddw_high_s32(self, a: int64x2_t, b: int32x4_t) -> int64x2_t { + unsafe { vaddw_high_s32(a, b) } + } + #[doc = "See [`arch::vaddw_high_s8`]."] + #[inline(always)] + pub fn vaddw_high_s8(self, a: int16x8_t, b: int8x16_t) -> int16x8_t { + unsafe { vaddw_high_s8(a, b) } + } + #[doc = "See [`arch::vaddw_high_u16`]."] + #[inline(always)] + pub fn vaddw_high_u16(self, a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + unsafe { vaddw_high_u16(a, b) } + } + #[doc = "See [`arch::vaddw_high_u32`]."] + #[inline(always)] + pub fn vaddw_high_u32(self, a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + unsafe { vaddw_high_u32(a, b) } + } + #[doc = "See [`arch::vaddw_high_u8`]."] + #[inline(always)] + pub fn vaddw_high_u8(self, a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + unsafe { vaddw_high_u8(a, b) } + } + 
#[doc = "See [`arch::vaddw_s16`]."] + #[inline(always)] + pub fn vaddw_s16(self, a: int32x4_t, b: int16x4_t) -> int32x4_t { + unsafe { vaddw_s16(a, b) } + } + #[doc = "See [`arch::vaddw_s32`]."] + #[inline(always)] + pub fn vaddw_s32(self, a: int64x2_t, b: int32x2_t) -> int64x2_t { + unsafe { vaddw_s32(a, b) } + } + #[doc = "See [`arch::vaddw_s8`]."] + #[inline(always)] + pub fn vaddw_s8(self, a: int16x8_t, b: int8x8_t) -> int16x8_t { + unsafe { vaddw_s8(a, b) } + } + #[doc = "See [`arch::vaddw_u16`]."] + #[inline(always)] + pub fn vaddw_u16(self, a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { vaddw_u16(a, b) } + } + #[doc = "See [`arch::vaddw_u32`]."] + #[inline(always)] + pub fn vaddw_u32(self, a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { vaddw_u32(a, b) } + } + #[doc = "See [`arch::vaddw_u8`]."] + #[inline(always)] + pub fn vaddw_u8(self, a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe { vaddw_u8(a, b) } + } + #[doc = "See [`arch::vand_s8`]."] + #[inline(always)] + pub fn vand_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vand_s8(a, b) } + } + #[doc = "See [`arch::vandq_s8`]."] + #[inline(always)] + pub fn vandq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vandq_s8(a, b) } + } + #[doc = "See [`arch::vand_s16`]."] + #[inline(always)] + pub fn vand_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vand_s16(a, b) } + } + #[doc = "See [`arch::vandq_s16`]."] + #[inline(always)] + pub fn vandq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vandq_s16(a, b) } + } + #[doc = "See [`arch::vand_s32`]."] + #[inline(always)] + pub fn vand_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vand_s32(a, b) } + } + #[doc = "See [`arch::vandq_s32`]."] + #[inline(always)] + pub fn vandq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vandq_s32(a, b) } + } + #[doc = "See [`arch::vand_s64`]."] + #[inline(always)] + pub fn vand_s64(self, a: int64x1_t, b: int64x1_t) 
-> int64x1_t { + unsafe { vand_s64(a, b) } + } + #[doc = "See [`arch::vandq_s64`]."] + #[inline(always)] + pub fn vandq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vandq_s64(a, b) } + } + #[doc = "See [`arch::vand_u8`]."] + #[inline(always)] + pub fn vand_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vand_u8(a, b) } + } + #[doc = "See [`arch::vandq_u8`]."] + #[inline(always)] + pub fn vandq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vandq_u8(a, b) } + } + #[doc = "See [`arch::vand_u16`]."] + #[inline(always)] + pub fn vand_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vand_u16(a, b) } + } + #[doc = "See [`arch::vandq_u16`]."] + #[inline(always)] + pub fn vandq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vandq_u16(a, b) } + } + #[doc = "See [`arch::vand_u32`]."] + #[inline(always)] + pub fn vand_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vand_u32(a, b) } + } + #[doc = "See [`arch::vandq_u32`]."] + #[inline(always)] + pub fn vandq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vandq_u32(a, b) } + } + #[doc = "See [`arch::vand_u64`]."] + #[inline(always)] + pub fn vand_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vand_u64(a, b) } + } + #[doc = "See [`arch::vandq_u64`]."] + #[inline(always)] + pub fn vandq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vandq_u64(a, b) } + } + #[doc = "See [`arch::vbic_s16`]."] + #[inline(always)] + pub fn vbic_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vbic_s16(a, b) } + } + #[doc = "See [`arch::vbic_s32`]."] + #[inline(always)] + pub fn vbic_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vbic_s32(a, b) } + } + #[doc = "See [`arch::vbic_s64`]."] + #[inline(always)] + pub fn vbic_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vbic_s64(a, b) } + } + #[doc = "See [`arch::vbic_s8`]."] + 
#[inline(always)] + pub fn vbic_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vbic_s8(a, b) } + } + #[doc = "See [`arch::vbicq_s16`]."] + #[inline(always)] + pub fn vbicq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vbicq_s16(a, b) } + } + #[doc = "See [`arch::vbicq_s32`]."] + #[inline(always)] + pub fn vbicq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vbicq_s32(a, b) } + } + #[doc = "See [`arch::vbicq_s64`]."] + #[inline(always)] + pub fn vbicq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vbicq_s64(a, b) } + } + #[doc = "See [`arch::vbicq_s8`]."] + #[inline(always)] + pub fn vbicq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vbicq_s8(a, b) } + } + #[doc = "See [`arch::vbic_u16`]."] + #[inline(always)] + pub fn vbic_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vbic_u16(a, b) } + } + #[doc = "See [`arch::vbic_u32`]."] + #[inline(always)] + pub fn vbic_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vbic_u32(a, b) } + } + #[doc = "See [`arch::vbic_u64`]."] + #[inline(always)] + pub fn vbic_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vbic_u64(a, b) } + } + #[doc = "See [`arch::vbic_u8`]."] + #[inline(always)] + pub fn vbic_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vbic_u8(a, b) } + } + #[doc = "See [`arch::vbicq_u16`]."] + #[inline(always)] + pub fn vbicq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vbicq_u16(a, b) } + } + #[doc = "See [`arch::vbicq_u32`]."] + #[inline(always)] + pub fn vbicq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vbicq_u32(a, b) } + } + #[doc = "See [`arch::vbicq_u64`]."] + #[inline(always)] + pub fn vbicq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vbicq_u64(a, b) } + } + #[doc = "See [`arch::vbicq_u8`]."] + #[inline(always)] + pub fn vbicq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { 
vbicq_u8(a, b) } + } + #[doc = "See [`arch::vbsl_f32`]."] + #[inline(always)] + pub fn vbsl_f32(self, a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { vbsl_f32(a, b, c) } + } + #[doc = "See [`arch::vbsl_p16`]."] + #[inline(always)] + pub fn vbsl_p16(self, a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t { + unsafe { vbsl_p16(a, b, c) } + } + #[doc = "See [`arch::vbsl_p8`]."] + #[inline(always)] + pub fn vbsl_p8(self, a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { + unsafe { vbsl_p8(a, b, c) } + } + #[doc = "See [`arch::vbsl_s16`]."] + #[inline(always)] + pub fn vbsl_s16(self, a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + unsafe { vbsl_s16(a, b, c) } + } + #[doc = "See [`arch::vbsl_s32`]."] + #[inline(always)] + pub fn vbsl_s32(self, a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + unsafe { vbsl_s32(a, b, c) } + } + #[doc = "See [`arch::vbsl_s64`]."] + #[inline(always)] + pub fn vbsl_s64(self, a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { + unsafe { vbsl_s64(a, b, c) } + } + #[doc = "See [`arch::vbsl_s8`]."] + #[inline(always)] + pub fn vbsl_s8(self, a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + unsafe { vbsl_s8(a, b, c) } + } + #[doc = "See [`arch::vbslq_f32`]."] + #[inline(always)] + pub fn vbslq_f32(self, a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe { vbslq_f32(a, b, c) } + } + #[doc = "See [`arch::vbslq_p16`]."] + #[inline(always)] + pub fn vbslq_p16(self, a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t { + unsafe { vbslq_p16(a, b, c) } + } + #[doc = "See [`arch::vbslq_p8`]."] + #[inline(always)] + pub fn vbslq_p8(self, a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t { + unsafe { vbslq_p8(a, b, c) } + } + #[doc = "See [`arch::vbslq_s16`]."] + #[inline(always)] + pub fn vbslq_s16(self, a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + unsafe { vbslq_s16(a, b, c) } + } + #[doc = "See [`arch::vbslq_s32`]."] + 
#[inline(always)] + pub fn vbslq_s32(self, a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + unsafe { vbslq_s32(a, b, c) } + } + #[doc = "See [`arch::vbslq_s64`]."] + #[inline(always)] + pub fn vbslq_s64(self, a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { + unsafe { vbslq_s64(a, b, c) } + } + #[doc = "See [`arch::vbslq_s8`]."] + #[inline(always)] + pub fn vbslq_s8(self, a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + unsafe { vbslq_s8(a, b, c) } + } + #[doc = "See [`arch::vbsl_u16`]."] + #[inline(always)] + pub fn vbsl_u16(self, a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + unsafe { vbsl_u16(a, b, c) } + } + #[doc = "See [`arch::vbsl_u32`]."] + #[inline(always)] + pub fn vbsl_u32(self, a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + unsafe { vbsl_u32(a, b, c) } + } + #[doc = "See [`arch::vbsl_u64`]."] + #[inline(always)] + pub fn vbsl_u64(self, a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t { + unsafe { vbsl_u64(a, b, c) } + } + #[doc = "See [`arch::vbsl_u8`]."] + #[inline(always)] + pub fn vbsl_u8(self, a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vbsl_u8(a, b, c) } + } + #[doc = "See [`arch::vbslq_u16`]."] + #[inline(always)] + pub fn vbslq_u16(self, a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + unsafe { vbslq_u16(a, b, c) } + } + #[doc = "See [`arch::vbslq_u32`]."] + #[inline(always)] + pub fn vbslq_u32(self, a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe { vbslq_u32(a, b, c) } + } + #[doc = "See [`arch::vbslq_u64`]."] + #[inline(always)] + pub fn vbslq_u64(self, a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { + unsafe { vbslq_u64(a, b, c) } + } + #[doc = "See [`arch::vbslq_u8`]."] + #[inline(always)] + pub fn vbslq_u8(self, a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { vbslq_u8(a, b, c) } + } + #[doc = "See [`arch::vcage_f32`]."] + #[inline(always)] + pub fn vcage_f32(self, a: 
float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { vcage_f32(a, b) } + } + #[doc = "See [`arch::vcageq_f32`]."] + #[inline(always)] + pub fn vcageq_f32(self, a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { vcageq_f32(a, b) } + } + #[doc = "See [`arch::vcagt_f32`]."] + #[inline(always)] + pub fn vcagt_f32(self, a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { vcagt_f32(a, b) } + } + #[doc = "See [`arch::vcagtq_f32`]."] + #[inline(always)] + pub fn vcagtq_f32(self, a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { vcagtq_f32(a, b) } + } + #[doc = "See [`arch::vcale_f32`]."] + #[inline(always)] + pub fn vcale_f32(self, a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { vcale_f32(a, b) } + } + #[doc = "See [`arch::vcaleq_f32`]."] + #[inline(always)] + pub fn vcaleq_f32(self, a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { vcaleq_f32(a, b) } + } + #[doc = "See [`arch::vcalt_f32`]."] + #[inline(always)] + pub fn vcalt_f32(self, a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { vcalt_f32(a, b) } + } + #[doc = "See [`arch::vcaltq_f32`]."] + #[inline(always)] + pub fn vcaltq_f32(self, a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { vcaltq_f32(a, b) } + } + #[doc = "See [`arch::vceq_f32`]."] + #[inline(always)] + pub fn vceq_f32(self, a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { vceq_f32(a, b) } + } + #[doc = "See [`arch::vceqq_f32`]."] + #[inline(always)] + pub fn vceqq_f32(self, a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { vceqq_f32(a, b) } + } + #[doc = "See [`arch::vceq_s8`]."] + #[inline(always)] + pub fn vceq_s8(self, a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { vceq_s8(a, b) } + } + #[doc = "See [`arch::vceqq_s8`]."] + #[inline(always)] + pub fn vceqq_s8(self, a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { vceqq_s8(a, b) } + } + #[doc = "See [`arch::vceq_s16`]."] + #[inline(always)] + pub fn vceq_s16(self, a: int16x4_t, b: int16x4_t) -> uint16x4_t { + 
unsafe { vceq_s16(a, b) } + } + #[doc = "See [`arch::vceqq_s16`]."] + #[inline(always)] + pub fn vceqq_s16(self, a: int16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { vceqq_s16(a, b) } + } + #[doc = "See [`arch::vceq_s32`]."] + #[inline(always)] + pub fn vceq_s32(self, a: int32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { vceq_s32(a, b) } + } + #[doc = "See [`arch::vceqq_s32`]."] + #[inline(always)] + pub fn vceqq_s32(self, a: int32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { vceqq_s32(a, b) } + } + #[doc = "See [`arch::vceq_u8`]."] + #[inline(always)] + pub fn vceq_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vceq_u8(a, b) } + } + #[doc = "See [`arch::vceqq_u8`]."] + #[inline(always)] + pub fn vceqq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vceqq_u8(a, b) } + } + #[doc = "See [`arch::vceq_u16`]."] + #[inline(always)] + pub fn vceq_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vceq_u16(a, b) } + } + #[doc = "See [`arch::vceqq_u16`]."] + #[inline(always)] + pub fn vceqq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vceqq_u16(a, b) } + } + #[doc = "See [`arch::vceq_u32`]."] + #[inline(always)] + pub fn vceq_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vceq_u32(a, b) } + } + #[doc = "See [`arch::vceqq_u32`]."] + #[inline(always)] + pub fn vceqq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vceqq_u32(a, b) } + } + #[doc = "See [`arch::vceq_p8`]."] + #[inline(always)] + pub fn vceq_p8(self, a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { + unsafe { vceq_p8(a, b) } + } + #[doc = "See [`arch::vceqq_p8`]."] + #[inline(always)] + pub fn vceqq_p8(self, a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { + unsafe { vceqq_p8(a, b) } + } + #[doc = "See [`arch::vcge_f32`]."] + #[inline(always)] + pub fn vcge_f32(self, a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { vcge_f32(a, b) } + } + #[doc = "See [`arch::vcgeq_f32`]."] + #[inline(always)] + pub 
fn vcgeq_f32(self, a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { vcgeq_f32(a, b) } + } + #[doc = "See [`arch::vcge_s8`]."] + #[inline(always)] + pub fn vcge_s8(self, a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { vcge_s8(a, b) } + } + #[doc = "See [`arch::vcgeq_s8`]."] + #[inline(always)] + pub fn vcgeq_s8(self, a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { vcgeq_s8(a, b) } + } + #[doc = "See [`arch::vcge_s16`]."] + #[inline(always)] + pub fn vcge_s16(self, a: int16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { vcge_s16(a, b) } + } + #[doc = "See [`arch::vcgeq_s16`]."] + #[inline(always)] + pub fn vcgeq_s16(self, a: int16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { vcgeq_s16(a, b) } + } + #[doc = "See [`arch::vcge_s32`]."] + #[inline(always)] + pub fn vcge_s32(self, a: int32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { vcge_s32(a, b) } + } + #[doc = "See [`arch::vcgeq_s32`]."] + #[inline(always)] + pub fn vcgeq_s32(self, a: int32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { vcgeq_s32(a, b) } + } + #[doc = "See [`arch::vcge_u8`]."] + #[inline(always)] + pub fn vcge_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vcge_u8(a, b) } + } + #[doc = "See [`arch::vcgeq_u8`]."] + #[inline(always)] + pub fn vcgeq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vcgeq_u8(a, b) } + } + #[doc = "See [`arch::vcge_u16`]."] + #[inline(always)] + pub fn vcge_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vcge_u16(a, b) } + } + #[doc = "See [`arch::vcgeq_u16`]."] + #[inline(always)] + pub fn vcgeq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vcgeq_u16(a, b) } + } + #[doc = "See [`arch::vcge_u32`]."] + #[inline(always)] + pub fn vcge_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vcge_u32(a, b) } + } + #[doc = "See [`arch::vcgeq_u32`]."] + #[inline(always)] + pub fn vcgeq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vcgeq_u32(a, b) } + } + 
#[doc = "See [`arch::vcgt_f32`]."] + #[inline(always)] + pub fn vcgt_f32(self, a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { vcgt_f32(a, b) } + } + #[doc = "See [`arch::vcgtq_f32`]."] + #[inline(always)] + pub fn vcgtq_f32(self, a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { vcgtq_f32(a, b) } + } + #[doc = "See [`arch::vcgt_s8`]."] + #[inline(always)] + pub fn vcgt_s8(self, a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { vcgt_s8(a, b) } + } + #[doc = "See [`arch::vcgtq_s8`]."] + #[inline(always)] + pub fn vcgtq_s8(self, a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { vcgtq_s8(a, b) } + } + #[doc = "See [`arch::vcgt_s16`]."] + #[inline(always)] + pub fn vcgt_s16(self, a: int16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { vcgt_s16(a, b) } + } + #[doc = "See [`arch::vcgtq_s16`]."] + #[inline(always)] + pub fn vcgtq_s16(self, a: int16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { vcgtq_s16(a, b) } + } + #[doc = "See [`arch::vcgt_s32`]."] + #[inline(always)] + pub fn vcgt_s32(self, a: int32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { vcgt_s32(a, b) } + } + #[doc = "See [`arch::vcgtq_s32`]."] + #[inline(always)] + pub fn vcgtq_s32(self, a: int32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { vcgtq_s32(a, b) } + } + #[doc = "See [`arch::vcgt_u8`]."] + #[inline(always)] + pub fn vcgt_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vcgt_u8(a, b) } + } + #[doc = "See [`arch::vcgtq_u8`]."] + #[inline(always)] + pub fn vcgtq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vcgtq_u8(a, b) } + } + #[doc = "See [`arch::vcgt_u16`]."] + #[inline(always)] + pub fn vcgt_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vcgt_u16(a, b) } + } + #[doc = "See [`arch::vcgtq_u16`]."] + #[inline(always)] + pub fn vcgtq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vcgtq_u16(a, b) } + } + #[doc = "See [`arch::vcgt_u32`]."] + #[inline(always)] + pub fn vcgt_u32(self, a: uint32x2_t, b: 
uint32x2_t) -> uint32x2_t { + unsafe { vcgt_u32(a, b) } + } + #[doc = "See [`arch::vcgtq_u32`]."] + #[inline(always)] + pub fn vcgtq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vcgtq_u32(a, b) } + } + #[doc = "See [`arch::vcle_f32`]."] + #[inline(always)] + pub fn vcle_f32(self, a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { vcle_f32(a, b) } + } + #[doc = "See [`arch::vcleq_f32`]."] + #[inline(always)] + pub fn vcleq_f32(self, a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { vcleq_f32(a, b) } + } + #[doc = "See [`arch::vcle_s8`]."] + #[inline(always)] + pub fn vcle_s8(self, a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { vcle_s8(a, b) } + } + #[doc = "See [`arch::vcleq_s8`]."] + #[inline(always)] + pub fn vcleq_s8(self, a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { vcleq_s8(a, b) } + } + #[doc = "See [`arch::vcle_s16`]."] + #[inline(always)] + pub fn vcle_s16(self, a: int16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { vcle_s16(a, b) } + } + #[doc = "See [`arch::vcleq_s16`]."] + #[inline(always)] + pub fn vcleq_s16(self, a: int16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { vcleq_s16(a, b) } + } + #[doc = "See [`arch::vcle_s32`]."] + #[inline(always)] + pub fn vcle_s32(self, a: int32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { vcle_s32(a, b) } + } + #[doc = "See [`arch::vcleq_s32`]."] + #[inline(always)] + pub fn vcleq_s32(self, a: int32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { vcleq_s32(a, b) } + } + #[doc = "See [`arch::vcle_u8`]."] + #[inline(always)] + pub fn vcle_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vcle_u8(a, b) } + } + #[doc = "See [`arch::vcleq_u8`]."] + #[inline(always)] + pub fn vcleq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vcleq_u8(a, b) } + } + #[doc = "See [`arch::vcle_u16`]."] + #[inline(always)] + pub fn vcle_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vcle_u16(a, b) } + } + #[doc = "See [`arch::vcleq_u16`]."] + 
#[inline(always)] + pub fn vcleq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vcleq_u16(a, b) } + } + #[doc = "See [`arch::vcle_u32`]."] + #[inline(always)] + pub fn vcle_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vcle_u32(a, b) } + } + #[doc = "See [`arch::vcleq_u32`]."] + #[inline(always)] + pub fn vcleq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vcleq_u32(a, b) } + } + #[doc = "See [`arch::vcls_s8`]."] + #[inline(always)] + pub fn vcls_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vcls_s8(a) } + } + #[doc = "See [`arch::vclsq_s8`]."] + #[inline(always)] + pub fn vclsq_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vclsq_s8(a) } + } + #[doc = "See [`arch::vcls_s16`]."] + #[inline(always)] + pub fn vcls_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vcls_s16(a) } + } + #[doc = "See [`arch::vclsq_s16`]."] + #[inline(always)] + pub fn vclsq_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vclsq_s16(a) } + } + #[doc = "See [`arch::vcls_s32`]."] + #[inline(always)] + pub fn vcls_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vcls_s32(a) } + } + #[doc = "See [`arch::vclsq_s32`]."] + #[inline(always)] + pub fn vclsq_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vclsq_s32(a) } + } + #[doc = "See [`arch::vcls_u8`]."] + #[inline(always)] + pub fn vcls_u8(self, a: uint8x8_t) -> int8x8_t { + unsafe { vcls_u8(a) } + } + #[doc = "See [`arch::vclsq_u8`]."] + #[inline(always)] + pub fn vclsq_u8(self, a: uint8x16_t) -> int8x16_t { + unsafe { vclsq_u8(a) } + } + #[doc = "See [`arch::vcls_u16`]."] + #[inline(always)] + pub fn vcls_u16(self, a: uint16x4_t) -> int16x4_t { + unsafe { vcls_u16(a) } + } + #[doc = "See [`arch::vclsq_u16`]."] + #[inline(always)] + pub fn vclsq_u16(self, a: uint16x8_t) -> int16x8_t { + unsafe { vclsq_u16(a) } + } + #[doc = "See [`arch::vcls_u32`]."] + #[inline(always)] + pub fn vcls_u32(self, a: uint32x2_t) -> int32x2_t { + unsafe { vcls_u32(a) } + } + #[doc = "See 
[`arch::vclsq_u32`]."] + #[inline(always)] + pub fn vclsq_u32(self, a: uint32x4_t) -> int32x4_t { + unsafe { vclsq_u32(a) } + } + #[doc = "See [`arch::vclt_f32`]."] + #[inline(always)] + pub fn vclt_f32(self, a: float32x2_t, b: float32x2_t) -> uint32x2_t { + unsafe { vclt_f32(a, b) } + } + #[doc = "See [`arch::vcltq_f32`]."] + #[inline(always)] + pub fn vcltq_f32(self, a: float32x4_t, b: float32x4_t) -> uint32x4_t { + unsafe { vcltq_f32(a, b) } + } + #[doc = "See [`arch::vclt_s8`]."] + #[inline(always)] + pub fn vclt_s8(self, a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { vclt_s8(a, b) } + } + #[doc = "See [`arch::vcltq_s8`]."] + #[inline(always)] + pub fn vcltq_s8(self, a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { vcltq_s8(a, b) } + } + #[doc = "See [`arch::vclt_s16`]."] + #[inline(always)] + pub fn vclt_s16(self, a: int16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { vclt_s16(a, b) } + } + #[doc = "See [`arch::vcltq_s16`]."] + #[inline(always)] + pub fn vcltq_s16(self, a: int16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { vcltq_s16(a, b) } + } + #[doc = "See [`arch::vclt_s32`]."] + #[inline(always)] + pub fn vclt_s32(self, a: int32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { vclt_s32(a, b) } + } + #[doc = "See [`arch::vcltq_s32`]."] + #[inline(always)] + pub fn vcltq_s32(self, a: int32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { vcltq_s32(a, b) } + } + #[doc = "See [`arch::vclt_u8`]."] + #[inline(always)] + pub fn vclt_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vclt_u8(a, b) } + } + #[doc = "See [`arch::vcltq_u8`]."] + #[inline(always)] + pub fn vcltq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vcltq_u8(a, b) } + } + #[doc = "See [`arch::vclt_u16`]."] + #[inline(always)] + pub fn vclt_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vclt_u16(a, b) } + } + #[doc = "See [`arch::vcltq_u16`]."] + #[inline(always)] + pub fn vcltq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + 
unsafe { vcltq_u16(a, b) } + } + #[doc = "See [`arch::vclt_u32`]."] + #[inline(always)] + pub fn vclt_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vclt_u32(a, b) } + } + #[doc = "See [`arch::vcltq_u32`]."] + #[inline(always)] + pub fn vcltq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vcltq_u32(a, b) } + } + #[doc = "See [`arch::vclz_s8`]."] + #[inline(always)] + pub fn vclz_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vclz_s8(a) } + } + #[doc = "See [`arch::vclzq_s8`]."] + #[inline(always)] + pub fn vclzq_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vclzq_s8(a) } + } + #[doc = "See [`arch::vclz_s16`]."] + #[inline(always)] + pub fn vclz_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vclz_s16(a) } + } + #[doc = "See [`arch::vclzq_s16`]."] + #[inline(always)] + pub fn vclzq_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vclzq_s16(a) } + } + #[doc = "See [`arch::vclz_s32`]."] + #[inline(always)] + pub fn vclz_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vclz_s32(a) } + } + #[doc = "See [`arch::vclzq_s32`]."] + #[inline(always)] + pub fn vclzq_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vclzq_s32(a) } + } + #[doc = "See [`arch::vclz_u16`]."] + #[inline(always)] + pub fn vclz_u16(self, a: uint16x4_t) -> uint16x4_t { + unsafe { vclz_u16(a) } + } + #[doc = "See [`arch::vclzq_u16`]."] + #[inline(always)] + pub fn vclzq_u16(self, a: uint16x8_t) -> uint16x8_t { + unsafe { vclzq_u16(a) } + } + #[doc = "See [`arch::vclz_u32`]."] + #[inline(always)] + pub fn vclz_u32(self, a: uint32x2_t) -> uint32x2_t { + unsafe { vclz_u32(a) } + } + #[doc = "See [`arch::vclzq_u32`]."] + #[inline(always)] + pub fn vclzq_u32(self, a: uint32x4_t) -> uint32x4_t { + unsafe { vclzq_u32(a) } + } + #[doc = "See [`arch::vclz_u8`]."] + #[inline(always)] + pub fn vclz_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vclz_u8(a) } + } + #[doc = "See [`arch::vclzq_u8`]."] + #[inline(always)] + pub fn vclzq_u8(self, a: uint8x16_t) -> 
uint8x16_t { + unsafe { vclzq_u8(a) } + } + #[doc = "See [`arch::vcnt_s8`]."] + #[inline(always)] + pub fn vcnt_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vcnt_s8(a) } + } + #[doc = "See [`arch::vcntq_s8`]."] + #[inline(always)] + pub fn vcntq_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vcntq_s8(a) } + } + #[doc = "See [`arch::vcnt_u8`]."] + #[inline(always)] + pub fn vcnt_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vcnt_u8(a) } + } + #[doc = "See [`arch::vcntq_u8`]."] + #[inline(always)] + pub fn vcntq_u8(self, a: uint8x16_t) -> uint8x16_t { + unsafe { vcntq_u8(a) } + } + #[doc = "See [`arch::vcnt_p8`]."] + #[inline(always)] + pub fn vcnt_p8(self, a: poly8x8_t) -> poly8x8_t { + unsafe { vcnt_p8(a) } + } + #[doc = "See [`arch::vcntq_p8`]."] + #[inline(always)] + pub fn vcntq_p8(self, a: poly8x16_t) -> poly8x16_t { + unsafe { vcntq_p8(a) } + } + #[doc = "See [`arch::vcombine_f32`]."] + #[inline(always)] + pub fn vcombine_f32(self, a: float32x2_t, b: float32x2_t) -> float32x4_t { + unsafe { vcombine_f32(a, b) } + } + #[doc = "See [`arch::vcombine_s8`]."] + #[inline(always)] + pub fn vcombine_s8(self, a: int8x8_t, b: int8x8_t) -> int8x16_t { + unsafe { vcombine_s8(a, b) } + } + #[doc = "See [`arch::vcombine_s16`]."] + #[inline(always)] + pub fn vcombine_s16(self, a: int16x4_t, b: int16x4_t) -> int16x8_t { + unsafe { vcombine_s16(a, b) } + } + #[doc = "See [`arch::vcombine_s32`]."] + #[inline(always)] + pub fn vcombine_s32(self, a: int32x2_t, b: int32x2_t) -> int32x4_t { + unsafe { vcombine_s32(a, b) } + } + #[doc = "See [`arch::vcombine_s64`]."] + #[inline(always)] + pub fn vcombine_s64(self, a: int64x1_t, b: int64x1_t) -> int64x2_t { + unsafe { vcombine_s64(a, b) } + } + #[doc = "See [`arch::vcombine_u8`]."] + #[inline(always)] + pub fn vcombine_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { + unsafe { vcombine_u8(a, b) } + } + #[doc = "See [`arch::vcombine_u16`]."] + #[inline(always)] + pub fn vcombine_u16(self, a: uint16x4_t, b: uint16x4_t) -> 
uint16x8_t { + unsafe { vcombine_u16(a, b) } + } + #[doc = "See [`arch::vcombine_u32`]."] + #[inline(always)] + pub fn vcombine_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { + unsafe { vcombine_u32(a, b) } + } + #[doc = "See [`arch::vcombine_u64`]."] + #[inline(always)] + pub fn vcombine_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { + unsafe { vcombine_u64(a, b) } + } + #[doc = "See [`arch::vcombine_p8`]."] + #[inline(always)] + pub fn vcombine_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { + unsafe { vcombine_p8(a, b) } + } + #[doc = "See [`arch::vcombine_p16`]."] + #[inline(always)] + pub fn vcombine_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { + unsafe { vcombine_p16(a, b) } + } + #[doc = "See [`arch::vcombine_p64`]."] + #[inline(always)] + pub fn vcombine_p64(self, a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { + unsafe { vcombine_p64(a, b) } + } + #[doc = "See [`arch::vcreate_f32`]."] + #[inline(always)] + pub fn vcreate_f32(self, a: u64) -> float32x2_t { + unsafe { vcreate_f32(a) } + } + #[doc = "See [`arch::vcreate_s8`]."] + #[inline(always)] + pub fn vcreate_s8(self, a: u64) -> int8x8_t { + unsafe { vcreate_s8(a) } + } + #[doc = "See [`arch::vcreate_s16`]."] + #[inline(always)] + pub fn vcreate_s16(self, a: u64) -> int16x4_t { + unsafe { vcreate_s16(a) } + } + #[doc = "See [`arch::vcreate_s32`]."] + #[inline(always)] + pub fn vcreate_s32(self, a: u64) -> int32x2_t { + unsafe { vcreate_s32(a) } + } + #[doc = "See [`arch::vcreate_s64`]."] + #[inline(always)] + pub fn vcreate_s64(self, a: u64) -> int64x1_t { + unsafe { vcreate_s64(a) } + } + #[doc = "See [`arch::vcreate_u8`]."] + #[inline(always)] + pub fn vcreate_u8(self, a: u64) -> uint8x8_t { + unsafe { vcreate_u8(a) } + } + #[doc = "See [`arch::vcreate_u16`]."] + #[inline(always)] + pub fn vcreate_u16(self, a: u64) -> uint16x4_t { + unsafe { vcreate_u16(a) } + } + #[doc = "See [`arch::vcreate_u32`]."] + #[inline(always)] + pub fn vcreate_u32(self, a: u64) -> uint32x2_t { 
+ unsafe { vcreate_u32(a) } + } + #[doc = "See [`arch::vcreate_u64`]."] + #[inline(always)] + pub fn vcreate_u64(self, a: u64) -> uint64x1_t { + unsafe { vcreate_u64(a) } + } + #[doc = "See [`arch::vcreate_p8`]."] + #[inline(always)] + pub fn vcreate_p8(self, a: u64) -> poly8x8_t { + unsafe { vcreate_p8(a) } + } + #[doc = "See [`arch::vcreate_p16`]."] + #[inline(always)] + pub fn vcreate_p16(self, a: u64) -> poly16x4_t { + unsafe { vcreate_p16(a) } + } + #[doc = "See [`arch::vcreate_p64`]."] + #[inline(always)] + pub fn vcreate_p64(self, a: u64) -> poly64x1_t { + unsafe { vcreate_p64(a) } + } + #[doc = "See [`arch::vcvt_f32_s32`]."] + #[inline(always)] + pub fn vcvt_f32_s32(self, a: int32x2_t) -> float32x2_t { + unsafe { vcvt_f32_s32(a) } + } + #[doc = "See [`arch::vcvtq_f32_s32`]."] + #[inline(always)] + pub fn vcvtq_f32_s32(self, a: int32x4_t) -> float32x4_t { + unsafe { vcvtq_f32_s32(a) } + } + #[doc = "See [`arch::vcvt_f32_u32`]."] + #[inline(always)] + pub fn vcvt_f32_u32(self, a: uint32x2_t) -> float32x2_t { + unsafe { vcvt_f32_u32(a) } + } + #[doc = "See [`arch::vcvtq_f32_u32`]."] + #[inline(always)] + pub fn vcvtq_f32_u32(self, a: uint32x4_t) -> float32x4_t { + unsafe { vcvtq_f32_u32(a) } + } + #[doc = "See [`arch::vcvt_s32_f32`]."] + #[inline(always)] + pub fn vcvt_s32_f32(self, a: float32x2_t) -> int32x2_t { + unsafe { vcvt_s32_f32(a) } + } + #[doc = "See [`arch::vcvtq_s32_f32`]."] + #[inline(always)] + pub fn vcvtq_s32_f32(self, a: float32x4_t) -> int32x4_t { + unsafe { vcvtq_s32_f32(a) } + } + #[doc = "See [`arch::vcvt_u32_f32`]."] + #[inline(always)] + pub fn vcvt_u32_f32(self, a: float32x2_t) -> uint32x2_t { + unsafe { vcvt_u32_f32(a) } + } + #[doc = "See [`arch::vcvtq_u32_f32`]."] + #[inline(always)] + pub fn vcvtq_u32_f32(self, a: float32x4_t) -> uint32x4_t { + unsafe { vcvtq_u32_f32(a) } + } + #[doc = "See [`arch::vdup_lane_f32`]."] + #[inline(always)] + pub fn vdup_lane_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vdup_lane_f32::(a) } + } 
+ #[doc = "See [`arch::vdup_lane_s32`]."] + #[inline(always)] + pub fn vdup_lane_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vdup_lane_s32::(a) } + } + #[doc = "See [`arch::vdup_lane_u32`]."] + #[inline(always)] + pub fn vdup_lane_u32(self, a: uint32x2_t) -> uint32x2_t { + unsafe { vdup_lane_u32::(a) } + } + #[doc = "See [`arch::vdupq_lane_f32`]."] + #[inline(always)] + pub fn vdupq_lane_f32(self, a: float32x2_t) -> float32x4_t { + unsafe { vdupq_lane_f32::(a) } + } + #[doc = "See [`arch::vdupq_lane_s32`]."] + #[inline(always)] + pub fn vdupq_lane_s32(self, a: int32x2_t) -> int32x4_t { + unsafe { vdupq_lane_s32::(a) } + } + #[doc = "See [`arch::vdupq_lane_u32`]."] + #[inline(always)] + pub fn vdupq_lane_u32(self, a: uint32x2_t) -> uint32x4_t { + unsafe { vdupq_lane_u32::(a) } + } + #[doc = "See [`arch::vdup_lane_p16`]."] + #[inline(always)] + pub fn vdup_lane_p16(self, a: poly16x4_t) -> poly16x4_t { + unsafe { vdup_lane_p16::(a) } + } + #[doc = "See [`arch::vdup_lane_s16`]."] + #[inline(always)] + pub fn vdup_lane_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vdup_lane_s16::(a) } + } + #[doc = "See [`arch::vdup_lane_u16`]."] + #[inline(always)] + pub fn vdup_lane_u16(self, a: uint16x4_t) -> uint16x4_t { + unsafe { vdup_lane_u16::(a) } + } + #[doc = "See [`arch::vdupq_lane_p16`]."] + #[inline(always)] + pub fn vdupq_lane_p16(self, a: poly16x4_t) -> poly16x8_t { + unsafe { vdupq_lane_p16::(a) } + } + #[doc = "See [`arch::vdupq_lane_s16`]."] + #[inline(always)] + pub fn vdupq_lane_s16(self, a: int16x4_t) -> int16x8_t { + unsafe { vdupq_lane_s16::(a) } + } + #[doc = "See [`arch::vdupq_lane_u16`]."] + #[inline(always)] + pub fn vdupq_lane_u16(self, a: uint16x4_t) -> uint16x8_t { + unsafe { vdupq_lane_u16::(a) } + } + #[doc = "See [`arch::vdup_lane_p8`]."] + #[inline(always)] + pub fn vdup_lane_p8(self, a: poly8x8_t) -> poly8x8_t { + unsafe { vdup_lane_p8::(a) } + } + #[doc = "See [`arch::vdup_lane_s8`]."] + #[inline(always)] + pub fn vdup_lane_s8(self, a: 
int8x8_t) -> int8x8_t { + unsafe { vdup_lane_s8::(a) } + } + #[doc = "See [`arch::vdup_lane_u8`]."] + #[inline(always)] + pub fn vdup_lane_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vdup_lane_u8::(a) } + } + #[doc = "See [`arch::vdupq_lane_p8`]."] + #[inline(always)] + pub fn vdupq_lane_p8(self, a: poly8x8_t) -> poly8x16_t { + unsafe { vdupq_lane_p8::(a) } + } + #[doc = "See [`arch::vdupq_lane_s8`]."] + #[inline(always)] + pub fn vdupq_lane_s8(self, a: int8x8_t) -> int8x16_t { + unsafe { vdupq_lane_s8::(a) } + } + #[doc = "See [`arch::vdupq_lane_u8`]."] + #[inline(always)] + pub fn vdupq_lane_u8(self, a: uint8x8_t) -> uint8x16_t { + unsafe { vdupq_lane_u8::(a) } + } + #[doc = "See [`arch::vdup_lane_s64`]."] + #[inline(always)] + pub fn vdup_lane_s64(self, a: int64x1_t) -> int64x1_t { + unsafe { vdup_lane_s64::(a) } + } + #[doc = "See [`arch::vdup_lane_u64`]."] + #[inline(always)] + pub fn vdup_lane_u64(self, a: uint64x1_t) -> uint64x1_t { + unsafe { vdup_lane_u64::(a) } + } + #[doc = "See [`arch::vdup_laneq_f32`]."] + #[inline(always)] + pub fn vdup_laneq_f32(self, a: float32x4_t) -> float32x2_t { + unsafe { vdup_laneq_f32::(a) } + } + #[doc = "See [`arch::vdup_laneq_s32`]."] + #[inline(always)] + pub fn vdup_laneq_s32(self, a: int32x4_t) -> int32x2_t { + unsafe { vdup_laneq_s32::(a) } + } + #[doc = "See [`arch::vdup_laneq_u32`]."] + #[inline(always)] + pub fn vdup_laneq_u32(self, a: uint32x4_t) -> uint32x2_t { + unsafe { vdup_laneq_u32::(a) } + } + #[doc = "See [`arch::vdupq_laneq_f32`]."] + #[inline(always)] + pub fn vdupq_laneq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vdupq_laneq_f32::(a) } + } + #[doc = "See [`arch::vdupq_laneq_s32`]."] + #[inline(always)] + pub fn vdupq_laneq_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vdupq_laneq_s32::(a) } + } + #[doc = "See [`arch::vdupq_laneq_u32`]."] + #[inline(always)] + pub fn vdupq_laneq_u32(self, a: uint32x4_t) -> uint32x4_t { + unsafe { vdupq_laneq_u32::(a) } + } + #[doc = "See 
[`arch::vdup_laneq_p16`]."] + #[inline(always)] + pub fn vdup_laneq_p16(self, a: poly16x8_t) -> poly16x4_t { + unsafe { vdup_laneq_p16::(a) } + } + #[doc = "See [`arch::vdup_laneq_s16`]."] + #[inline(always)] + pub fn vdup_laneq_s16(self, a: int16x8_t) -> int16x4_t { + unsafe { vdup_laneq_s16::(a) } + } + #[doc = "See [`arch::vdup_laneq_u16`]."] + #[inline(always)] + pub fn vdup_laneq_u16(self, a: uint16x8_t) -> uint16x4_t { + unsafe { vdup_laneq_u16::(a) } + } + #[doc = "See [`arch::vdupq_laneq_p16`]."] + #[inline(always)] + pub fn vdupq_laneq_p16(self, a: poly16x8_t) -> poly16x8_t { + unsafe { vdupq_laneq_p16::(a) } + } + #[doc = "See [`arch::vdupq_laneq_s16`]."] + #[inline(always)] + pub fn vdupq_laneq_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vdupq_laneq_s16::(a) } + } + #[doc = "See [`arch::vdupq_laneq_u16`]."] + #[inline(always)] + pub fn vdupq_laneq_u16(self, a: uint16x8_t) -> uint16x8_t { + unsafe { vdupq_laneq_u16::(a) } + } + #[doc = "See [`arch::vdup_laneq_p8`]."] + #[inline(always)] + pub fn vdup_laneq_p8(self, a: poly8x16_t) -> poly8x8_t { + unsafe { vdup_laneq_p8::(a) } + } + #[doc = "See [`arch::vdup_laneq_s8`]."] + #[inline(always)] + pub fn vdup_laneq_s8(self, a: int8x16_t) -> int8x8_t { + unsafe { vdup_laneq_s8::(a) } + } + #[doc = "See [`arch::vdup_laneq_u8`]."] + #[inline(always)] + pub fn vdup_laneq_u8(self, a: uint8x16_t) -> uint8x8_t { + unsafe { vdup_laneq_u8::(a) } + } + #[doc = "See [`arch::vdupq_laneq_p8`]."] + #[inline(always)] + pub fn vdupq_laneq_p8(self, a: poly8x16_t) -> poly8x16_t { + unsafe { vdupq_laneq_p8::(a) } + } + #[doc = "See [`arch::vdupq_laneq_s8`]."] + #[inline(always)] + pub fn vdupq_laneq_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vdupq_laneq_s8::(a) } + } + #[doc = "See [`arch::vdupq_laneq_u8`]."] + #[inline(always)] + pub fn vdupq_laneq_u8(self, a: uint8x16_t) -> uint8x16_t { + unsafe { vdupq_laneq_u8::(a) } + } + #[doc = "See [`arch::vdup_laneq_s64`]."] + #[inline(always)] + pub fn vdup_laneq_s64(self, 
a: int64x2_t) -> int64x1_t { + unsafe { vdup_laneq_s64::(a) } + } + #[doc = "See [`arch::vdup_laneq_u64`]."] + #[inline(always)] + pub fn vdup_laneq_u64(self, a: uint64x2_t) -> uint64x1_t { + unsafe { vdup_laneq_u64::(a) } + } + #[doc = "See [`arch::vdup_n_f32`]."] + #[inline(always)] + pub fn vdup_n_f32(self, value: f32) -> float32x2_t { + unsafe { vdup_n_f32(value) } + } + #[doc = "See [`arch::vdup_n_p16`]."] + #[inline(always)] + pub fn vdup_n_p16(self, value: p16) -> poly16x4_t { + unsafe { vdup_n_p16(value) } + } + #[doc = "See [`arch::vdup_n_p8`]."] + #[inline(always)] + pub fn vdup_n_p8(self, value: p8) -> poly8x8_t { + unsafe { vdup_n_p8(value) } + } + #[doc = "See [`arch::vdup_n_s16`]."] + #[inline(always)] + pub fn vdup_n_s16(self, value: i16) -> int16x4_t { + unsafe { vdup_n_s16(value) } + } + #[doc = "See [`arch::vdup_n_s32`]."] + #[inline(always)] + pub fn vdup_n_s32(self, value: i32) -> int32x2_t { + unsafe { vdup_n_s32(value) } + } + #[doc = "See [`arch::vdup_n_s64`]."] + #[inline(always)] + pub fn vdup_n_s64(self, value: i64) -> int64x1_t { + unsafe { vdup_n_s64(value) } + } + #[doc = "See [`arch::vdup_n_s8`]."] + #[inline(always)] + pub fn vdup_n_s8(self, value: i8) -> int8x8_t { + unsafe { vdup_n_s8(value) } + } + #[doc = "See [`arch::vdup_n_u16`]."] + #[inline(always)] + pub fn vdup_n_u16(self, value: u16) -> uint16x4_t { + unsafe { vdup_n_u16(value) } + } + #[doc = "See [`arch::vdup_n_u32`]."] + #[inline(always)] + pub fn vdup_n_u32(self, value: u32) -> uint32x2_t { + unsafe { vdup_n_u32(value) } + } + #[doc = "See [`arch::vdup_n_u64`]."] + #[inline(always)] + pub fn vdup_n_u64(self, value: u64) -> uint64x1_t { + unsafe { vdup_n_u64(value) } + } + #[doc = "See [`arch::vdup_n_u8`]."] + #[inline(always)] + pub fn vdup_n_u8(self, value: u8) -> uint8x8_t { + unsafe { vdup_n_u8(value) } + } + #[doc = "See [`arch::vdupq_n_f32`]."] + #[inline(always)] + pub fn vdupq_n_f32(self, value: f32) -> float32x4_t { + unsafe { vdupq_n_f32(value) } + } + #[doc = 
"See [`arch::vdupq_n_p16`]."] + #[inline(always)] + pub fn vdupq_n_p16(self, value: p16) -> poly16x8_t { + unsafe { vdupq_n_p16(value) } + } + #[doc = "See [`arch::vdupq_n_p8`]."] + #[inline(always)] + pub fn vdupq_n_p8(self, value: p8) -> poly8x16_t { + unsafe { vdupq_n_p8(value) } + } + #[doc = "See [`arch::vdupq_n_s16`]."] + #[inline(always)] + pub fn vdupq_n_s16(self, value: i16) -> int16x8_t { + unsafe { vdupq_n_s16(value) } + } + #[doc = "See [`arch::vdupq_n_s32`]."] + #[inline(always)] + pub fn vdupq_n_s32(self, value: i32) -> int32x4_t { + unsafe { vdupq_n_s32(value) } + } + #[doc = "See [`arch::vdupq_n_s64`]."] + #[inline(always)] + pub fn vdupq_n_s64(self, value: i64) -> int64x2_t { + unsafe { vdupq_n_s64(value) } + } + #[doc = "See [`arch::vdupq_n_s8`]."] + #[inline(always)] + pub fn vdupq_n_s8(self, value: i8) -> int8x16_t { + unsafe { vdupq_n_s8(value) } + } + #[doc = "See [`arch::vdupq_n_u16`]."] + #[inline(always)] + pub fn vdupq_n_u16(self, value: u16) -> uint16x8_t { + unsafe { vdupq_n_u16(value) } + } + #[doc = "See [`arch::vdupq_n_u32`]."] + #[inline(always)] + pub fn vdupq_n_u32(self, value: u32) -> uint32x4_t { + unsafe { vdupq_n_u32(value) } + } + #[doc = "See [`arch::vdupq_n_u64`]."] + #[inline(always)] + pub fn vdupq_n_u64(self, value: u64) -> uint64x2_t { + unsafe { vdupq_n_u64(value) } + } + #[doc = "See [`arch::vdupq_n_u8`]."] + #[inline(always)] + pub fn vdupq_n_u8(self, value: u8) -> uint8x16_t { + unsafe { vdupq_n_u8(value) } + } + #[doc = "See [`arch::vdupq_lane_s64`]."] + #[inline(always)] + pub fn vdupq_lane_s64(self, a: int64x1_t) -> int64x2_t { + unsafe { vdupq_lane_s64::(a) } + } + #[doc = "See [`arch::vdupq_lane_u64`]."] + #[inline(always)] + pub fn vdupq_lane_u64(self, a: uint64x1_t) -> uint64x2_t { + unsafe { vdupq_lane_u64::(a) } + } + #[doc = "See [`arch::vdupq_laneq_s64`]."] + #[inline(always)] + pub fn vdupq_laneq_s64(self, a: int64x2_t) -> int64x2_t { + unsafe { vdupq_laneq_s64::(a) } + } + #[doc = "See 
[`arch::vdupq_laneq_u64`]."] + #[inline(always)] + pub fn vdupq_laneq_u64(self, a: uint64x2_t) -> uint64x2_t { + unsafe { vdupq_laneq_u64::(a) } + } + #[doc = "See [`arch::veor_s8`]."] + #[inline(always)] + pub fn veor_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { veor_s8(a, b) } + } + #[doc = "See [`arch::veorq_s8`]."] + #[inline(always)] + pub fn veorq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { veorq_s8(a, b) } + } + #[doc = "See [`arch::veor_s16`]."] + #[inline(always)] + pub fn veor_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { veor_s16(a, b) } + } + #[doc = "See [`arch::veorq_s16`]."] + #[inline(always)] + pub fn veorq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { veorq_s16(a, b) } + } + #[doc = "See [`arch::veor_s32`]."] + #[inline(always)] + pub fn veor_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { veor_s32(a, b) } + } + #[doc = "See [`arch::veorq_s32`]."] + #[inline(always)] + pub fn veorq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { veorq_s32(a, b) } + } + #[doc = "See [`arch::veor_s64`]."] + #[inline(always)] + pub fn veor_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { veor_s64(a, b) } + } + #[doc = "See [`arch::veorq_s64`]."] + #[inline(always)] + pub fn veorq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { veorq_s64(a, b) } + } + #[doc = "See [`arch::veor_u8`]."] + #[inline(always)] + pub fn veor_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { veor_u8(a, b) } + } + #[doc = "See [`arch::veorq_u8`]."] + #[inline(always)] + pub fn veorq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { veorq_u8(a, b) } + } + #[doc = "See [`arch::veor_u16`]."] + #[inline(always)] + pub fn veor_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { veor_u16(a, b) } + } + #[doc = "See [`arch::veorq_u16`]."] + #[inline(always)] + pub fn veorq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { 
+ unsafe { veorq_u16(a, b) } + } + #[doc = "See [`arch::veor_u32`]."] + #[inline(always)] + pub fn veor_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { veor_u32(a, b) } + } + #[doc = "See [`arch::veorq_u32`]."] + #[inline(always)] + pub fn veorq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { veorq_u32(a, b) } + } + #[doc = "See [`arch::veor_u64`]."] + #[inline(always)] + pub fn veor_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { veor_u64(a, b) } + } + #[doc = "See [`arch::veorq_u64`]."] + #[inline(always)] + pub fn veorq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { veorq_u64(a, b) } + } + #[doc = "See [`arch::vext_f32`]."] + #[inline(always)] + pub fn vext_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vext_f32::(a, b) } + } + #[doc = "See [`arch::vext_s32`]."] + #[inline(always)] + pub fn vext_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vext_s32::(a, b) } + } + #[doc = "See [`arch::vext_u32`]."] + #[inline(always)] + pub fn vext_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vext_u32::(a, b) } + } + #[doc = "See [`arch::vext_s64`]."] + #[inline(always)] + pub unsafe fn vext_s64(self, a: int64x1_t, _b: int64x1_t) -> int64x1_t { + unsafe { vext_s64::(a, _b) } + } + #[doc = "See [`arch::vext_u64`]."] + #[inline(always)] + pub unsafe fn vext_u64(self, a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t { + unsafe { vext_u64::(a, _b) } + } + #[doc = "See [`arch::vext_s8`]."] + #[inline(always)] + pub fn vext_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vext_s8::(a, b) } + } + #[doc = "See [`arch::vextq_s16`]."] + #[inline(always)] + pub fn vextq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vextq_s16::(a, b) } + } + #[doc = "See [`arch::vext_u8`]."] + #[inline(always)] + pub fn vext_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vext_u8::(a, b) } + } + #[doc = "See 
[`arch::vextq_u16`]."] + #[inline(always)] + pub fn vextq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vextq_u16::(a, b) } + } + #[doc = "See [`arch::vext_p8`]."] + #[inline(always)] + pub fn vext_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { vext_p8::(a, b) } + } + #[doc = "See [`arch::vextq_p16`]."] + #[inline(always)] + pub fn vextq_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + unsafe { vextq_p16::(a, b) } + } + #[doc = "See [`arch::vextq_f32`]."] + #[inline(always)] + pub fn vextq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vextq_f32::(a, b) } + } + #[doc = "See [`arch::vext_s16`]."] + #[inline(always)] + pub fn vext_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vext_s16::(a, b) } + } + #[doc = "See [`arch::vextq_s32`]."] + #[inline(always)] + pub fn vextq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vextq_s32::(a, b) } + } + #[doc = "See [`arch::vext_u16`]."] + #[inline(always)] + pub fn vext_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vext_u16::(a, b) } + } + #[doc = "See [`arch::vextq_u32`]."] + #[inline(always)] + pub fn vextq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vextq_u32::(a, b) } + } + #[doc = "See [`arch::vext_p16`]."] + #[inline(always)] + pub fn vext_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + unsafe { vext_p16::(a, b) } + } + #[doc = "See [`arch::vextq_s64`]."] + #[inline(always)] + pub fn vextq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vextq_s64::(a, b) } + } + #[doc = "See [`arch::vextq_u64`]."] + #[inline(always)] + pub fn vextq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vextq_u64::(a, b) } + } + #[doc = "See [`arch::vextq_s8`]."] + #[inline(always)] + pub fn vextq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vextq_s8::(a, b) } + } + #[doc = "See [`arch::vextq_u8`]."] + #[inline(always)] + pub fn 
vextq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vextq_u8::(a, b) } + } + #[doc = "See [`arch::vextq_p8`]."] + #[inline(always)] + pub fn vextq_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { vextq_p8::(a, b) } + } + #[doc = "See [`arch::vfma_f32`]."] + #[inline(always)] + pub fn vfma_f32(self, a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { vfma_f32(a, b, c) } + } + #[doc = "See [`arch::vfmaq_f32`]."] + #[inline(always)] + pub fn vfmaq_f32(self, a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe { vfmaq_f32(a, b, c) } + } + #[doc = "See [`arch::vfma_n_f32`]."] + #[inline(always)] + pub fn vfma_n_f32(self, a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + unsafe { vfma_n_f32(a, b, c) } + } + #[doc = "See [`arch::vfmaq_n_f32`]."] + #[inline(always)] + pub fn vfmaq_n_f32(self, a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + unsafe { vfmaq_n_f32(a, b, c) } + } + #[doc = "See [`arch::vfms_f32`]."] + #[inline(always)] + pub fn vfms_f32(self, a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { vfms_f32(a, b, c) } + } + #[doc = "See [`arch::vfmsq_f32`]."] + #[inline(always)] + pub fn vfmsq_f32(self, a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe { vfmsq_f32(a, b, c) } + } + #[doc = "See [`arch::vfms_n_f32`]."] + #[inline(always)] + pub fn vfms_n_f32(self, a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + unsafe { vfms_n_f32(a, b, c) } + } + #[doc = "See [`arch::vfmsq_n_f32`]."] + #[inline(always)] + pub fn vfmsq_n_f32(self, a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + unsafe { vfmsq_n_f32(a, b, c) } + } + #[doc = "See [`arch::vget_high_f32`]."] + #[inline(always)] + pub fn vget_high_f32(self, a: float32x4_t) -> float32x2_t { + unsafe { vget_high_f32(a) } + } + #[doc = "See [`arch::vget_high_p16`]."] + #[inline(always)] + pub fn vget_high_p16(self, a: poly16x8_t) -> poly16x4_t { + unsafe { 
vget_high_p16(a) } + } + #[doc = "See [`arch::vget_high_p8`]."] + #[inline(always)] + pub fn vget_high_p8(self, a: poly8x16_t) -> poly8x8_t { + unsafe { vget_high_p8(a) } + } + #[doc = "See [`arch::vget_high_s16`]."] + #[inline(always)] + pub fn vget_high_s16(self, a: int16x8_t) -> int16x4_t { + unsafe { vget_high_s16(a) } + } + #[doc = "See [`arch::vget_high_s32`]."] + #[inline(always)] + pub fn vget_high_s32(self, a: int32x4_t) -> int32x2_t { + unsafe { vget_high_s32(a) } + } + #[doc = "See [`arch::vget_high_s8`]."] + #[inline(always)] + pub fn vget_high_s8(self, a: int8x16_t) -> int8x8_t { + unsafe { vget_high_s8(a) } + } + #[doc = "See [`arch::vget_high_u16`]."] + #[inline(always)] + pub fn vget_high_u16(self, a: uint16x8_t) -> uint16x4_t { + unsafe { vget_high_u16(a) } + } + #[doc = "See [`arch::vget_high_u32`]."] + #[inline(always)] + pub fn vget_high_u32(self, a: uint32x4_t) -> uint32x2_t { + unsafe { vget_high_u32(a) } + } + #[doc = "See [`arch::vget_high_u8`]."] + #[inline(always)] + pub fn vget_high_u8(self, a: uint8x16_t) -> uint8x8_t { + unsafe { vget_high_u8(a) } + } + #[doc = "See [`arch::vget_high_s64`]."] + #[inline(always)] + pub fn vget_high_s64(self, a: int64x2_t) -> int64x1_t { + unsafe { vget_high_s64(a) } + } + #[doc = "See [`arch::vget_high_u64`]."] + #[inline(always)] + pub fn vget_high_u64(self, a: uint64x2_t) -> uint64x1_t { + unsafe { vget_high_u64(a) } + } + #[doc = "See [`arch::vget_lane_f32`]."] + #[inline(always)] + pub fn vget_lane_f32(self, v: float32x2_t) -> f32 { + unsafe { vget_lane_f32::(v) } + } + #[doc = "See [`arch::vget_lane_p16`]."] + #[inline(always)] + pub fn vget_lane_p16(self, v: poly16x4_t) -> p16 { + unsafe { vget_lane_p16::(v) } + } + #[doc = "See [`arch::vget_lane_p8`]."] + #[inline(always)] + pub fn vget_lane_p8(self, v: poly8x8_t) -> p8 { + unsafe { vget_lane_p8::(v) } + } + #[doc = "See [`arch::vget_lane_s16`]."] + #[inline(always)] + pub fn vget_lane_s16(self, v: int16x4_t) -> i16 { + unsafe { vget_lane_s16::(v) 
} + } + #[doc = "See [`arch::vget_lane_s32`]."] + #[inline(always)] + pub fn vget_lane_s32(self, v: int32x2_t) -> i32 { + unsafe { vget_lane_s32::(v) } + } + #[doc = "See [`arch::vget_lane_s8`]."] + #[inline(always)] + pub fn vget_lane_s8(self, v: int8x8_t) -> i8 { + unsafe { vget_lane_s8::(v) } + } + #[doc = "See [`arch::vget_lane_u16`]."] + #[inline(always)] + pub fn vget_lane_u16(self, v: uint16x4_t) -> u16 { + unsafe { vget_lane_u16::(v) } + } + #[doc = "See [`arch::vget_lane_u32`]."] + #[inline(always)] + pub fn vget_lane_u32(self, v: uint32x2_t) -> u32 { + unsafe { vget_lane_u32::(v) } + } + #[doc = "See [`arch::vget_lane_u8`]."] + #[inline(always)] + pub fn vget_lane_u8(self, v: uint8x8_t) -> u8 { + unsafe { vget_lane_u8::(v) } + } + #[doc = "See [`arch::vgetq_lane_f32`]."] + #[inline(always)] + pub fn vgetq_lane_f32(self, v: float32x4_t) -> f32 { + unsafe { vgetq_lane_f32::(v) } + } + #[doc = "See [`arch::vgetq_lane_p16`]."] + #[inline(always)] + pub fn vgetq_lane_p16(self, v: poly16x8_t) -> p16 { + unsafe { vgetq_lane_p16::(v) } + } + #[doc = "See [`arch::vgetq_lane_p64`]."] + #[inline(always)] + pub fn vgetq_lane_p64(self, v: poly64x2_t) -> p64 { + unsafe { vgetq_lane_p64::(v) } + } + #[doc = "See [`arch::vgetq_lane_p8`]."] + #[inline(always)] + pub fn vgetq_lane_p8(self, v: poly8x16_t) -> p8 { + unsafe { vgetq_lane_p8::(v) } + } + #[doc = "See [`arch::vgetq_lane_s16`]."] + #[inline(always)] + pub fn vgetq_lane_s16(self, v: int16x8_t) -> i16 { + unsafe { vgetq_lane_s16::(v) } + } + #[doc = "See [`arch::vgetq_lane_s32`]."] + #[inline(always)] + pub fn vgetq_lane_s32(self, v: int32x4_t) -> i32 { + unsafe { vgetq_lane_s32::(v) } + } + #[doc = "See [`arch::vgetq_lane_s64`]."] + #[inline(always)] + pub fn vgetq_lane_s64(self, v: int64x2_t) -> i64 { + unsafe { vgetq_lane_s64::(v) } + } + #[doc = "See [`arch::vgetq_lane_s8`]."] + #[inline(always)] + pub fn vgetq_lane_s8(self, v: int8x16_t) -> i8 { + unsafe { vgetq_lane_s8::(v) } + } + #[doc = "See 
[`arch::vgetq_lane_u16`]."] + #[inline(always)] + pub fn vgetq_lane_u16(self, v: uint16x8_t) -> u16 { + unsafe { vgetq_lane_u16::(v) } + } + #[doc = "See [`arch::vgetq_lane_u32`]."] + #[inline(always)] + pub fn vgetq_lane_u32(self, v: uint32x4_t) -> u32 { + unsafe { vgetq_lane_u32::(v) } + } + #[doc = "See [`arch::vgetq_lane_u64`]."] + #[inline(always)] + pub fn vgetq_lane_u64(self, v: uint64x2_t) -> u64 { + unsafe { vgetq_lane_u64::(v) } + } + #[doc = "See [`arch::vgetq_lane_u8`]."] + #[inline(always)] + pub fn vgetq_lane_u8(self, v: uint8x16_t) -> u8 { + unsafe { vgetq_lane_u8::(v) } + } + #[doc = "See [`arch::vget_lane_p64`]."] + #[inline(always)] + pub fn vget_lane_p64(self, v: poly64x1_t) -> p64 { + unsafe { vget_lane_p64::(v) } + } + #[doc = "See [`arch::vget_lane_s64`]."] + #[inline(always)] + pub fn vget_lane_s64(self, v: int64x1_t) -> i64 { + unsafe { vget_lane_s64::(v) } + } + #[doc = "See [`arch::vget_lane_u64`]."] + #[inline(always)] + pub fn vget_lane_u64(self, v: uint64x1_t) -> u64 { + unsafe { vget_lane_u64::(v) } + } + #[doc = "See [`arch::vget_low_f32`]."] + #[inline(always)] + pub fn vget_low_f32(self, a: float32x4_t) -> float32x2_t { + unsafe { vget_low_f32(a) } + } + #[doc = "See [`arch::vget_low_p16`]."] + #[inline(always)] + pub fn vget_low_p16(self, a: poly16x8_t) -> poly16x4_t { + unsafe { vget_low_p16(a) } + } + #[doc = "See [`arch::vget_low_p8`]."] + #[inline(always)] + pub fn vget_low_p8(self, a: poly8x16_t) -> poly8x8_t { + unsafe { vget_low_p8(a) } + } + #[doc = "See [`arch::vget_low_s16`]."] + #[inline(always)] + pub fn vget_low_s16(self, a: int16x8_t) -> int16x4_t { + unsafe { vget_low_s16(a) } + } + #[doc = "See [`arch::vget_low_s32`]."] + #[inline(always)] + pub fn vget_low_s32(self, a: int32x4_t) -> int32x2_t { + unsafe { vget_low_s32(a) } + } + #[doc = "See [`arch::vget_low_s8`]."] + #[inline(always)] + pub fn vget_low_s8(self, a: int8x16_t) -> int8x8_t { + unsafe { vget_low_s8(a) } + } + #[doc = "See [`arch::vget_low_u16`]."] + 
#[inline(always)] + pub fn vget_low_u16(self, a: uint16x8_t) -> uint16x4_t { + unsafe { vget_low_u16(a) } + } + #[doc = "See [`arch::vget_low_u32`]."] + #[inline(always)] + pub fn vget_low_u32(self, a: uint32x4_t) -> uint32x2_t { + unsafe { vget_low_u32(a) } + } + #[doc = "See [`arch::vget_low_u8`]."] + #[inline(always)] + pub fn vget_low_u8(self, a: uint8x16_t) -> uint8x8_t { + unsafe { vget_low_u8(a) } + } + #[doc = "See [`arch::vget_low_s64`]."] + #[inline(always)] + pub fn vget_low_s64(self, a: int64x2_t) -> int64x1_t { + unsafe { vget_low_s64(a) } + } + #[doc = "See [`arch::vget_low_u64`]."] + #[inline(always)] + pub fn vget_low_u64(self, a: uint64x2_t) -> uint64x1_t { + unsafe { vget_low_u64(a) } + } + #[doc = "See [`arch::vhadd_s8`]."] + #[inline(always)] + pub fn vhadd_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vhadd_s8(a, b) } + } + #[doc = "See [`arch::vhaddq_s8`]."] + #[inline(always)] + pub fn vhaddq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vhaddq_s8(a, b) } + } + #[doc = "See [`arch::vhadd_s16`]."] + #[inline(always)] + pub fn vhadd_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vhadd_s16(a, b) } + } + #[doc = "See [`arch::vhaddq_s16`]."] + #[inline(always)] + pub fn vhaddq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vhaddq_s16(a, b) } + } + #[doc = "See [`arch::vhadd_s32`]."] + #[inline(always)] + pub fn vhadd_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vhadd_s32(a, b) } + } + #[doc = "See [`arch::vhaddq_s32`]."] + #[inline(always)] + pub fn vhaddq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vhaddq_s32(a, b) } + } + #[doc = "See [`arch::vhadd_u8`]."] + #[inline(always)] + pub fn vhadd_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vhadd_u8(a, b) } + } + #[doc = "See [`arch::vhaddq_u8`]."] + #[inline(always)] + pub fn vhaddq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vhaddq_u8(a, b) } + } + #[doc = 
"See [`arch::vhadd_u16`]."] + #[inline(always)] + pub fn vhadd_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vhadd_u16(a, b) } + } + #[doc = "See [`arch::vhaddq_u16`]."] + #[inline(always)] + pub fn vhaddq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vhaddq_u16(a, b) } + } + #[doc = "See [`arch::vhadd_u32`]."] + #[inline(always)] + pub fn vhadd_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vhadd_u32(a, b) } + } + #[doc = "See [`arch::vhaddq_u32`]."] + #[inline(always)] + pub fn vhaddq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vhaddq_u32(a, b) } + } + #[doc = "See [`arch::vhsub_s16`]."] + #[inline(always)] + pub fn vhsub_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vhsub_s16(a, b) } + } + #[doc = "See [`arch::vhsubq_s16`]."] + #[inline(always)] + pub fn vhsubq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vhsubq_s16(a, b) } + } + #[doc = "See [`arch::vhsub_s32`]."] + #[inline(always)] + pub fn vhsub_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vhsub_s32(a, b) } + } + #[doc = "See [`arch::vhsubq_s32`]."] + #[inline(always)] + pub fn vhsubq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vhsubq_s32(a, b) } + } + #[doc = "See [`arch::vhsub_s8`]."] + #[inline(always)] + pub fn vhsub_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vhsub_s8(a, b) } + } + #[doc = "See [`arch::vhsubq_s8`]."] + #[inline(always)] + pub fn vhsubq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vhsubq_s8(a, b) } + } + #[doc = "See [`arch::vhsub_u8`]."] + #[inline(always)] + pub fn vhsub_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vhsub_u8(a, b) } + } + #[doc = "See [`arch::vhsubq_u8`]."] + #[inline(always)] + pub fn vhsubq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vhsubq_u8(a, b) } + } + #[doc = "See [`arch::vhsub_u16`]."] + #[inline(always)] + pub fn vhsub_u16(self, 
a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vhsub_u16(a, b) } + } + #[doc = "See [`arch::vhsubq_u16`]."] + #[inline(always)] + pub fn vhsubq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vhsubq_u16(a, b) } + } + #[doc = "See [`arch::vhsub_u32`]."] + #[inline(always)] + pub fn vhsub_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vhsub_u32(a, b) } + } + #[doc = "See [`arch::vhsubq_u32`]."] + #[inline(always)] + pub fn vhsubq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vhsubq_u32(a, b) } + } + #[doc = "See [`arch::vld1_dup_f32`]."] + #[inline(always)] + pub unsafe fn vld1_dup_f32(self, ptr: *const f32) -> float32x2_t { + unsafe { vld1_dup_f32(ptr) } + } + #[doc = "See [`arch::vld1_dup_p16`]."] + #[inline(always)] + pub unsafe fn vld1_dup_p16(self, ptr: *const p16) -> poly16x4_t { + unsafe { vld1_dup_p16(ptr) } + } + #[doc = "See [`arch::vld1_dup_p8`]."] + #[inline(always)] + pub unsafe fn vld1_dup_p8(self, ptr: *const p8) -> poly8x8_t { + unsafe { vld1_dup_p8(ptr) } + } + #[doc = "See [`arch::vld1_dup_s16`]."] + #[inline(always)] + pub unsafe fn vld1_dup_s16(self, ptr: *const i16) -> int16x4_t { + unsafe { vld1_dup_s16(ptr) } + } + #[doc = "See [`arch::vld1_dup_s32`]."] + #[inline(always)] + pub unsafe fn vld1_dup_s32(self, ptr: *const i32) -> int32x2_t { + unsafe { vld1_dup_s32(ptr) } + } + #[doc = "See [`arch::vld1_dup_s8`]."] + #[inline(always)] + pub unsafe fn vld1_dup_s8(self, ptr: *const i8) -> int8x8_t { + unsafe { vld1_dup_s8(ptr) } + } + #[doc = "See [`arch::vld1_dup_u16`]."] + #[inline(always)] + pub unsafe fn vld1_dup_u16(self, ptr: *const u16) -> uint16x4_t { + unsafe { vld1_dup_u16(ptr) } + } + #[doc = "See [`arch::vld1_dup_u32`]."] + #[inline(always)] + pub unsafe fn vld1_dup_u32(self, ptr: *const u32) -> uint32x2_t { + unsafe { vld1_dup_u32(ptr) } + } + #[doc = "See [`arch::vld1_dup_u8`]."] + #[inline(always)] + pub unsafe fn vld1_dup_u8(self, ptr: *const u8) -> uint8x8_t { + 
unsafe { vld1_dup_u8(ptr) } + } + #[doc = "See [`arch::vld1q_dup_f32`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_f32(self, ptr: *const f32) -> float32x4_t { + unsafe { vld1q_dup_f32(ptr) } + } + #[doc = "See [`arch::vld1q_dup_p16`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_p16(self, ptr: *const p16) -> poly16x8_t { + unsafe { vld1q_dup_p16(ptr) } + } + #[doc = "See [`arch::vld1q_dup_p8`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_p8(self, ptr: *const p8) -> poly8x16_t { + unsafe { vld1q_dup_p8(ptr) } + } + #[doc = "See [`arch::vld1q_dup_s16`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_s16(self, ptr: *const i16) -> int16x8_t { + unsafe { vld1q_dup_s16(ptr) } + } + #[doc = "See [`arch::vld1q_dup_s32`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_s32(self, ptr: *const i32) -> int32x4_t { + unsafe { vld1q_dup_s32(ptr) } + } + #[doc = "See [`arch::vld1q_dup_s64`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_s64(self, ptr: *const i64) -> int64x2_t { + unsafe { vld1q_dup_s64(ptr) } + } + #[doc = "See [`arch::vld1q_dup_s8`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_s8(self, ptr: *const i8) -> int8x16_t { + unsafe { vld1q_dup_s8(ptr) } + } + #[doc = "See [`arch::vld1q_dup_u16`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_u16(self, ptr: *const u16) -> uint16x8_t { + unsafe { vld1q_dup_u16(ptr) } + } + #[doc = "See [`arch::vld1q_dup_u32`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_u32(self, ptr: *const u32) -> uint32x4_t { + unsafe { vld1q_dup_u32(ptr) } + } + #[doc = "See [`arch::vld1q_dup_u64`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_u64(self, ptr: *const u64) -> uint64x2_t { + unsafe { vld1q_dup_u64(ptr) } + } + #[doc = "See [`arch::vld1q_dup_u8`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_u8(self, ptr: *const u8) -> uint8x16_t { + unsafe { vld1q_dup_u8(ptr) } + } + #[doc = "See [`arch::vld1_dup_p64`]."] + #[inline(always)] + pub unsafe fn vld1_dup_p64(self, ptr: *const p64) -> poly64x1_t { + unsafe { 
vld1_dup_p64(ptr) } + } + #[doc = "See [`arch::vld1_dup_s64`]."] + #[inline(always)] + pub unsafe fn vld1_dup_s64(self, ptr: *const i64) -> int64x1_t { + unsafe { vld1_dup_s64(ptr) } + } + #[doc = "See [`arch::vld1_dup_u64`]."] + #[inline(always)] + pub unsafe fn vld1_dup_u64(self, ptr: *const u64) -> uint64x1_t { + unsafe { vld1_dup_u64(ptr) } + } + #[doc = "See [`arch::vld1_f32_x2`]."] + #[inline(always)] + pub unsafe fn vld1_f32_x2(self, a: *const f32) -> float32x2x2_t { + unsafe { vld1_f32_x2(a) } + } + #[doc = "See [`arch::vld1_f32_x3`]."] + #[inline(always)] + pub unsafe fn vld1_f32_x3(self, a: *const f32) -> float32x2x3_t { + unsafe { vld1_f32_x3(a) } + } + #[doc = "See [`arch::vld1_f32_x4`]."] + #[inline(always)] + pub unsafe fn vld1_f32_x4(self, a: *const f32) -> float32x2x4_t { + unsafe { vld1_f32_x4(a) } + } + #[doc = "See [`arch::vld1q_f32_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_f32_x2(self, a: *const f32) -> float32x4x2_t { + unsafe { vld1q_f32_x2(a) } + } + #[doc = "See [`arch::vld1q_f32_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_f32_x3(self, a: *const f32) -> float32x4x3_t { + unsafe { vld1q_f32_x3(a) } + } + #[doc = "See [`arch::vld1q_f32_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_f32_x4(self, a: *const f32) -> float32x4x4_t { + unsafe { vld1q_f32_x4(a) } + } + #[doc = "See [`arch::vld1_lane_f32`]."] + #[inline(always)] + pub unsafe fn vld1_lane_f32( + self, + ptr: *const f32, + src: float32x2_t, + ) -> float32x2_t { + unsafe { vld1_lane_f32::(ptr, src) } + } + #[doc = "See [`arch::vld1_lane_p16`]."] + #[inline(always)] + pub unsafe fn vld1_lane_p16( + self, + ptr: *const p16, + src: poly16x4_t, + ) -> poly16x4_t { + unsafe { vld1_lane_p16::(ptr, src) } + } + #[doc = "See [`arch::vld1_lane_p8`]."] + #[inline(always)] + pub unsafe fn vld1_lane_p8(self, ptr: *const p8, src: poly8x8_t) -> poly8x8_t { + unsafe { vld1_lane_p8::(ptr, src) } + } + #[doc = "See [`arch::vld1_lane_s16`]."] + #[inline(always)] + pub unsafe fn 
vld1_lane_s16( + self, + ptr: *const i16, + src: int16x4_t, + ) -> int16x4_t { + unsafe { vld1_lane_s16::(ptr, src) } + } + #[doc = "See [`arch::vld1_lane_s32`]."] + #[inline(always)] + pub unsafe fn vld1_lane_s32( + self, + ptr: *const i32, + src: int32x2_t, + ) -> int32x2_t { + unsafe { vld1_lane_s32::(ptr, src) } + } + #[doc = "See [`arch::vld1_lane_s64`]."] + #[inline(always)] + pub unsafe fn vld1_lane_s64( + self, + ptr: *const i64, + src: int64x1_t, + ) -> int64x1_t { + unsafe { vld1_lane_s64::(ptr, src) } + } + #[doc = "See [`arch::vld1_lane_s8`]."] + #[inline(always)] + pub unsafe fn vld1_lane_s8(self, ptr: *const i8, src: int8x8_t) -> int8x8_t { + unsafe { vld1_lane_s8::(ptr, src) } + } + #[doc = "See [`arch::vld1_lane_u16`]."] + #[inline(always)] + pub unsafe fn vld1_lane_u16( + self, + ptr: *const u16, + src: uint16x4_t, + ) -> uint16x4_t { + unsafe { vld1_lane_u16::(ptr, src) } + } + #[doc = "See [`arch::vld1_lane_u32`]."] + #[inline(always)] + pub unsafe fn vld1_lane_u32( + self, + ptr: *const u32, + src: uint32x2_t, + ) -> uint32x2_t { + unsafe { vld1_lane_u32::(ptr, src) } + } + #[doc = "See [`arch::vld1_lane_u64`]."] + #[inline(always)] + pub unsafe fn vld1_lane_u64( + self, + ptr: *const u64, + src: uint64x1_t, + ) -> uint64x1_t { + unsafe { vld1_lane_u64::(ptr, src) } + } + #[doc = "See [`arch::vld1_lane_u8`]."] + #[inline(always)] + pub unsafe fn vld1_lane_u8(self, ptr: *const u8, src: uint8x8_t) -> uint8x8_t { + unsafe { vld1_lane_u8::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_f32`]."] + #[inline(always)] + pub unsafe fn vld1q_lane_f32( + self, + ptr: *const f32, + src: float32x4_t, + ) -> float32x4_t { + unsafe { vld1q_lane_f32::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_p16`]."] + #[inline(always)] + pub unsafe fn vld1q_lane_p16( + self, + ptr: *const p16, + src: poly16x8_t, + ) -> poly16x8_t { + unsafe { vld1q_lane_p16::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_p8`]."] + #[inline(always)] + pub unsafe fn 
vld1q_lane_p8( + self, + ptr: *const p8, + src: poly8x16_t, + ) -> poly8x16_t { + unsafe { vld1q_lane_p8::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_s16`]."] + #[inline(always)] + pub unsafe fn vld1q_lane_s16( + self, + ptr: *const i16, + src: int16x8_t, + ) -> int16x8_t { + unsafe { vld1q_lane_s16::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_s32`]."] + #[inline(always)] + pub unsafe fn vld1q_lane_s32( + self, + ptr: *const i32, + src: int32x4_t, + ) -> int32x4_t { + unsafe { vld1q_lane_s32::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_s64`]."] + #[inline(always)] + pub unsafe fn vld1q_lane_s64( + self, + ptr: *const i64, + src: int64x2_t, + ) -> int64x2_t { + unsafe { vld1q_lane_s64::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_s8`]."] + #[inline(always)] + pub unsafe fn vld1q_lane_s8( + self, + ptr: *const i8, + src: int8x16_t, + ) -> int8x16_t { + unsafe { vld1q_lane_s8::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_u16`]."] + #[inline(always)] + pub unsafe fn vld1q_lane_u16( + self, + ptr: *const u16, + src: uint16x8_t, + ) -> uint16x8_t { + unsafe { vld1q_lane_u16::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_u32`]."] + #[inline(always)] + pub unsafe fn vld1q_lane_u32( + self, + ptr: *const u32, + src: uint32x4_t, + ) -> uint32x4_t { + unsafe { vld1q_lane_u32::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_u64`]."] + #[inline(always)] + pub unsafe fn vld1q_lane_u64( + self, + ptr: *const u64, + src: uint64x2_t, + ) -> uint64x2_t { + unsafe { vld1q_lane_u64::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_u8`]."] + #[inline(always)] + pub unsafe fn vld1q_lane_u8( + self, + ptr: *const u8, + src: uint8x16_t, + ) -> uint8x16_t { + unsafe { vld1q_lane_u8::(ptr, src) } + } + #[doc = "See [`arch::vld1_lane_p64`]."] + #[inline(always)] + pub unsafe fn vld1_lane_p64( + self, + ptr: *const p64, + src: poly64x1_t, + ) -> poly64x1_t { + unsafe { vld1_lane_p64::(ptr, src) } + } + #[doc = "See [`arch::vld1q_lane_p64`]."] + 
#[inline(always)] + pub unsafe fn vld1q_lane_p64( + self, + ptr: *const p64, + src: poly64x2_t, + ) -> poly64x2_t { + unsafe { vld1q_lane_p64::(ptr, src) } + } + #[doc = "See [`arch::vld1_p64_x2`]."] + #[inline(always)] + pub unsafe fn vld1_p64_x2(self, a: *const p64) -> poly64x1x2_t { + unsafe { vld1_p64_x2(a) } + } + #[doc = "See [`arch::vld1_p64_x3`]."] + #[inline(always)] + pub unsafe fn vld1_p64_x3(self, a: *const p64) -> poly64x1x3_t { + unsafe { vld1_p64_x3(a) } + } + #[doc = "See [`arch::vld1_p64_x4`]."] + #[inline(always)] + pub unsafe fn vld1_p64_x4(self, a: *const p64) -> poly64x1x4_t { + unsafe { vld1_p64_x4(a) } + } + #[doc = "See [`arch::vld1q_p64_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_p64_x2(self, a: *const p64) -> poly64x2x2_t { + unsafe { vld1q_p64_x2(a) } + } + #[doc = "See [`arch::vld1q_p64_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_p64_x3(self, a: *const p64) -> poly64x2x3_t { + unsafe { vld1q_p64_x3(a) } + } + #[doc = "See [`arch::vld1q_p64_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_p64_x4(self, a: *const p64) -> poly64x2x4_t { + unsafe { vld1q_p64_x4(a) } + } + #[doc = "See [`arch::vld1_s8_x2`]."] + #[inline(always)] + pub unsafe fn vld1_s8_x2(self, a: *const i8) -> int8x8x2_t { + unsafe { vld1_s8_x2(a) } + } + #[doc = "See [`arch::vld1_s8_x3`]."] + #[inline(always)] + pub unsafe fn vld1_s8_x3(self, a: *const i8) -> int8x8x3_t { + unsafe { vld1_s8_x3(a) } + } + #[doc = "See [`arch::vld1_s8_x4`]."] + #[inline(always)] + pub unsafe fn vld1_s8_x4(self, a: *const i8) -> int8x8x4_t { + unsafe { vld1_s8_x4(a) } + } + #[doc = "See [`arch::vld1q_s8_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_s8_x2(self, a: *const i8) -> int8x16x2_t { + unsafe { vld1q_s8_x2(a) } + } + #[doc = "See [`arch::vld1q_s8_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_s8_x3(self, a: *const i8) -> int8x16x3_t { + unsafe { vld1q_s8_x3(a) } + } + #[doc = "See [`arch::vld1q_s8_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_s8_x4(self, a: *const 
i8) -> int8x16x4_t { + unsafe { vld1q_s8_x4(a) } + } + #[doc = "See [`arch::vld1_s16_x2`]."] + #[inline(always)] + pub unsafe fn vld1_s16_x2(self, a: *const i16) -> int16x4x2_t { + unsafe { vld1_s16_x2(a) } + } + #[doc = "See [`arch::vld1_s16_x3`]."] + #[inline(always)] + pub unsafe fn vld1_s16_x3(self, a: *const i16) -> int16x4x3_t { + unsafe { vld1_s16_x3(a) } + } + #[doc = "See [`arch::vld1_s16_x4`]."] + #[inline(always)] + pub unsafe fn vld1_s16_x4(self, a: *const i16) -> int16x4x4_t { + unsafe { vld1_s16_x4(a) } + } + #[doc = "See [`arch::vld1q_s16_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_s16_x2(self, a: *const i16) -> int16x8x2_t { + unsafe { vld1q_s16_x2(a) } + } + #[doc = "See [`arch::vld1q_s16_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_s16_x3(self, a: *const i16) -> int16x8x3_t { + unsafe { vld1q_s16_x3(a) } + } + #[doc = "See [`arch::vld1q_s16_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_s16_x4(self, a: *const i16) -> int16x8x4_t { + unsafe { vld1q_s16_x4(a) } + } + #[doc = "See [`arch::vld1_s32_x2`]."] + #[inline(always)] + pub unsafe fn vld1_s32_x2(self, a: *const i32) -> int32x2x2_t { + unsafe { vld1_s32_x2(a) } + } + #[doc = "See [`arch::vld1_s32_x3`]."] + #[inline(always)] + pub unsafe fn vld1_s32_x3(self, a: *const i32) -> int32x2x3_t { + unsafe { vld1_s32_x3(a) } + } + #[doc = "See [`arch::vld1_s32_x4`]."] + #[inline(always)] + pub unsafe fn vld1_s32_x4(self, a: *const i32) -> int32x2x4_t { + unsafe { vld1_s32_x4(a) } + } + #[doc = "See [`arch::vld1q_s32_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_s32_x2(self, a: *const i32) -> int32x4x2_t { + unsafe { vld1q_s32_x2(a) } + } + #[doc = "See [`arch::vld1q_s32_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_s32_x3(self, a: *const i32) -> int32x4x3_t { + unsafe { vld1q_s32_x3(a) } + } + #[doc = "See [`arch::vld1q_s32_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_s32_x4(self, a: *const i32) -> int32x4x4_t { + unsafe { vld1q_s32_x4(a) } + } + #[doc = "See 
[`arch::vld1_s64_x2`]."] + #[inline(always)] + pub unsafe fn vld1_s64_x2(self, a: *const i64) -> int64x1x2_t { + unsafe { vld1_s64_x2(a) } + } + #[doc = "See [`arch::vld1_s64_x3`]."] + #[inline(always)] + pub unsafe fn vld1_s64_x3(self, a: *const i64) -> int64x1x3_t { + unsafe { vld1_s64_x3(a) } + } + #[doc = "See [`arch::vld1_s64_x4`]."] + #[inline(always)] + pub unsafe fn vld1_s64_x4(self, a: *const i64) -> int64x1x4_t { + unsafe { vld1_s64_x4(a) } + } + #[doc = "See [`arch::vld1q_s64_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_s64_x2(self, a: *const i64) -> int64x2x2_t { + unsafe { vld1q_s64_x2(a) } + } + #[doc = "See [`arch::vld1q_s64_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_s64_x3(self, a: *const i64) -> int64x2x3_t { + unsafe { vld1q_s64_x3(a) } + } + #[doc = "See [`arch::vld1q_s64_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_s64_x4(self, a: *const i64) -> int64x2x4_t { + unsafe { vld1q_s64_x4(a) } + } + #[doc = "See [`arch::vld1_u8_x2`]."] + #[inline(always)] + pub unsafe fn vld1_u8_x2(self, a: *const u8) -> uint8x8x2_t { + unsafe { vld1_u8_x2(a) } + } + #[doc = "See [`arch::vld1_u8_x3`]."] + #[inline(always)] + pub unsafe fn vld1_u8_x3(self, a: *const u8) -> uint8x8x3_t { + unsafe { vld1_u8_x3(a) } + } + #[doc = "See [`arch::vld1_u8_x4`]."] + #[inline(always)] + pub unsafe fn vld1_u8_x4(self, a: *const u8) -> uint8x8x4_t { + unsafe { vld1_u8_x4(a) } + } + #[doc = "See [`arch::vld1q_u8_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_u8_x2(self, a: *const u8) -> uint8x16x2_t { + unsafe { vld1q_u8_x2(a) } + } + #[doc = "See [`arch::vld1q_u8_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_u8_x3(self, a: *const u8) -> uint8x16x3_t { + unsafe { vld1q_u8_x3(a) } + } + #[doc = "See [`arch::vld1q_u8_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_u8_x4(self, a: *const u8) -> uint8x16x4_t { + unsafe { vld1q_u8_x4(a) } + } + #[doc = "See [`arch::vld1_u16_x2`]."] + #[inline(always)] + pub unsafe fn vld1_u16_x2(self, a: *const u16) -> 
uint16x4x2_t { + unsafe { vld1_u16_x2(a) } + } + #[doc = "See [`arch::vld1_u16_x3`]."] + #[inline(always)] + pub unsafe fn vld1_u16_x3(self, a: *const u16) -> uint16x4x3_t { + unsafe { vld1_u16_x3(a) } + } + #[doc = "See [`arch::vld1_u16_x4`]."] + #[inline(always)] + pub unsafe fn vld1_u16_x4(self, a: *const u16) -> uint16x4x4_t { + unsafe { vld1_u16_x4(a) } + } + #[doc = "See [`arch::vld1q_u16_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_u16_x2(self, a: *const u16) -> uint16x8x2_t { + unsafe { vld1q_u16_x2(a) } + } + #[doc = "See [`arch::vld1q_u16_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_u16_x3(self, a: *const u16) -> uint16x8x3_t { + unsafe { vld1q_u16_x3(a) } + } + #[doc = "See [`arch::vld1q_u16_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_u16_x4(self, a: *const u16) -> uint16x8x4_t { + unsafe { vld1q_u16_x4(a) } + } + #[doc = "See [`arch::vld1_u32_x2`]."] + #[inline(always)] + pub unsafe fn vld1_u32_x2(self, a: *const u32) -> uint32x2x2_t { + unsafe { vld1_u32_x2(a) } + } + #[doc = "See [`arch::vld1_u32_x3`]."] + #[inline(always)] + pub unsafe fn vld1_u32_x3(self, a: *const u32) -> uint32x2x3_t { + unsafe { vld1_u32_x3(a) } + } + #[doc = "See [`arch::vld1_u32_x4`]."] + #[inline(always)] + pub unsafe fn vld1_u32_x4(self, a: *const u32) -> uint32x2x4_t { + unsafe { vld1_u32_x4(a) } + } + #[doc = "See [`arch::vld1q_u32_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_u32_x2(self, a: *const u32) -> uint32x4x2_t { + unsafe { vld1q_u32_x2(a) } + } + #[doc = "See [`arch::vld1q_u32_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_u32_x3(self, a: *const u32) -> uint32x4x3_t { + unsafe { vld1q_u32_x3(a) } + } + #[doc = "See [`arch::vld1q_u32_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_u32_x4(self, a: *const u32) -> uint32x4x4_t { + unsafe { vld1q_u32_x4(a) } + } + #[doc = "See [`arch::vld1_u64_x2`]."] + #[inline(always)] + pub unsafe fn vld1_u64_x2(self, a: *const u64) -> uint64x1x2_t { + unsafe { vld1_u64_x2(a) } + } + #[doc = "See 
[`arch::vld1_u64_x3`]."] + #[inline(always)] + pub unsafe fn vld1_u64_x3(self, a: *const u64) -> uint64x1x3_t { + unsafe { vld1_u64_x3(a) } + } + #[doc = "See [`arch::vld1_u64_x4`]."] + #[inline(always)] + pub unsafe fn vld1_u64_x4(self, a: *const u64) -> uint64x1x4_t { + unsafe { vld1_u64_x4(a) } + } + #[doc = "See [`arch::vld1q_u64_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_u64_x2(self, a: *const u64) -> uint64x2x2_t { + unsafe { vld1q_u64_x2(a) } + } + #[doc = "See [`arch::vld1q_u64_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_u64_x3(self, a: *const u64) -> uint64x2x3_t { + unsafe { vld1q_u64_x3(a) } + } + #[doc = "See [`arch::vld1q_u64_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_u64_x4(self, a: *const u64) -> uint64x2x4_t { + unsafe { vld1q_u64_x4(a) } + } + #[doc = "See [`arch::vld1_p8_x2`]."] + #[inline(always)] + pub unsafe fn vld1_p8_x2(self, a: *const p8) -> poly8x8x2_t { + unsafe { vld1_p8_x2(a) } + } + #[doc = "See [`arch::vld1_p8_x3`]."] + #[inline(always)] + pub unsafe fn vld1_p8_x3(self, a: *const p8) -> poly8x8x3_t { + unsafe { vld1_p8_x3(a) } + } + #[doc = "See [`arch::vld1_p8_x4`]."] + #[inline(always)] + pub unsafe fn vld1_p8_x4(self, a: *const p8) -> poly8x8x4_t { + unsafe { vld1_p8_x4(a) } + } + #[doc = "See [`arch::vld1q_p8_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_p8_x2(self, a: *const p8) -> poly8x16x2_t { + unsafe { vld1q_p8_x2(a) } + } + #[doc = "See [`arch::vld1q_p8_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_p8_x3(self, a: *const p8) -> poly8x16x3_t { + unsafe { vld1q_p8_x3(a) } + } + #[doc = "See [`arch::vld1q_p8_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_p8_x4(self, a: *const p8) -> poly8x16x4_t { + unsafe { vld1q_p8_x4(a) } + } + #[doc = "See [`arch::vld1_p16_x2`]."] + #[inline(always)] + pub unsafe fn vld1_p16_x2(self, a: *const p16) -> poly16x4x2_t { + unsafe { vld1_p16_x2(a) } + } + #[doc = "See [`arch::vld1_p16_x3`]."] + #[inline(always)] + pub unsafe fn vld1_p16_x3(self, a: *const p16) -> 
poly16x4x3_t { + unsafe { vld1_p16_x3(a) } + } + #[doc = "See [`arch::vld1_p16_x4`]."] + #[inline(always)] + pub unsafe fn vld1_p16_x4(self, a: *const p16) -> poly16x4x4_t { + unsafe { vld1_p16_x4(a) } + } + #[doc = "See [`arch::vld1q_p16_x2`]."] + #[inline(always)] + pub unsafe fn vld1q_p16_x2(self, a: *const p16) -> poly16x8x2_t { + unsafe { vld1q_p16_x2(a) } + } + #[doc = "See [`arch::vld1q_p16_x3`]."] + #[inline(always)] + pub unsafe fn vld1q_p16_x3(self, a: *const p16) -> poly16x8x3_t { + unsafe { vld1q_p16_x3(a) } + } + #[doc = "See [`arch::vld1q_p16_x4`]."] + #[inline(always)] + pub unsafe fn vld1q_p16_x4(self, a: *const p16) -> poly16x8x4_t { + unsafe { vld1q_p16_x4(a) } + } + #[doc = "See [`arch::vld1q_dup_p64`]."] + #[inline(always)] + pub unsafe fn vld1q_dup_p64(self, ptr: *const p64) -> poly64x2_t { + unsafe { vld1q_dup_p64(ptr) } + } + #[doc = "See [`arch::vld2_dup_p64`]."] + #[inline(always)] + pub unsafe fn vld2_dup_p64(self, a: *const p64) -> poly64x1x2_t { + unsafe { vld2_dup_p64(a) } + } + #[doc = "See [`arch::vld2_dup_u64`]."] + #[inline(always)] + pub unsafe fn vld2_dup_u64(self, a: *const u64) -> uint64x1x2_t { + unsafe { vld2_dup_u64(a) } + } + #[doc = "See [`arch::vld2_dup_u8`]."] + #[inline(always)] + pub unsafe fn vld2_dup_u8(self, a: *const u8) -> uint8x8x2_t { + unsafe { vld2_dup_u8(a) } + } + #[doc = "See [`arch::vld2q_dup_u8`]."] + #[inline(always)] + pub unsafe fn vld2q_dup_u8(self, a: *const u8) -> uint8x16x2_t { + unsafe { vld2q_dup_u8(a) } + } + #[doc = "See [`arch::vld2_dup_u16`]."] + #[inline(always)] + pub unsafe fn vld2_dup_u16(self, a: *const u16) -> uint16x4x2_t { + unsafe { vld2_dup_u16(a) } + } + #[doc = "See [`arch::vld2q_dup_u16`]."] + #[inline(always)] + pub unsafe fn vld2q_dup_u16(self, a: *const u16) -> uint16x8x2_t { + unsafe { vld2q_dup_u16(a) } + } + #[doc = "See [`arch::vld2_dup_u32`]."] + #[inline(always)] + pub unsafe fn vld2_dup_u32(self, a: *const u32) -> uint32x2x2_t { + unsafe { vld2_dup_u32(a) } + } + #[doc = 
"See [`arch::vld2q_dup_u32`]."] + #[inline(always)] + pub unsafe fn vld2q_dup_u32(self, a: *const u32) -> uint32x4x2_t { + unsafe { vld2q_dup_u32(a) } + } + #[doc = "See [`arch::vld2_dup_p8`]."] + #[inline(always)] + pub unsafe fn vld2_dup_p8(self, a: *const p8) -> poly8x8x2_t { + unsafe { vld2_dup_p8(a) } + } + #[doc = "See [`arch::vld2q_dup_p8`]."] + #[inline(always)] + pub unsafe fn vld2q_dup_p8(self, a: *const p8) -> poly8x16x2_t { + unsafe { vld2q_dup_p8(a) } + } + #[doc = "See [`arch::vld2_dup_p16`]."] + #[inline(always)] + pub unsafe fn vld2_dup_p16(self, a: *const p16) -> poly16x4x2_t { + unsafe { vld2_dup_p16(a) } + } + #[doc = "See [`arch::vld2q_dup_p16`]."] + #[inline(always)] + pub unsafe fn vld2q_dup_p16(self, a: *const p16) -> poly16x8x2_t { + unsafe { vld2q_dup_p16(a) } + } + #[doc = "See [`arch::vld2_lane_f32`]."] + #[inline(always)] + pub unsafe fn vld2_lane_f32( + self, + a: *const f32, + b: float32x2x2_t, + ) -> float32x2x2_t { + unsafe { vld2_lane_f32::(a, b) } + } + #[doc = "See [`arch::vld2q_lane_f32`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_f32( + self, + a: *const f32, + b: float32x4x2_t, + ) -> float32x4x2_t { + unsafe { vld2q_lane_f32::(a, b) } + } + #[doc = "See [`arch::vld2_lane_s8`]."] + #[inline(always)] + pub unsafe fn vld2_lane_s8(self, a: *const i8, b: int8x8x2_t) -> int8x8x2_t { + unsafe { vld2_lane_s8::(a, b) } + } + #[doc = "See [`arch::vld2_lane_s16`]."] + #[inline(always)] + pub unsafe fn vld2_lane_s16( + self, + a: *const i16, + b: int16x4x2_t, + ) -> int16x4x2_t { + unsafe { vld2_lane_s16::(a, b) } + } + #[doc = "See [`arch::vld2q_lane_s16`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_s16( + self, + a: *const i16, + b: int16x8x2_t, + ) -> int16x8x2_t { + unsafe { vld2q_lane_s16::(a, b) } + } + #[doc = "See [`arch::vld2_lane_s32`]."] + #[inline(always)] + pub unsafe fn vld2_lane_s32( + self, + a: *const i32, + b: int32x2x2_t, + ) -> int32x2x2_t { + unsafe { vld2_lane_s32::(a, b) } + } + #[doc = "See 
[`arch::vld2q_lane_s32`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_s32( + self, + a: *const i32, + b: int32x4x2_t, + ) -> int32x4x2_t { + unsafe { vld2q_lane_s32::(a, b) } + } + #[doc = "See [`arch::vld2_lane_u8`]."] + #[inline(always)] + pub unsafe fn vld2_lane_u8(self, a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t { + unsafe { vld2_lane_u8::(a, b) } + } + #[doc = "See [`arch::vld2_lane_u16`]."] + #[inline(always)] + pub unsafe fn vld2_lane_u16( + self, + a: *const u16, + b: uint16x4x2_t, + ) -> uint16x4x2_t { + unsafe { vld2_lane_u16::(a, b) } + } + #[doc = "See [`arch::vld2q_lane_u16`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_u16( + self, + a: *const u16, + b: uint16x8x2_t, + ) -> uint16x8x2_t { + unsafe { vld2q_lane_u16::(a, b) } + } + #[doc = "See [`arch::vld2_lane_u32`]."] + #[inline(always)] + pub unsafe fn vld2_lane_u32( + self, + a: *const u32, + b: uint32x2x2_t, + ) -> uint32x2x2_t { + unsafe { vld2_lane_u32::(a, b) } + } + #[doc = "See [`arch::vld2q_lane_u32`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_u32( + self, + a: *const u32, + b: uint32x4x2_t, + ) -> uint32x4x2_t { + unsafe { vld2q_lane_u32::(a, b) } + } + #[doc = "See [`arch::vld2_lane_p8`]."] + #[inline(always)] + pub unsafe fn vld2_lane_p8(self, a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { + unsafe { vld2_lane_p8::(a, b) } + } + #[doc = "See [`arch::vld2_lane_p16`]."] + #[inline(always)] + pub unsafe fn vld2_lane_p16( + self, + a: *const p16, + b: poly16x4x2_t, + ) -> poly16x4x2_t { + unsafe { vld2_lane_p16::(a, b) } + } + #[doc = "See [`arch::vld2q_lane_p16`]."] + #[inline(always)] + pub unsafe fn vld2q_lane_p16( + self, + a: *const p16, + b: poly16x8x2_t, + ) -> poly16x8x2_t { + unsafe { vld2q_lane_p16::(a, b) } + } + #[doc = "See [`arch::vld2_p64`]."] + #[inline(always)] + pub unsafe fn vld2_p64(self, a: *const p64) -> poly64x1x2_t { + unsafe { vld2_p64(a) } + } + #[doc = "See [`arch::vld2_u64`]."] + #[inline(always)] + pub unsafe fn vld2_u64(self, a: *const u64) -> 
uint64x1x2_t { + unsafe { vld2_u64(a) } + } + #[doc = "See [`arch::vld2_u8`]."] + #[inline(always)] + pub unsafe fn vld2_u8(self, a: *const u8) -> uint8x8x2_t { + unsafe { vld2_u8(a) } + } + #[doc = "See [`arch::vld2q_u8`]."] + #[inline(always)] + pub unsafe fn vld2q_u8(self, a: *const u8) -> uint8x16x2_t { + unsafe { vld2q_u8(a) } + } + #[doc = "See [`arch::vld2_u16`]."] + #[inline(always)] + pub unsafe fn vld2_u16(self, a: *const u16) -> uint16x4x2_t { + unsafe { vld2_u16(a) } + } + #[doc = "See [`arch::vld2q_u16`]."] + #[inline(always)] + pub unsafe fn vld2q_u16(self, a: *const u16) -> uint16x8x2_t { + unsafe { vld2q_u16(a) } + } + #[doc = "See [`arch::vld2_u32`]."] + #[inline(always)] + pub unsafe fn vld2_u32(self, a: *const u32) -> uint32x2x2_t { + unsafe { vld2_u32(a) } + } + #[doc = "See [`arch::vld2q_u32`]."] + #[inline(always)] + pub unsafe fn vld2q_u32(self, a: *const u32) -> uint32x4x2_t { + unsafe { vld2q_u32(a) } + } + #[doc = "See [`arch::vld2_p8`]."] + #[inline(always)] + pub unsafe fn vld2_p8(self, a: *const p8) -> poly8x8x2_t { + unsafe { vld2_p8(a) } + } + #[doc = "See [`arch::vld2q_p8`]."] + #[inline(always)] + pub unsafe fn vld2q_p8(self, a: *const p8) -> poly8x16x2_t { + unsafe { vld2q_p8(a) } + } + #[doc = "See [`arch::vld2_p16`]."] + #[inline(always)] + pub unsafe fn vld2_p16(self, a: *const p16) -> poly16x4x2_t { + unsafe { vld2_p16(a) } + } + #[doc = "See [`arch::vld2q_p16`]."] + #[inline(always)] + pub unsafe fn vld2q_p16(self, a: *const p16) -> poly16x8x2_t { + unsafe { vld2q_p16(a) } + } + #[doc = "See [`arch::vld3_dup_f32`]."] + #[inline(always)] + pub unsafe fn vld3_dup_f32(self, a: *const f32) -> float32x2x3_t { + unsafe { vld3_dup_f32(a) } + } + #[doc = "See [`arch::vld3q_dup_f32`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_f32(self, a: *const f32) -> float32x4x3_t { + unsafe { vld3q_dup_f32(a) } + } + #[doc = "See [`arch::vld3_dup_s8`]."] + #[inline(always)] + pub unsafe fn vld3_dup_s8(self, a: *const i8) -> int8x8x3_t { + 
unsafe { vld3_dup_s8(a) } + } + #[doc = "See [`arch::vld3q_dup_s8`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_s8(self, a: *const i8) -> int8x16x3_t { + unsafe { vld3q_dup_s8(a) } + } + #[doc = "See [`arch::vld3_dup_s16`]."] + #[inline(always)] + pub unsafe fn vld3_dup_s16(self, a: *const i16) -> int16x4x3_t { + unsafe { vld3_dup_s16(a) } + } + #[doc = "See [`arch::vld3q_dup_s16`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_s16(self, a: *const i16) -> int16x8x3_t { + unsafe { vld3q_dup_s16(a) } + } + #[doc = "See [`arch::vld3_dup_s32`]."] + #[inline(always)] + pub unsafe fn vld3_dup_s32(self, a: *const i32) -> int32x2x3_t { + unsafe { vld3_dup_s32(a) } + } + #[doc = "See [`arch::vld3q_dup_s32`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_s32(self, a: *const i32) -> int32x4x3_t { + unsafe { vld3q_dup_s32(a) } + } + #[doc = "See [`arch::vld3_dup_s64`]."] + #[inline(always)] + pub unsafe fn vld3_dup_s64(self, a: *const i64) -> int64x1x3_t { + unsafe { vld3_dup_s64(a) } + } + #[doc = "See [`arch::vld3_dup_p64`]."] + #[inline(always)] + pub unsafe fn vld3_dup_p64(self, a: *const p64) -> poly64x1x3_t { + unsafe { vld3_dup_p64(a) } + } + #[doc = "See [`arch::vld3_dup_u64`]."] + #[inline(always)] + pub unsafe fn vld3_dup_u64(self, a: *const u64) -> uint64x1x3_t { + unsafe { vld3_dup_u64(a) } + } + #[doc = "See [`arch::vld3_dup_u8`]."] + #[inline(always)] + pub unsafe fn vld3_dup_u8(self, a: *const u8) -> uint8x8x3_t { + unsafe { vld3_dup_u8(a) } + } + #[doc = "See [`arch::vld3q_dup_u8`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_u8(self, a: *const u8) -> uint8x16x3_t { + unsafe { vld3q_dup_u8(a) } + } + #[doc = "See [`arch::vld3_dup_u16`]."] + #[inline(always)] + pub unsafe fn vld3_dup_u16(self, a: *const u16) -> uint16x4x3_t { + unsafe { vld3_dup_u16(a) } + } + #[doc = "See [`arch::vld3q_dup_u16`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_u16(self, a: *const u16) -> uint16x8x3_t { + unsafe { vld3q_dup_u16(a) } + } + #[doc = "See 
[`arch::vld3_dup_u32`]."] + #[inline(always)] + pub unsafe fn vld3_dup_u32(self, a: *const u32) -> uint32x2x3_t { + unsafe { vld3_dup_u32(a) } + } + #[doc = "See [`arch::vld3q_dup_u32`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_u32(self, a: *const u32) -> uint32x4x3_t { + unsafe { vld3q_dup_u32(a) } + } + #[doc = "See [`arch::vld3_dup_p8`]."] + #[inline(always)] + pub unsafe fn vld3_dup_p8(self, a: *const p8) -> poly8x8x3_t { + unsafe { vld3_dup_p8(a) } + } + #[doc = "See [`arch::vld3q_dup_p8`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_p8(self, a: *const p8) -> poly8x16x3_t { + unsafe { vld3q_dup_p8(a) } + } + #[doc = "See [`arch::vld3_dup_p16`]."] + #[inline(always)] + pub unsafe fn vld3_dup_p16(self, a: *const p16) -> poly16x4x3_t { + unsafe { vld3_dup_p16(a) } + } + #[doc = "See [`arch::vld3q_dup_p16`]."] + #[inline(always)] + pub unsafe fn vld3q_dup_p16(self, a: *const p16) -> poly16x8x3_t { + unsafe { vld3q_dup_p16(a) } + } + #[doc = "See [`arch::vld3_f32`]."] + #[inline(always)] + pub unsafe fn vld3_f32(self, a: *const f32) -> float32x2x3_t { + unsafe { vld3_f32(a) } + } + #[doc = "See [`arch::vld3q_f32`]."] + #[inline(always)] + pub unsafe fn vld3q_f32(self, a: *const f32) -> float32x4x3_t { + unsafe { vld3q_f32(a) } + } + #[doc = "See [`arch::vld3_s8`]."] + #[inline(always)] + pub unsafe fn vld3_s8(self, a: *const i8) -> int8x8x3_t { + unsafe { vld3_s8(a) } + } + #[doc = "See [`arch::vld3q_s8`]."] + #[inline(always)] + pub unsafe fn vld3q_s8(self, a: *const i8) -> int8x16x3_t { + unsafe { vld3q_s8(a) } + } + #[doc = "See [`arch::vld3_s16`]."] + #[inline(always)] + pub unsafe fn vld3_s16(self, a: *const i16) -> int16x4x3_t { + unsafe { vld3_s16(a) } + } + #[doc = "See [`arch::vld3q_s16`]."] + #[inline(always)] + pub unsafe fn vld3q_s16(self, a: *const i16) -> int16x8x3_t { + unsafe { vld3q_s16(a) } + } + #[doc = "See [`arch::vld3_s32`]."] + #[inline(always)] + pub unsafe fn vld3_s32(self, a: *const i32) -> int32x2x3_t { + unsafe { vld3_s32(a) } 
+ } + #[doc = "See [`arch::vld3q_s32`]."] + #[inline(always)] + pub unsafe fn vld3q_s32(self, a: *const i32) -> int32x4x3_t { + unsafe { vld3q_s32(a) } + } + #[doc = "See [`arch::vld3_lane_f32`]."] + #[inline(always)] + pub unsafe fn vld3_lane_f32( + self, + a: *const f32, + b: float32x2x3_t, + ) -> float32x2x3_t { + unsafe { vld3_lane_f32::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_f32`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_f32( + self, + a: *const f32, + b: float32x4x3_t, + ) -> float32x4x3_t { + unsafe { vld3q_lane_f32::(a, b) } + } + #[doc = "See [`arch::vld3_lane_s8`]."] + #[inline(always)] + pub unsafe fn vld3_lane_s8(self, a: *const i8, b: int8x8x3_t) -> int8x8x3_t { + unsafe { vld3_lane_s8::(a, b) } + } + #[doc = "See [`arch::vld3_lane_s16`]."] + #[inline(always)] + pub unsafe fn vld3_lane_s16( + self, + a: *const i16, + b: int16x4x3_t, + ) -> int16x4x3_t { + unsafe { vld3_lane_s16::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_s16`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_s16( + self, + a: *const i16, + b: int16x8x3_t, + ) -> int16x8x3_t { + unsafe { vld3q_lane_s16::(a, b) } + } + #[doc = "See [`arch::vld3_lane_s32`]."] + #[inline(always)] + pub unsafe fn vld3_lane_s32( + self, + a: *const i32, + b: int32x2x3_t, + ) -> int32x2x3_t { + unsafe { vld3_lane_s32::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_s32`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_s32( + self, + a: *const i32, + b: int32x4x3_t, + ) -> int32x4x3_t { + unsafe { vld3q_lane_s32::(a, b) } + } + #[doc = "See [`arch::vld3_lane_u8`]."] + #[inline(always)] + pub unsafe fn vld3_lane_u8(self, a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { + unsafe { vld3_lane_u8::(a, b) } + } + #[doc = "See [`arch::vld3_lane_u16`]."] + #[inline(always)] + pub unsafe fn vld3_lane_u16( + self, + a: *const u16, + b: uint16x4x3_t, + ) -> uint16x4x3_t { + unsafe { vld3_lane_u16::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_u16`]."] + #[inline(always)] + pub unsafe fn 
vld3q_lane_u16( + self, + a: *const u16, + b: uint16x8x3_t, + ) -> uint16x8x3_t { + unsafe { vld3q_lane_u16::(a, b) } + } + #[doc = "See [`arch::vld3_lane_u32`]."] + #[inline(always)] + pub unsafe fn vld3_lane_u32( + self, + a: *const u32, + b: uint32x2x3_t, + ) -> uint32x2x3_t { + unsafe { vld3_lane_u32::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_u32`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_u32( + self, + a: *const u32, + b: uint32x4x3_t, + ) -> uint32x4x3_t { + unsafe { vld3q_lane_u32::(a, b) } + } + #[doc = "See [`arch::vld3_lane_p8`]."] + #[inline(always)] + pub unsafe fn vld3_lane_p8(self, a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t { + unsafe { vld3_lane_p8::(a, b) } + } + #[doc = "See [`arch::vld3_lane_p16`]."] + #[inline(always)] + pub unsafe fn vld3_lane_p16( + self, + a: *const p16, + b: poly16x4x3_t, + ) -> poly16x4x3_t { + unsafe { vld3_lane_p16::(a, b) } + } + #[doc = "See [`arch::vld3q_lane_p16`]."] + #[inline(always)] + pub unsafe fn vld3q_lane_p16( + self, + a: *const p16, + b: poly16x8x3_t, + ) -> poly16x8x3_t { + unsafe { vld3q_lane_p16::(a, b) } + } + #[doc = "See [`arch::vld3_p64`]."] + #[inline(always)] + pub unsafe fn vld3_p64(self, a: *const p64) -> poly64x1x3_t { + unsafe { vld3_p64(a) } + } + #[doc = "See [`arch::vld3_s64`]."] + #[inline(always)] + pub unsafe fn vld3_s64(self, a: *const i64) -> int64x1x3_t { + unsafe { vld3_s64(a) } + } + #[doc = "See [`arch::vld3_u64`]."] + #[inline(always)] + pub unsafe fn vld3_u64(self, a: *const u64) -> uint64x1x3_t { + unsafe { vld3_u64(a) } + } + #[doc = "See [`arch::vld3_u8`]."] + #[inline(always)] + pub unsafe fn vld3_u8(self, a: *const u8) -> uint8x8x3_t { + unsafe { vld3_u8(a) } + } + #[doc = "See [`arch::vld3q_u8`]."] + #[inline(always)] + pub unsafe fn vld3q_u8(self, a: *const u8) -> uint8x16x3_t { + unsafe { vld3q_u8(a) } + } + #[doc = "See [`arch::vld3_u16`]."] + #[inline(always)] + pub unsafe fn vld3_u16(self, a: *const u16) -> uint16x4x3_t { + unsafe { vld3_u16(a) } + } + 
#[doc = "See [`arch::vld3q_u16`]."] + #[inline(always)] + pub unsafe fn vld3q_u16(self, a: *const u16) -> uint16x8x3_t { + unsafe { vld3q_u16(a) } + } + #[doc = "See [`arch::vld3_u32`]."] + #[inline(always)] + pub unsafe fn vld3_u32(self, a: *const u32) -> uint32x2x3_t { + unsafe { vld3_u32(a) } + } + #[doc = "See [`arch::vld3q_u32`]."] + #[inline(always)] + pub unsafe fn vld3q_u32(self, a: *const u32) -> uint32x4x3_t { + unsafe { vld3q_u32(a) } + } + #[doc = "See [`arch::vld3_p8`]."] + #[inline(always)] + pub unsafe fn vld3_p8(self, a: *const p8) -> poly8x8x3_t { + unsafe { vld3_p8(a) } + } + #[doc = "See [`arch::vld3q_p8`]."] + #[inline(always)] + pub unsafe fn vld3q_p8(self, a: *const p8) -> poly8x16x3_t { + unsafe { vld3q_p8(a) } + } + #[doc = "See [`arch::vld3_p16`]."] + #[inline(always)] + pub unsafe fn vld3_p16(self, a: *const p16) -> poly16x4x3_t { + unsafe { vld3_p16(a) } + } + #[doc = "See [`arch::vld3q_p16`]."] + #[inline(always)] + pub unsafe fn vld3q_p16(self, a: *const p16) -> poly16x8x3_t { + unsafe { vld3q_p16(a) } + } + #[doc = "See [`arch::vld4_dup_s64`]."] + #[inline(always)] + pub unsafe fn vld4_dup_s64(self, a: *const i64) -> int64x1x4_t { + unsafe { vld4_dup_s64(a) } + } + #[doc = "See [`arch::vld4_dup_p64`]."] + #[inline(always)] + pub unsafe fn vld4_dup_p64(self, a: *const p64) -> poly64x1x4_t { + unsafe { vld4_dup_p64(a) } + } + #[doc = "See [`arch::vld4_dup_u64`]."] + #[inline(always)] + pub unsafe fn vld4_dup_u64(self, a: *const u64) -> uint64x1x4_t { + unsafe { vld4_dup_u64(a) } + } + #[doc = "See [`arch::vld4_dup_u8`]."] + #[inline(always)] + pub unsafe fn vld4_dup_u8(self, a: *const u8) -> uint8x8x4_t { + unsafe { vld4_dup_u8(a) } + } + #[doc = "See [`arch::vld4q_dup_u8`]."] + #[inline(always)] + pub unsafe fn vld4q_dup_u8(self, a: *const u8) -> uint8x16x4_t { + unsafe { vld4q_dup_u8(a) } + } + #[doc = "See [`arch::vld4_dup_u16`]."] + #[inline(always)] + pub unsafe fn vld4_dup_u16(self, a: *const u16) -> uint16x4x4_t { + unsafe { 
vld4_dup_u16(a) } + } + #[doc = "See [`arch::vld4q_dup_u16`]."] + #[inline(always)] + pub unsafe fn vld4q_dup_u16(self, a: *const u16) -> uint16x8x4_t { + unsafe { vld4q_dup_u16(a) } + } + #[doc = "See [`arch::vld4_dup_u32`]."] + #[inline(always)] + pub unsafe fn vld4_dup_u32(self, a: *const u32) -> uint32x2x4_t { + unsafe { vld4_dup_u32(a) } + } + #[doc = "See [`arch::vld4q_dup_u32`]."] + #[inline(always)] + pub unsafe fn vld4q_dup_u32(self, a: *const u32) -> uint32x4x4_t { + unsafe { vld4q_dup_u32(a) } + } + #[doc = "See [`arch::vld4_dup_p8`]."] + #[inline(always)] + pub unsafe fn vld4_dup_p8(self, a: *const p8) -> poly8x8x4_t { + unsafe { vld4_dup_p8(a) } + } + #[doc = "See [`arch::vld4q_dup_p8`]."] + #[inline(always)] + pub unsafe fn vld4q_dup_p8(self, a: *const p8) -> poly8x16x4_t { + unsafe { vld4q_dup_p8(a) } + } + #[doc = "See [`arch::vld4_dup_p16`]."] + #[inline(always)] + pub unsafe fn vld4_dup_p16(self, a: *const p16) -> poly16x4x4_t { + unsafe { vld4_dup_p16(a) } + } + #[doc = "See [`arch::vld4q_dup_p16`]."] + #[inline(always)] + pub unsafe fn vld4q_dup_p16(self, a: *const p16) -> poly16x8x4_t { + unsafe { vld4q_dup_p16(a) } + } + #[doc = "See [`arch::vld4_f32`]."] + #[inline(always)] + pub unsafe fn vld4_f32(self, a: *const f32) -> float32x2x4_t { + unsafe { vld4_f32(a) } + } + #[doc = "See [`arch::vld4q_f32`]."] + #[inline(always)] + pub unsafe fn vld4q_f32(self, a: *const f32) -> float32x4x4_t { + unsafe { vld4q_f32(a) } + } + #[doc = "See [`arch::vld4_s8`]."] + #[inline(always)] + pub unsafe fn vld4_s8(self, a: *const i8) -> int8x8x4_t { + unsafe { vld4_s8(a) } + } + #[doc = "See [`arch::vld4q_s8`]."] + #[inline(always)] + pub unsafe fn vld4q_s8(self, a: *const i8) -> int8x16x4_t { + unsafe { vld4q_s8(a) } + } + #[doc = "See [`arch::vld4_s16`]."] + #[inline(always)] + pub unsafe fn vld4_s16(self, a: *const i16) -> int16x4x4_t { + unsafe { vld4_s16(a) } + } + #[doc = "See [`arch::vld4q_s16`]."] + #[inline(always)] + pub unsafe fn vld4q_s16(self, a: 
*const i16) -> int16x8x4_t { + unsafe { vld4q_s16(a) } + } + #[doc = "See [`arch::vld4_s32`]."] + #[inline(always)] + pub unsafe fn vld4_s32(self, a: *const i32) -> int32x2x4_t { + unsafe { vld4_s32(a) } + } + #[doc = "See [`arch::vld4q_s32`]."] + #[inline(always)] + pub unsafe fn vld4q_s32(self, a: *const i32) -> int32x4x4_t { + unsafe { vld4q_s32(a) } + } + #[doc = "See [`arch::vld4_lane_f32`]."] + #[inline(always)] + pub unsafe fn vld4_lane_f32( + self, + a: *const f32, + b: float32x2x4_t, + ) -> float32x2x4_t { + unsafe { vld4_lane_f32::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_f32`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_f32( + self, + a: *const f32, + b: float32x4x4_t, + ) -> float32x4x4_t { + unsafe { vld4q_lane_f32::(a, b) } + } + #[doc = "See [`arch::vld4_lane_s8`]."] + #[inline(always)] + pub unsafe fn vld4_lane_s8(self, a: *const i8, b: int8x8x4_t) -> int8x8x4_t { + unsafe { vld4_lane_s8::(a, b) } + } + #[doc = "See [`arch::vld4_lane_s16`]."] + #[inline(always)] + pub unsafe fn vld4_lane_s16( + self, + a: *const i16, + b: int16x4x4_t, + ) -> int16x4x4_t { + unsafe { vld4_lane_s16::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_s16`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_s16( + self, + a: *const i16, + b: int16x8x4_t, + ) -> int16x8x4_t { + unsafe { vld4q_lane_s16::(a, b) } + } + #[doc = "See [`arch::vld4_lane_s32`]."] + #[inline(always)] + pub unsafe fn vld4_lane_s32( + self, + a: *const i32, + b: int32x2x4_t, + ) -> int32x2x4_t { + unsafe { vld4_lane_s32::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_s32`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_s32( + self, + a: *const i32, + b: int32x4x4_t, + ) -> int32x4x4_t { + unsafe { vld4q_lane_s32::(a, b) } + } + #[doc = "See [`arch::vld4_lane_u8`]."] + #[inline(always)] + pub unsafe fn vld4_lane_u8(self, a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t { + unsafe { vld4_lane_u8::(a, b) } + } + #[doc = "See [`arch::vld4_lane_u16`]."] + #[inline(always)] + pub unsafe fn 
vld4_lane_u16( + self, + a: *const u16, + b: uint16x4x4_t, + ) -> uint16x4x4_t { + unsafe { vld4_lane_u16::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_u16`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_u16( + self, + a: *const u16, + b: uint16x8x4_t, + ) -> uint16x8x4_t { + unsafe { vld4q_lane_u16::(a, b) } + } + #[doc = "See [`arch::vld4_lane_u32`]."] + #[inline(always)] + pub unsafe fn vld4_lane_u32( + self, + a: *const u32, + b: uint32x2x4_t, + ) -> uint32x2x4_t { + unsafe { vld4_lane_u32::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_u32`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_u32( + self, + a: *const u32, + b: uint32x4x4_t, + ) -> uint32x4x4_t { + unsafe { vld4q_lane_u32::(a, b) } + } + #[doc = "See [`arch::vld4_lane_p8`]."] + #[inline(always)] + pub unsafe fn vld4_lane_p8(self, a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t { + unsafe { vld4_lane_p8::(a, b) } + } + #[doc = "See [`arch::vld4_lane_p16`]."] + #[inline(always)] + pub unsafe fn vld4_lane_p16( + self, + a: *const p16, + b: poly16x4x4_t, + ) -> poly16x4x4_t { + unsafe { vld4_lane_p16::(a, b) } + } + #[doc = "See [`arch::vld4q_lane_p16`]."] + #[inline(always)] + pub unsafe fn vld4q_lane_p16( + self, + a: *const p16, + b: poly16x8x4_t, + ) -> poly16x8x4_t { + unsafe { vld4q_lane_p16::(a, b) } + } + #[doc = "See [`arch::vld4_p64`]."] + #[inline(always)] + pub unsafe fn vld4_p64(self, a: *const p64) -> poly64x1x4_t { + unsafe { vld4_p64(a) } + } + #[doc = "See [`arch::vld4_s64`]."] + #[inline(always)] + pub unsafe fn vld4_s64(self, a: *const i64) -> int64x1x4_t { + unsafe { vld4_s64(a) } + } + #[doc = "See [`arch::vld4_u64`]."] + #[inline(always)] + pub unsafe fn vld4_u64(self, a: *const u64) -> uint64x1x4_t { + unsafe { vld4_u64(a) } + } + #[doc = "See [`arch::vld4_u8`]."] + #[inline(always)] + pub unsafe fn vld4_u8(self, a: *const u8) -> uint8x8x4_t { + unsafe { vld4_u8(a) } + } + #[doc = "See [`arch::vld4q_u8`]."] + #[inline(always)] + pub unsafe fn vld4q_u8(self, a: *const u8) -> 
uint8x16x4_t { + unsafe { vld4q_u8(a) } + } + #[doc = "See [`arch::vld4_u16`]."] + #[inline(always)] + pub unsafe fn vld4_u16(self, a: *const u16) -> uint16x4x4_t { + unsafe { vld4_u16(a) } + } + #[doc = "See [`arch::vld4q_u16`]."] + #[inline(always)] + pub unsafe fn vld4q_u16(self, a: *const u16) -> uint16x8x4_t { + unsafe { vld4q_u16(a) } + } + #[doc = "See [`arch::vld4_u32`]."] + #[inline(always)] + pub unsafe fn vld4_u32(self, a: *const u32) -> uint32x2x4_t { + unsafe { vld4_u32(a) } + } + #[doc = "See [`arch::vld4q_u32`]."] + #[inline(always)] + pub unsafe fn vld4q_u32(self, a: *const u32) -> uint32x4x4_t { + unsafe { vld4q_u32(a) } + } + #[doc = "See [`arch::vld4_p8`]."] + #[inline(always)] + pub unsafe fn vld4_p8(self, a: *const p8) -> poly8x8x4_t { + unsafe { vld4_p8(a) } + } + #[doc = "See [`arch::vld4q_p8`]."] + #[inline(always)] + pub unsafe fn vld4q_p8(self, a: *const p8) -> poly8x16x4_t { + unsafe { vld4q_p8(a) } + } + #[doc = "See [`arch::vld4_p16`]."] + #[inline(always)] + pub unsafe fn vld4_p16(self, a: *const p16) -> poly16x4x4_t { + unsafe { vld4_p16(a) } + } + #[doc = "See [`arch::vld4q_p16`]."] + #[inline(always)] + pub unsafe fn vld4q_p16(self, a: *const p16) -> poly16x8x4_t { + unsafe { vld4q_p16(a) } + } + #[doc = "See [`arch::vldrq_p128`]."] + #[inline(always)] + pub unsafe fn vldrq_p128(self, a: *const p128) -> p128 { + unsafe { vldrq_p128(a) } + } + #[doc = "See [`arch::vmax_f32`]."] + #[inline(always)] + pub fn vmax_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vmax_f32(a, b) } + } + #[doc = "See [`arch::vmaxq_f32`]."] + #[inline(always)] + pub fn vmaxq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vmaxq_f32(a, b) } + } + #[doc = "See [`arch::vmax_s8`]."] + #[inline(always)] + pub fn vmax_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vmax_s8(a, b) } + } + #[doc = "See [`arch::vmaxq_s8`]."] + #[inline(always)] + pub fn vmaxq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + 
unsafe { vmaxq_s8(a, b) } + } + #[doc = "See [`arch::vmax_s16`]."] + #[inline(always)] + pub fn vmax_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vmax_s16(a, b) } + } + #[doc = "See [`arch::vmaxq_s16`]."] + #[inline(always)] + pub fn vmaxq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vmaxq_s16(a, b) } + } + #[doc = "See [`arch::vmax_s32`]."] + #[inline(always)] + pub fn vmax_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vmax_s32(a, b) } + } + #[doc = "See [`arch::vmaxq_s32`]."] + #[inline(always)] + pub fn vmaxq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vmaxq_s32(a, b) } + } + #[doc = "See [`arch::vmax_u8`]."] + #[inline(always)] + pub fn vmax_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vmax_u8(a, b) } + } + #[doc = "See [`arch::vmaxq_u8`]."] + #[inline(always)] + pub fn vmaxq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vmaxq_u8(a, b) } + } + #[doc = "See [`arch::vmax_u16`]."] + #[inline(always)] + pub fn vmax_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vmax_u16(a, b) } + } + #[doc = "See [`arch::vmaxq_u16`]."] + #[inline(always)] + pub fn vmaxq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vmaxq_u16(a, b) } + } + #[doc = "See [`arch::vmax_u32`]."] + #[inline(always)] + pub fn vmax_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vmax_u32(a, b) } + } + #[doc = "See [`arch::vmaxq_u32`]."] + #[inline(always)] + pub fn vmaxq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vmaxq_u32(a, b) } + } + #[doc = "See [`arch::vmaxnm_f32`]."] + #[inline(always)] + pub fn vmaxnm_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vmaxnm_f32(a, b) } + } + #[doc = "See [`arch::vmaxnmq_f32`]."] + #[inline(always)] + pub fn vmaxnmq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vmaxnmq_f32(a, b) } + } + #[doc = "See [`arch::vmin_f32`]."] + 
#[inline(always)] + pub fn vmin_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vmin_f32(a, b) } + } + #[doc = "See [`arch::vminq_f32`]."] + #[inline(always)] + pub fn vminq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vminq_f32(a, b) } + } + #[doc = "See [`arch::vmin_s8`]."] + #[inline(always)] + pub fn vmin_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vmin_s8(a, b) } + } + #[doc = "See [`arch::vminq_s8`]."] + #[inline(always)] + pub fn vminq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vminq_s8(a, b) } + } + #[doc = "See [`arch::vmin_s16`]."] + #[inline(always)] + pub fn vmin_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vmin_s16(a, b) } + } + #[doc = "See [`arch::vminq_s16`]."] + #[inline(always)] + pub fn vminq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vminq_s16(a, b) } + } + #[doc = "See [`arch::vmin_s32`]."] + #[inline(always)] + pub fn vmin_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vmin_s32(a, b) } + } + #[doc = "See [`arch::vminq_s32`]."] + #[inline(always)] + pub fn vminq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vminq_s32(a, b) } + } + #[doc = "See [`arch::vmin_u8`]."] + #[inline(always)] + pub fn vmin_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vmin_u8(a, b) } + } + #[doc = "See [`arch::vminq_u8`]."] + #[inline(always)] + pub fn vminq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vminq_u8(a, b) } + } + #[doc = "See [`arch::vmin_u16`]."] + #[inline(always)] + pub fn vmin_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vmin_u16(a, b) } + } + #[doc = "See [`arch::vminq_u16`]."] + #[inline(always)] + pub fn vminq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vminq_u16(a, b) } + } + #[doc = "See [`arch::vmin_u32`]."] + #[inline(always)] + pub fn vmin_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { 
vmin_u32(a, b) } + } + #[doc = "See [`arch::vminq_u32`]."] + #[inline(always)] + pub fn vminq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vminq_u32(a, b) } + } + #[doc = "See [`arch::vminnm_f32`]."] + #[inline(always)] + pub fn vminnm_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vminnm_f32(a, b) } + } + #[doc = "See [`arch::vminnmq_f32`]."] + #[inline(always)] + pub fn vminnmq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vminnmq_f32(a, b) } + } + #[doc = "See [`arch::vmla_f32`]."] + #[inline(always)] + pub fn vmla_f32(self, a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { vmla_f32(a, b, c) } + } + #[doc = "See [`arch::vmlaq_f32`]."] + #[inline(always)] + pub fn vmlaq_f32(self, a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe { vmlaq_f32(a, b, c) } + } + #[doc = "See [`arch::vmla_lane_f32`]."] + #[inline(always)] + pub fn vmla_lane_f32( + self, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + ) -> float32x2_t { + unsafe { vmla_lane_f32::(a, b, c) } + } + #[doc = "See [`arch::vmla_laneq_f32`]."] + #[inline(always)] + pub fn vmla_laneq_f32( + self, + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, + ) -> float32x2_t { + unsafe { vmla_laneq_f32::(a, b, c) } + } + #[doc = "See [`arch::vmlaq_lane_f32`]."] + #[inline(always)] + pub fn vmlaq_lane_f32( + self, + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, + ) -> float32x4_t { + unsafe { vmlaq_lane_f32::(a, b, c) } + } + #[doc = "See [`arch::vmlaq_laneq_f32`]."] + #[inline(always)] + pub fn vmlaq_laneq_f32( + self, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + ) -> float32x4_t { + unsafe { vmlaq_laneq_f32::(a, b, c) } + } + #[doc = "See [`arch::vmla_lane_s16`]."] + #[inline(always)] + pub fn vmla_lane_s16( + self, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + ) -> int16x4_t { + unsafe { vmla_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vmla_lane_u16`]."] + 
#[inline(always)] + pub fn vmla_lane_u16( + self, + a: uint16x4_t, + b: uint16x4_t, + c: uint16x4_t, + ) -> uint16x4_t { + unsafe { vmla_lane_u16::(a, b, c) } + } + #[doc = "See [`arch::vmla_laneq_s16`]."] + #[inline(always)] + pub fn vmla_laneq_s16( + self, + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, + ) -> int16x4_t { + unsafe { vmla_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vmla_laneq_u16`]."] + #[inline(always)] + pub fn vmla_laneq_u16( + self, + a: uint16x4_t, + b: uint16x4_t, + c: uint16x8_t, + ) -> uint16x4_t { + unsafe { vmla_laneq_u16::(a, b, c) } + } + #[doc = "See [`arch::vmlaq_lane_s16`]."] + #[inline(always)] + pub fn vmlaq_lane_s16( + self, + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, + ) -> int16x8_t { + unsafe { vmlaq_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlaq_lane_u16`]."] + #[inline(always)] + pub fn vmlaq_lane_u16( + self, + a: uint16x8_t, + b: uint16x8_t, + c: uint16x4_t, + ) -> uint16x8_t { + unsafe { vmlaq_lane_u16::(a, b, c) } + } + #[doc = "See [`arch::vmlaq_laneq_s16`]."] + #[inline(always)] + pub fn vmlaq_laneq_s16( + self, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + ) -> int16x8_t { + unsafe { vmlaq_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlaq_laneq_u16`]."] + #[inline(always)] + pub fn vmlaq_laneq_u16( + self, + a: uint16x8_t, + b: uint16x8_t, + c: uint16x8_t, + ) -> uint16x8_t { + unsafe { vmlaq_laneq_u16::(a, b, c) } + } + #[doc = "See [`arch::vmla_lane_s32`]."] + #[inline(always)] + pub fn vmla_lane_s32( + self, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + ) -> int32x2_t { + unsafe { vmla_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vmla_lane_u32`]."] + #[inline(always)] + pub fn vmla_lane_u32( + self, + a: uint32x2_t, + b: uint32x2_t, + c: uint32x2_t, + ) -> uint32x2_t { + unsafe { vmla_lane_u32::(a, b, c) } + } + #[doc = "See [`arch::vmla_laneq_s32`]."] + #[inline(always)] + pub fn vmla_laneq_s32( + self, + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, + ) -> int32x2_t { + unsafe 
{ vmla_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vmla_laneq_u32`]."] + #[inline(always)] + pub fn vmla_laneq_u32( + self, + a: uint32x2_t, + b: uint32x2_t, + c: uint32x4_t, + ) -> uint32x2_t { + unsafe { vmla_laneq_u32::(a, b, c) } + } + #[doc = "See [`arch::vmlaq_lane_s32`]."] + #[inline(always)] + pub fn vmlaq_lane_s32( + self, + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, + ) -> int32x4_t { + unsafe { vmlaq_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlaq_lane_u32`]."] + #[inline(always)] + pub fn vmlaq_lane_u32( + self, + a: uint32x4_t, + b: uint32x4_t, + c: uint32x2_t, + ) -> uint32x4_t { + unsafe { vmlaq_lane_u32::(a, b, c) } + } + #[doc = "See [`arch::vmlaq_laneq_s32`]."] + #[inline(always)] + pub fn vmlaq_laneq_s32( + self, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + ) -> int32x4_t { + unsafe { vmlaq_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlaq_laneq_u32`]."] + #[inline(always)] + pub fn vmlaq_laneq_u32( + self, + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, + ) -> uint32x4_t { + unsafe { vmlaq_laneq_u32::(a, b, c) } + } + #[doc = "See [`arch::vmla_n_f32`]."] + #[inline(always)] + pub fn vmla_n_f32(self, a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + unsafe { vmla_n_f32(a, b, c) } + } + #[doc = "See [`arch::vmlaq_n_f32`]."] + #[inline(always)] + pub fn vmlaq_n_f32(self, a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + unsafe { vmlaq_n_f32(a, b, c) } + } + #[doc = "See [`arch::vmla_n_s16`]."] + #[inline(always)] + pub fn vmla_n_s16(self, a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + unsafe { vmla_n_s16(a, b, c) } + } + #[doc = "See [`arch::vmlaq_n_s16`]."] + #[inline(always)] + pub fn vmlaq_n_s16(self, a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { + unsafe { vmlaq_n_s16(a, b, c) } + } + #[doc = "See [`arch::vmla_n_u16`]."] + #[inline(always)] + pub fn vmla_n_u16(self, a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { + unsafe { vmla_n_u16(a, b, c) } + } + #[doc = "See 
[`arch::vmlaq_n_u16`]."] + #[inline(always)] + pub fn vmlaq_n_u16(self, a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { + unsafe { vmlaq_n_u16(a, b, c) } + } + #[doc = "See [`arch::vmla_n_s32`]."] + #[inline(always)] + pub fn vmla_n_s32(self, a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { + unsafe { vmla_n_s32(a, b, c) } + } + #[doc = "See [`arch::vmlaq_n_s32`]."] + #[inline(always)] + pub fn vmlaq_n_s32(self, a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { + unsafe { vmlaq_n_s32(a, b, c) } + } + #[doc = "See [`arch::vmla_n_u32`]."] + #[inline(always)] + pub fn vmla_n_u32(self, a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { + unsafe { vmla_n_u32(a, b, c) } + } + #[doc = "See [`arch::vmlaq_n_u32`]."] + #[inline(always)] + pub fn vmlaq_n_u32(self, a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { + unsafe { vmlaq_n_u32(a, b, c) } + } + #[doc = "See [`arch::vmla_s8`]."] + #[inline(always)] + pub fn vmla_s8(self, a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + unsafe { vmla_s8(a, b, c) } + } + #[doc = "See [`arch::vmlaq_s8`]."] + #[inline(always)] + pub fn vmlaq_s8(self, a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + unsafe { vmlaq_s8(a, b, c) } + } + #[doc = "See [`arch::vmla_s16`]."] + #[inline(always)] + pub fn vmla_s16(self, a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + unsafe { vmla_s16(a, b, c) } + } + #[doc = "See [`arch::vmlaq_s16`]."] + #[inline(always)] + pub fn vmlaq_s16(self, a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + unsafe { vmlaq_s16(a, b, c) } + } + #[doc = "See [`arch::vmla_s32`]."] + #[inline(always)] + pub fn vmla_s32(self, a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + unsafe { vmla_s32(a, b, c) } + } + #[doc = "See [`arch::vmlaq_s32`]."] + #[inline(always)] + pub fn vmlaq_s32(self, a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + unsafe { vmlaq_s32(a, b, c) } + } + #[doc = "See [`arch::vmla_u8`]."] + #[inline(always)] + pub fn vmla_u8(self, a: uint8x8_t, 
b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vmla_u8(a, b, c) } + } + #[doc = "See [`arch::vmlaq_u8`]."] + #[inline(always)] + pub fn vmlaq_u8(self, a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { vmlaq_u8(a, b, c) } + } + #[doc = "See [`arch::vmla_u16`]."] + #[inline(always)] + pub fn vmla_u16(self, a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + unsafe { vmla_u16(a, b, c) } + } + #[doc = "See [`arch::vmlaq_u16`]."] + #[inline(always)] + pub fn vmlaq_u16(self, a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + unsafe { vmlaq_u16(a, b, c) } + } + #[doc = "See [`arch::vmla_u32`]."] + #[inline(always)] + pub fn vmla_u32(self, a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + unsafe { vmla_u32(a, b, c) } + } + #[doc = "See [`arch::vmlaq_u32`]."] + #[inline(always)] + pub fn vmlaq_u32(self, a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe { vmlaq_u32(a, b, c) } + } + #[doc = "See [`arch::vmlal_lane_s16`]."] + #[inline(always)] + pub fn vmlal_lane_s16( + self, + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, + ) -> int32x4_t { + unsafe { vmlal_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlal_laneq_s16`]."] + #[inline(always)] + pub fn vmlal_laneq_s16( + self, + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, + ) -> int32x4_t { + unsafe { vmlal_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlal_lane_s32`]."] + #[inline(always)] + pub fn vmlal_lane_s32( + self, + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, + ) -> int64x2_t { + unsafe { vmlal_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlal_laneq_s32`]."] + #[inline(always)] + pub fn vmlal_laneq_s32( + self, + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, + ) -> int64x2_t { + unsafe { vmlal_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlal_lane_u16`]."] + #[inline(always)] + pub fn vmlal_lane_u16( + self, + a: uint32x4_t, + b: uint16x4_t, + c: uint16x4_t, + ) -> uint32x4_t { + unsafe { vmlal_lane_u16::(a, b, 
c) } + } + #[doc = "See [`arch::vmlal_laneq_u16`]."] + #[inline(always)] + pub fn vmlal_laneq_u16( + self, + a: uint32x4_t, + b: uint16x4_t, + c: uint16x8_t, + ) -> uint32x4_t { + unsafe { vmlal_laneq_u16::(a, b, c) } + } + #[doc = "See [`arch::vmlal_lane_u32`]."] + #[inline(always)] + pub fn vmlal_lane_u32( + self, + a: uint64x2_t, + b: uint32x2_t, + c: uint32x2_t, + ) -> uint64x2_t { + unsafe { vmlal_lane_u32::(a, b, c) } + } + #[doc = "See [`arch::vmlal_laneq_u32`]."] + #[inline(always)] + pub fn vmlal_laneq_u32( + self, + a: uint64x2_t, + b: uint32x2_t, + c: uint32x4_t, + ) -> uint64x2_t { + unsafe { vmlal_laneq_u32::(a, b, c) } + } + #[doc = "See [`arch::vmlal_n_s16`]."] + #[inline(always)] + pub fn vmlal_n_s16(self, a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + unsafe { vmlal_n_s16(a, b, c) } + } + #[doc = "See [`arch::vmlal_n_s32`]."] + #[inline(always)] + pub fn vmlal_n_s32(self, a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + unsafe { vmlal_n_s32(a, b, c) } + } + #[doc = "See [`arch::vmlal_n_u16`]."] + #[inline(always)] + pub fn vmlal_n_u16(self, a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { + unsafe { vmlal_n_u16(a, b, c) } + } + #[doc = "See [`arch::vmlal_n_u32`]."] + #[inline(always)] + pub fn vmlal_n_u32(self, a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { + unsafe { vmlal_n_u32(a, b, c) } + } + #[doc = "See [`arch::vmlal_s8`]."] + #[inline(always)] + pub fn vmlal_s8(self, a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + unsafe { vmlal_s8(a, b, c) } + } + #[doc = "See [`arch::vmlal_s16`]."] + #[inline(always)] + pub fn vmlal_s16(self, a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + unsafe { vmlal_s16(a, b, c) } + } + #[doc = "See [`arch::vmlal_s32`]."] + #[inline(always)] + pub fn vmlal_s32(self, a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + unsafe { vmlal_s32(a, b, c) } + } + #[doc = "See [`arch::vmlal_u8`]."] + #[inline(always)] + pub fn vmlal_u8(self, a: uint16x8_t, b: uint8x8_t, c: 
uint8x8_t) -> uint16x8_t { + unsafe { vmlal_u8(a, b, c) } + } + #[doc = "See [`arch::vmlal_u16`]."] + #[inline(always)] + pub fn vmlal_u16(self, a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + unsafe { vmlal_u16(a, b, c) } + } + #[doc = "See [`arch::vmlal_u32`]."] + #[inline(always)] + pub fn vmlal_u32(self, a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + unsafe { vmlal_u32(a, b, c) } + } + #[doc = "See [`arch::vmls_f32`]."] + #[inline(always)] + pub fn vmls_f32(self, a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { + unsafe { vmls_f32(a, b, c) } + } + #[doc = "See [`arch::vmlsq_f32`]."] + #[inline(always)] + pub fn vmlsq_f32(self, a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { + unsafe { vmlsq_f32(a, b, c) } + } + #[doc = "See [`arch::vmls_lane_f32`]."] + #[inline(always)] + pub fn vmls_lane_f32( + self, + a: float32x2_t, + b: float32x2_t, + c: float32x2_t, + ) -> float32x2_t { + unsafe { vmls_lane_f32::(a, b, c) } + } + #[doc = "See [`arch::vmls_laneq_f32`]."] + #[inline(always)] + pub fn vmls_laneq_f32( + self, + a: float32x2_t, + b: float32x2_t, + c: float32x4_t, + ) -> float32x2_t { + unsafe { vmls_laneq_f32::(a, b, c) } + } + #[doc = "See [`arch::vmlsq_lane_f32`]."] + #[inline(always)] + pub fn vmlsq_lane_f32( + self, + a: float32x4_t, + b: float32x4_t, + c: float32x2_t, + ) -> float32x4_t { + unsafe { vmlsq_lane_f32::(a, b, c) } + } + #[doc = "See [`arch::vmlsq_laneq_f32`]."] + #[inline(always)] + pub fn vmlsq_laneq_f32( + self, + a: float32x4_t, + b: float32x4_t, + c: float32x4_t, + ) -> float32x4_t { + unsafe { vmlsq_laneq_f32::(a, b, c) } + } + #[doc = "See [`arch::vmls_lane_s16`]."] + #[inline(always)] + pub fn vmls_lane_s16( + self, + a: int16x4_t, + b: int16x4_t, + c: int16x4_t, + ) -> int16x4_t { + unsafe { vmls_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vmls_lane_u16`]."] + #[inline(always)] + pub fn vmls_lane_u16( + self, + a: uint16x4_t, + b: uint16x4_t, + c: uint16x4_t, + ) -> 
uint16x4_t { + unsafe { vmls_lane_u16::(a, b, c) } + } + #[doc = "See [`arch::vmls_laneq_s16`]."] + #[inline(always)] + pub fn vmls_laneq_s16( + self, + a: int16x4_t, + b: int16x4_t, + c: int16x8_t, + ) -> int16x4_t { + unsafe { vmls_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vmls_laneq_u16`]."] + #[inline(always)] + pub fn vmls_laneq_u16( + self, + a: uint16x4_t, + b: uint16x4_t, + c: uint16x8_t, + ) -> uint16x4_t { + unsafe { vmls_laneq_u16::(a, b, c) } + } + #[doc = "See [`arch::vmlsq_lane_s16`]."] + #[inline(always)] + pub fn vmlsq_lane_s16( + self, + a: int16x8_t, + b: int16x8_t, + c: int16x4_t, + ) -> int16x8_t { + unsafe { vmlsq_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlsq_lane_u16`]."] + #[inline(always)] + pub fn vmlsq_lane_u16( + self, + a: uint16x8_t, + b: uint16x8_t, + c: uint16x4_t, + ) -> uint16x8_t { + unsafe { vmlsq_lane_u16::(a, b, c) } + } + #[doc = "See [`arch::vmlsq_laneq_s16`]."] + #[inline(always)] + pub fn vmlsq_laneq_s16( + self, + a: int16x8_t, + b: int16x8_t, + c: int16x8_t, + ) -> int16x8_t { + unsafe { vmlsq_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlsq_laneq_u16`]."] + #[inline(always)] + pub fn vmlsq_laneq_u16( + self, + a: uint16x8_t, + b: uint16x8_t, + c: uint16x8_t, + ) -> uint16x8_t { + unsafe { vmlsq_laneq_u16::(a, b, c) } + } + #[doc = "See [`arch::vmls_lane_s32`]."] + #[inline(always)] + pub fn vmls_lane_s32( + self, + a: int32x2_t, + b: int32x2_t, + c: int32x2_t, + ) -> int32x2_t { + unsafe { vmls_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vmls_lane_u32`]."] + #[inline(always)] + pub fn vmls_lane_u32( + self, + a: uint32x2_t, + b: uint32x2_t, + c: uint32x2_t, + ) -> uint32x2_t { + unsafe { vmls_lane_u32::(a, b, c) } + } + #[doc = "See [`arch::vmls_laneq_s32`]."] + #[inline(always)] + pub fn vmls_laneq_s32( + self, + a: int32x2_t, + b: int32x2_t, + c: int32x4_t, + ) -> int32x2_t { + unsafe { vmls_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vmls_laneq_u32`]."] + #[inline(always)] + pub fn 
vmls_laneq_u32( + self, + a: uint32x2_t, + b: uint32x2_t, + c: uint32x4_t, + ) -> uint32x2_t { + unsafe { vmls_laneq_u32::(a, b, c) } + } + #[doc = "See [`arch::vmlsq_lane_s32`]."] + #[inline(always)] + pub fn vmlsq_lane_s32( + self, + a: int32x4_t, + b: int32x4_t, + c: int32x2_t, + ) -> int32x4_t { + unsafe { vmlsq_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlsq_lane_u32`]."] + #[inline(always)] + pub fn vmlsq_lane_u32( + self, + a: uint32x4_t, + b: uint32x4_t, + c: uint32x2_t, + ) -> uint32x4_t { + unsafe { vmlsq_lane_u32::(a, b, c) } + } + #[doc = "See [`arch::vmlsq_laneq_s32`]."] + #[inline(always)] + pub fn vmlsq_laneq_s32( + self, + a: int32x4_t, + b: int32x4_t, + c: int32x4_t, + ) -> int32x4_t { + unsafe { vmlsq_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlsq_laneq_u32`]."] + #[inline(always)] + pub fn vmlsq_laneq_u32( + self, + a: uint32x4_t, + b: uint32x4_t, + c: uint32x4_t, + ) -> uint32x4_t { + unsafe { vmlsq_laneq_u32::(a, b, c) } + } + #[doc = "See [`arch::vmls_n_f32`]."] + #[inline(always)] + pub fn vmls_n_f32(self, a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { + unsafe { vmls_n_f32(a, b, c) } + } + #[doc = "See [`arch::vmlsq_n_f32`]."] + #[inline(always)] + pub fn vmlsq_n_f32(self, a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { + unsafe { vmlsq_n_f32(a, b, c) } + } + #[doc = "See [`arch::vmls_n_s16`]."] + #[inline(always)] + pub fn vmls_n_s16(self, a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { + unsafe { vmls_n_s16(a, b, c) } + } + #[doc = "See [`arch::vmlsq_n_s16`]."] + #[inline(always)] + pub fn vmlsq_n_s16(self, a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { + unsafe { vmlsq_n_s16(a, b, c) } + } + #[doc = "See [`arch::vmls_n_u16`]."] + #[inline(always)] + pub fn vmls_n_u16(self, a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { + unsafe { vmls_n_u16(a, b, c) } + } + #[doc = "See [`arch::vmlsq_n_u16`]."] + #[inline(always)] + pub fn vmlsq_n_u16(self, a: uint16x8_t, b: uint16x8_t, c: u16) -> 
uint16x8_t { + unsafe { vmlsq_n_u16(a, b, c) } + } + #[doc = "See [`arch::vmls_n_s32`]."] + #[inline(always)] + pub fn vmls_n_s32(self, a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { + unsafe { vmls_n_s32(a, b, c) } + } + #[doc = "See [`arch::vmlsq_n_s32`]."] + #[inline(always)] + pub fn vmlsq_n_s32(self, a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { + unsafe { vmlsq_n_s32(a, b, c) } + } + #[doc = "See [`arch::vmls_n_u32`]."] + #[inline(always)] + pub fn vmls_n_u32(self, a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { + unsafe { vmls_n_u32(a, b, c) } + } + #[doc = "See [`arch::vmlsq_n_u32`]."] + #[inline(always)] + pub fn vmlsq_n_u32(self, a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { + unsafe { vmlsq_n_u32(a, b, c) } + } + #[doc = "See [`arch::vmls_s8`]."] + #[inline(always)] + pub fn vmls_s8(self, a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { + unsafe { vmls_s8(a, b, c) } + } + #[doc = "See [`arch::vmlsq_s8`]."] + #[inline(always)] + pub fn vmlsq_s8(self, a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { + unsafe { vmlsq_s8(a, b, c) } + } + #[doc = "See [`arch::vmls_s16`]."] + #[inline(always)] + pub fn vmls_s16(self, a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { + unsafe { vmls_s16(a, b, c) } + } + #[doc = "See [`arch::vmlsq_s16`]."] + #[inline(always)] + pub fn vmlsq_s16(self, a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { + unsafe { vmlsq_s16(a, b, c) } + } + #[doc = "See [`arch::vmls_s32`]."] + #[inline(always)] + pub fn vmls_s32(self, a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { + unsafe { vmls_s32(a, b, c) } + } + #[doc = "See [`arch::vmlsq_s32`]."] + #[inline(always)] + pub fn vmlsq_s32(self, a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { + unsafe { vmlsq_s32(a, b, c) } + } + #[doc = "See [`arch::vmls_u8`]."] + #[inline(always)] + pub fn vmls_u8(self, a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { + unsafe { vmls_u8(a, b, c) } + } + #[doc = "See [`arch::vmlsq_u8`]."] 
+ #[inline(always)] + pub fn vmlsq_u8(self, a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { + unsafe { vmlsq_u8(a, b, c) } + } + #[doc = "See [`arch::vmls_u16`]."] + #[inline(always)] + pub fn vmls_u16(self, a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { + unsafe { vmls_u16(a, b, c) } + } + #[doc = "See [`arch::vmlsq_u16`]."] + #[inline(always)] + pub fn vmlsq_u16(self, a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { + unsafe { vmlsq_u16(a, b, c) } + } + #[doc = "See [`arch::vmls_u32`]."] + #[inline(always)] + pub fn vmls_u32(self, a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { + unsafe { vmls_u32(a, b, c) } + } + #[doc = "See [`arch::vmlsq_u32`]."] + #[inline(always)] + pub fn vmlsq_u32(self, a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { + unsafe { vmlsq_u32(a, b, c) } + } + #[doc = "See [`arch::vmlsl_lane_s16`]."] + #[inline(always)] + pub fn vmlsl_lane_s16( + self, + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, + ) -> int32x4_t { + unsafe { vmlsl_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_laneq_s16`]."] + #[inline(always)] + pub fn vmlsl_laneq_s16( + self, + a: int32x4_t, + b: int16x4_t, + c: int16x8_t, + ) -> int32x4_t { + unsafe { vmlsl_laneq_s16::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_lane_s32`]."] + #[inline(always)] + pub fn vmlsl_lane_s32( + self, + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, + ) -> int64x2_t { + unsafe { vmlsl_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_laneq_s32`]."] + #[inline(always)] + pub fn vmlsl_laneq_s32( + self, + a: int64x2_t, + b: int32x2_t, + c: int32x4_t, + ) -> int64x2_t { + unsafe { vmlsl_laneq_s32::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_lane_u16`]."] + #[inline(always)] + pub fn vmlsl_lane_u16( + self, + a: uint32x4_t, + b: uint16x4_t, + c: uint16x4_t, + ) -> uint32x4_t { + unsafe { vmlsl_lane_u16::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_laneq_u16`]."] + #[inline(always)] + pub fn vmlsl_laneq_u16( + self, + a: 
uint32x4_t, + b: uint16x4_t, + c: uint16x8_t, + ) -> uint32x4_t { + unsafe { vmlsl_laneq_u16::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_lane_u32`]."] + #[inline(always)] + pub fn vmlsl_lane_u32( + self, + a: uint64x2_t, + b: uint32x2_t, + c: uint32x2_t, + ) -> uint64x2_t { + unsafe { vmlsl_lane_u32::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_laneq_u32`]."] + #[inline(always)] + pub fn vmlsl_laneq_u32( + self, + a: uint64x2_t, + b: uint32x2_t, + c: uint32x4_t, + ) -> uint64x2_t { + unsafe { vmlsl_laneq_u32::(a, b, c) } + } + #[doc = "See [`arch::vmlsl_n_s16`]."] + #[inline(always)] + pub fn vmlsl_n_s16(self, a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + unsafe { vmlsl_n_s16(a, b, c) } + } + #[doc = "See [`arch::vmlsl_n_s32`]."] + #[inline(always)] + pub fn vmlsl_n_s32(self, a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + unsafe { vmlsl_n_s32(a, b, c) } + } + #[doc = "See [`arch::vmlsl_n_u16`]."] + #[inline(always)] + pub fn vmlsl_n_u16(self, a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { + unsafe { vmlsl_n_u16(a, b, c) } + } + #[doc = "See [`arch::vmlsl_n_u32`]."] + #[inline(always)] + pub fn vmlsl_n_u32(self, a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { + unsafe { vmlsl_n_u32(a, b, c) } + } + #[doc = "See [`arch::vmlsl_s8`]."] + #[inline(always)] + pub fn vmlsl_s8(self, a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { + unsafe { vmlsl_s8(a, b, c) } + } + #[doc = "See [`arch::vmlsl_s16`]."] + #[inline(always)] + pub fn vmlsl_s16(self, a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + unsafe { vmlsl_s16(a, b, c) } + } + #[doc = "See [`arch::vmlsl_s32`]."] + #[inline(always)] + pub fn vmlsl_s32(self, a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + unsafe { vmlsl_s32(a, b, c) } + } + #[doc = "See [`arch::vmlsl_u8`]."] + #[inline(always)] + pub fn vmlsl_u8(self, a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { + unsafe { vmlsl_u8(a, b, c) } + } + #[doc = "See [`arch::vmlsl_u16`]."] + #[inline(always)] 
+ pub fn vmlsl_u16(self, a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { + unsafe { vmlsl_u16(a, b, c) } + } + #[doc = "See [`arch::vmlsl_u32`]."] + #[inline(always)] + pub fn vmlsl_u32(self, a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { + unsafe { vmlsl_u32(a, b, c) } + } + #[doc = "See [`arch::vmov_n_f32`]."] + #[inline(always)] + pub fn vmov_n_f32(self, value: f32) -> float32x2_t { + unsafe { vmov_n_f32(value) } + } + #[doc = "See [`arch::vmov_n_p16`]."] + #[inline(always)] + pub fn vmov_n_p16(self, value: p16) -> poly16x4_t { + unsafe { vmov_n_p16(value) } + } + #[doc = "See [`arch::vmov_n_p8`]."] + #[inline(always)] + pub fn vmov_n_p8(self, value: p8) -> poly8x8_t { + unsafe { vmov_n_p8(value) } + } + #[doc = "See [`arch::vmov_n_s16`]."] + #[inline(always)] + pub fn vmov_n_s16(self, value: i16) -> int16x4_t { + unsafe { vmov_n_s16(value) } + } + #[doc = "See [`arch::vmov_n_s32`]."] + #[inline(always)] + pub fn vmov_n_s32(self, value: i32) -> int32x2_t { + unsafe { vmov_n_s32(value) } + } + #[doc = "See [`arch::vmov_n_s64`]."] + #[inline(always)] + pub fn vmov_n_s64(self, value: i64) -> int64x1_t { + unsafe { vmov_n_s64(value) } + } + #[doc = "See [`arch::vmov_n_s8`]."] + #[inline(always)] + pub fn vmov_n_s8(self, value: i8) -> int8x8_t { + unsafe { vmov_n_s8(value) } + } + #[doc = "See [`arch::vmov_n_u16`]."] + #[inline(always)] + pub fn vmov_n_u16(self, value: u16) -> uint16x4_t { + unsafe { vmov_n_u16(value) } + } + #[doc = "See [`arch::vmov_n_u32`]."] + #[inline(always)] + pub fn vmov_n_u32(self, value: u32) -> uint32x2_t { + unsafe { vmov_n_u32(value) } + } + #[doc = "See [`arch::vmov_n_u64`]."] + #[inline(always)] + pub fn vmov_n_u64(self, value: u64) -> uint64x1_t { + unsafe { vmov_n_u64(value) } + } + #[doc = "See [`arch::vmov_n_u8`]."] + #[inline(always)] + pub fn vmov_n_u8(self, value: u8) -> uint8x8_t { + unsafe { vmov_n_u8(value) } + } + #[doc = "See [`arch::vmovq_n_f32`]."] + #[inline(always)] + pub fn vmovq_n_f32(self, 
value: f32) -> float32x4_t { + unsafe { vmovq_n_f32(value) } + } + #[doc = "See [`arch::vmovq_n_p16`]."] + #[inline(always)] + pub fn vmovq_n_p16(self, value: p16) -> poly16x8_t { + unsafe { vmovq_n_p16(value) } + } + #[doc = "See [`arch::vmovq_n_p8`]."] + #[inline(always)] + pub fn vmovq_n_p8(self, value: p8) -> poly8x16_t { + unsafe { vmovq_n_p8(value) } + } + #[doc = "See [`arch::vmovq_n_s16`]."] + #[inline(always)] + pub fn vmovq_n_s16(self, value: i16) -> int16x8_t { + unsafe { vmovq_n_s16(value) } + } + #[doc = "See [`arch::vmovq_n_s32`]."] + #[inline(always)] + pub fn vmovq_n_s32(self, value: i32) -> int32x4_t { + unsafe { vmovq_n_s32(value) } + } + #[doc = "See [`arch::vmovq_n_s64`]."] + #[inline(always)] + pub fn vmovq_n_s64(self, value: i64) -> int64x2_t { + unsafe { vmovq_n_s64(value) } + } + #[doc = "See [`arch::vmovq_n_s8`]."] + #[inline(always)] + pub fn vmovq_n_s8(self, value: i8) -> int8x16_t { + unsafe { vmovq_n_s8(value) } + } + #[doc = "See [`arch::vmovq_n_u16`]."] + #[inline(always)] + pub fn vmovq_n_u16(self, value: u16) -> uint16x8_t { + unsafe { vmovq_n_u16(value) } + } + #[doc = "See [`arch::vmovq_n_u32`]."] + #[inline(always)] + pub fn vmovq_n_u32(self, value: u32) -> uint32x4_t { + unsafe { vmovq_n_u32(value) } + } + #[doc = "See [`arch::vmovq_n_u64`]."] + #[inline(always)] + pub fn vmovq_n_u64(self, value: u64) -> uint64x2_t { + unsafe { vmovq_n_u64(value) } + } + #[doc = "See [`arch::vmovq_n_u8`]."] + #[inline(always)] + pub fn vmovq_n_u8(self, value: u8) -> uint8x16_t { + unsafe { vmovq_n_u8(value) } + } + #[doc = "See [`arch::vmovl_s16`]."] + #[inline(always)] + pub fn vmovl_s16(self, a: int16x4_t) -> int32x4_t { + unsafe { vmovl_s16(a) } + } + #[doc = "See [`arch::vmovl_s32`]."] + #[inline(always)] + pub fn vmovl_s32(self, a: int32x2_t) -> int64x2_t { + unsafe { vmovl_s32(a) } + } + #[doc = "See [`arch::vmovl_s8`]."] + #[inline(always)] + pub fn vmovl_s8(self, a: int8x8_t) -> int16x8_t { + unsafe { vmovl_s8(a) } + } + #[doc = "See 
[`arch::vmovl_u16`]."] + #[inline(always)] + pub fn vmovl_u16(self, a: uint16x4_t) -> uint32x4_t { + unsafe { vmovl_u16(a) } + } + #[doc = "See [`arch::vmovl_u32`]."] + #[inline(always)] + pub fn vmovl_u32(self, a: uint32x2_t) -> uint64x2_t { + unsafe { vmovl_u32(a) } + } + #[doc = "See [`arch::vmovl_u8`]."] + #[inline(always)] + pub fn vmovl_u8(self, a: uint8x8_t) -> uint16x8_t { + unsafe { vmovl_u8(a) } + } + #[doc = "See [`arch::vmovn_s16`]."] + #[inline(always)] + pub fn vmovn_s16(self, a: int16x8_t) -> int8x8_t { + unsafe { vmovn_s16(a) } + } + #[doc = "See [`arch::vmovn_s32`]."] + #[inline(always)] + pub fn vmovn_s32(self, a: int32x4_t) -> int16x4_t { + unsafe { vmovn_s32(a) } + } + #[doc = "See [`arch::vmovn_s64`]."] + #[inline(always)] + pub fn vmovn_s64(self, a: int64x2_t) -> int32x2_t { + unsafe { vmovn_s64(a) } + } + #[doc = "See [`arch::vmovn_u16`]."] + #[inline(always)] + pub fn vmovn_u16(self, a: uint16x8_t) -> uint8x8_t { + unsafe { vmovn_u16(a) } + } + #[doc = "See [`arch::vmovn_u32`]."] + #[inline(always)] + pub fn vmovn_u32(self, a: uint32x4_t) -> uint16x4_t { + unsafe { vmovn_u32(a) } + } + #[doc = "See [`arch::vmovn_u64`]."] + #[inline(always)] + pub fn vmovn_u64(self, a: uint64x2_t) -> uint32x2_t { + unsafe { vmovn_u64(a) } + } + #[doc = "See [`arch::vmul_f32`]."] + #[inline(always)] + pub fn vmul_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vmul_f32(a, b) } + } + #[doc = "See [`arch::vmulq_f32`]."] + #[inline(always)] + pub fn vmulq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vmulq_f32(a, b) } + } + #[doc = "See [`arch::vmul_lane_f32`]."] + #[inline(always)] + pub fn vmul_lane_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vmul_lane_f32::(a, b) } + } + #[doc = "See [`arch::vmul_laneq_f32`]."] + #[inline(always)] + pub fn vmul_laneq_f32(self, a: float32x2_t, b: float32x4_t) -> float32x2_t { + unsafe { vmul_laneq_f32::(a, b) } + } + #[doc = "See [`arch::vmulq_lane_f32`]."] 
+ #[inline(always)] + pub fn vmulq_lane_f32(self, a: float32x4_t, b: float32x2_t) -> float32x4_t { + unsafe { vmulq_lane_f32::(a, b) } + } + #[doc = "See [`arch::vmulq_laneq_f32`]."] + #[inline(always)] + pub fn vmulq_laneq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vmulq_laneq_f32::(a, b) } + } + #[doc = "See [`arch::vmul_lane_s16`]."] + #[inline(always)] + pub fn vmul_lane_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vmul_lane_s16::(a, b) } + } + #[doc = "See [`arch::vmulq_lane_s16`]."] + #[inline(always)] + pub fn vmulq_lane_s16(self, a: int16x8_t, b: int16x4_t) -> int16x8_t { + unsafe { vmulq_lane_s16::(a, b) } + } + #[doc = "See [`arch::vmul_lane_s32`]."] + #[inline(always)] + pub fn vmul_lane_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vmul_lane_s32::(a, b) } + } + #[doc = "See [`arch::vmulq_lane_s32`]."] + #[inline(always)] + pub fn vmulq_lane_s32(self, a: int32x4_t, b: int32x2_t) -> int32x4_t { + unsafe { vmulq_lane_s32::(a, b) } + } + #[doc = "See [`arch::vmul_lane_u16`]."] + #[inline(always)] + pub fn vmul_lane_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vmul_lane_u16::(a, b) } + } + #[doc = "See [`arch::vmulq_lane_u16`]."] + #[inline(always)] + pub fn vmulq_lane_u16(self, a: uint16x8_t, b: uint16x4_t) -> uint16x8_t { + unsafe { vmulq_lane_u16::(a, b) } + } + #[doc = "See [`arch::vmul_lane_u32`]."] + #[inline(always)] + pub fn vmul_lane_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vmul_lane_u32::(a, b) } + } + #[doc = "See [`arch::vmulq_lane_u32`]."] + #[inline(always)] + pub fn vmulq_lane_u32(self, a: uint32x4_t, b: uint32x2_t) -> uint32x4_t { + unsafe { vmulq_lane_u32::(a, b) } + } + #[doc = "See [`arch::vmul_laneq_s16`]."] + #[inline(always)] + pub fn vmul_laneq_s16(self, a: int16x4_t, b: int16x8_t) -> int16x4_t { + unsafe { vmul_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vmulq_laneq_s16`]."] + #[inline(always)] + pub fn 
vmulq_laneq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vmulq_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vmul_laneq_s32`]."] + #[inline(always)] + pub fn vmul_laneq_s32(self, a: int32x2_t, b: int32x4_t) -> int32x2_t { + unsafe { vmul_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vmulq_laneq_s32`]."] + #[inline(always)] + pub fn vmulq_laneq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vmulq_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vmul_laneq_u16`]."] + #[inline(always)] + pub fn vmul_laneq_u16(self, a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { + unsafe { vmul_laneq_u16::(a, b) } + } + #[doc = "See [`arch::vmulq_laneq_u16`]."] + #[inline(always)] + pub fn vmulq_laneq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vmulq_laneq_u16::(a, b) } + } + #[doc = "See [`arch::vmul_laneq_u32`]."] + #[inline(always)] + pub fn vmul_laneq_u32(self, a: uint32x2_t, b: uint32x4_t) -> uint32x2_t { + unsafe { vmul_laneq_u32::(a, b) } + } + #[doc = "See [`arch::vmulq_laneq_u32`]."] + #[inline(always)] + pub fn vmulq_laneq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vmulq_laneq_u32::(a, b) } + } + #[doc = "See [`arch::vmul_n_f32`]."] + #[inline(always)] + pub fn vmul_n_f32(self, a: float32x2_t, b: f32) -> float32x2_t { + unsafe { vmul_n_f32(a, b) } + } + #[doc = "See [`arch::vmulq_n_f32`]."] + #[inline(always)] + pub fn vmulq_n_f32(self, a: float32x4_t, b: f32) -> float32x4_t { + unsafe { vmulq_n_f32(a, b) } + } + #[doc = "See [`arch::vmul_n_s16`]."] + #[inline(always)] + pub fn vmul_n_s16(self, a: int16x4_t, b: i16) -> int16x4_t { + unsafe { vmul_n_s16(a, b) } + } + #[doc = "See [`arch::vmulq_n_s16`]."] + #[inline(always)] + pub fn vmulq_n_s16(self, a: int16x8_t, b: i16) -> int16x8_t { + unsafe { vmulq_n_s16(a, b) } + } + #[doc = "See [`arch::vmul_n_s32`]."] + #[inline(always)] + pub fn vmul_n_s32(self, a: int32x2_t, b: i32) -> int32x2_t { + unsafe { vmul_n_s32(a, b) } + } + #[doc = "See 
[`arch::vmulq_n_s32`]."] + #[inline(always)] + pub fn vmulq_n_s32(self, a: int32x4_t, b: i32) -> int32x4_t { + unsafe { vmulq_n_s32(a, b) } + } + #[doc = "See [`arch::vmul_n_u16`]."] + #[inline(always)] + pub fn vmul_n_u16(self, a: uint16x4_t, b: u16) -> uint16x4_t { + unsafe { vmul_n_u16(a, b) } + } + #[doc = "See [`arch::vmulq_n_u16`]."] + #[inline(always)] + pub fn vmulq_n_u16(self, a: uint16x8_t, b: u16) -> uint16x8_t { + unsafe { vmulq_n_u16(a, b) } + } + #[doc = "See [`arch::vmul_n_u32`]."] + #[inline(always)] + pub fn vmul_n_u32(self, a: uint32x2_t, b: u32) -> uint32x2_t { + unsafe { vmul_n_u32(a, b) } + } + #[doc = "See [`arch::vmulq_n_u32`]."] + #[inline(always)] + pub fn vmulq_n_u32(self, a: uint32x4_t, b: u32) -> uint32x4_t { + unsafe { vmulq_n_u32(a, b) } + } + #[doc = "See [`arch::vmul_p8`]."] + #[inline(always)] + pub fn vmul_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + unsafe { vmul_p8(a, b) } + } + #[doc = "See [`arch::vmulq_p8`]."] + #[inline(always)] + pub fn vmulq_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + unsafe { vmulq_p8(a, b) } + } + #[doc = "See [`arch::vmul_s16`]."] + #[inline(always)] + pub fn vmul_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vmul_s16(a, b) } + } + #[doc = "See [`arch::vmulq_s16`]."] + #[inline(always)] + pub fn vmulq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vmulq_s16(a, b) } + } + #[doc = "See [`arch::vmul_u16`]."] + #[inline(always)] + pub fn vmul_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vmul_u16(a, b) } + } + #[doc = "See [`arch::vmulq_u16`]."] + #[inline(always)] + pub fn vmulq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vmulq_u16(a, b) } + } + #[doc = "See [`arch::vmul_s32`]."] + #[inline(always)] + pub fn vmul_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vmul_s32(a, b) } + } + #[doc = "See [`arch::vmulq_s32`]."] + #[inline(always)] + pub fn vmulq_s32(self, a: int32x4_t, b: int32x4_t) -> 
int32x4_t { + unsafe { vmulq_s32(a, b) } + } + #[doc = "See [`arch::vmul_u32`]."] + #[inline(always)] + pub fn vmul_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vmul_u32(a, b) } + } + #[doc = "See [`arch::vmulq_u32`]."] + #[inline(always)] + pub fn vmulq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vmulq_u32(a, b) } + } + #[doc = "See [`arch::vmul_s8`]."] + #[inline(always)] + pub fn vmul_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vmul_s8(a, b) } + } + #[doc = "See [`arch::vmulq_s8`]."] + #[inline(always)] + pub fn vmulq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vmulq_s8(a, b) } + } + #[doc = "See [`arch::vmul_u8`]."] + #[inline(always)] + pub fn vmul_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vmul_u8(a, b) } + } + #[doc = "See [`arch::vmulq_u8`]."] + #[inline(always)] + pub fn vmulq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vmulq_u8(a, b) } + } + #[doc = "See [`arch::vmull_lane_s16`]."] + #[inline(always)] + pub fn vmull_lane_s16(self, a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe { vmull_lane_s16::(a, b) } + } + #[doc = "See [`arch::vmull_laneq_s16`]."] + #[inline(always)] + pub fn vmull_laneq_s16(self, a: int16x4_t, b: int16x8_t) -> int32x4_t { + unsafe { vmull_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vmull_lane_s32`]."] + #[inline(always)] + pub fn vmull_lane_s32(self, a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe { vmull_lane_s32::(a, b) } + } + #[doc = "See [`arch::vmull_laneq_s32`]."] + #[inline(always)] + pub fn vmull_laneq_s32(self, a: int32x2_t, b: int32x4_t) -> int64x2_t { + unsafe { vmull_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vmull_lane_u16`]."] + #[inline(always)] + pub fn vmull_lane_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { vmull_lane_u16::(a, b) } + } + #[doc = "See [`arch::vmull_laneq_u16`]."] + #[inline(always)] + pub fn vmull_laneq_u16(self, a: uint16x4_t, b: uint16x8_t) 
-> uint32x4_t { + unsafe { vmull_laneq_u16::(a, b) } + } + #[doc = "See [`arch::vmull_lane_u32`]."] + #[inline(always)] + pub fn vmull_lane_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { vmull_lane_u32::(a, b) } + } + #[doc = "See [`arch::vmull_laneq_u32`]."] + #[inline(always)] + pub fn vmull_laneq_u32(self, a: uint32x2_t, b: uint32x4_t) -> uint64x2_t { + unsafe { vmull_laneq_u32::(a, b) } + } + #[doc = "See [`arch::vmull_n_s16`]."] + #[inline(always)] + pub fn vmull_n_s16(self, a: int16x4_t, b: i16) -> int32x4_t { + unsafe { vmull_n_s16(a, b) } + } + #[doc = "See [`arch::vmull_n_s32`]."] + #[inline(always)] + pub fn vmull_n_s32(self, a: int32x2_t, b: i32) -> int64x2_t { + unsafe { vmull_n_s32(a, b) } + } + #[doc = "See [`arch::vmull_n_u16`]."] + #[inline(always)] + pub fn vmull_n_u16(self, a: uint16x4_t, b: u16) -> uint32x4_t { + unsafe { vmull_n_u16(a, b) } + } + #[doc = "See [`arch::vmull_n_u32`]."] + #[inline(always)] + pub fn vmull_n_u32(self, a: uint32x2_t, b: u32) -> uint64x2_t { + unsafe { vmull_n_u32(a, b) } + } + #[doc = "See [`arch::vmull_p8`]."] + #[inline(always)] + pub fn vmull_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { + unsafe { vmull_p8(a, b) } + } + #[doc = "See [`arch::vmull_s16`]."] + #[inline(always)] + pub fn vmull_s16(self, a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe { vmull_s16(a, b) } + } + #[doc = "See [`arch::vmull_s32`]."] + #[inline(always)] + pub fn vmull_s32(self, a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe { vmull_s32(a, b) } + } + #[doc = "See [`arch::vmull_s8`]."] + #[inline(always)] + pub fn vmull_s8(self, a: int8x8_t, b: int8x8_t) -> int16x8_t { + unsafe { vmull_s8(a, b) } + } + #[doc = "See [`arch::vmull_u8`]."] + #[inline(always)] + pub fn vmull_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe { vmull_u8(a, b) } + } + #[doc = "See [`arch::vmull_u16`]."] + #[inline(always)] + pub fn vmull_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { vmull_u16(a, b) 
} + } + #[doc = "See [`arch::vmull_u32`]."] + #[inline(always)] + pub fn vmull_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { vmull_u32(a, b) } + } + #[doc = "See [`arch::vmvn_p8`]."] + #[inline(always)] + pub fn vmvn_p8(self, a: poly8x8_t) -> poly8x8_t { + unsafe { vmvn_p8(a) } + } + #[doc = "See [`arch::vmvn_s16`]."] + #[inline(always)] + pub fn vmvn_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vmvn_s16(a) } + } + #[doc = "See [`arch::vmvn_s32`]."] + #[inline(always)] + pub fn vmvn_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vmvn_s32(a) } + } + #[doc = "See [`arch::vmvn_s8`]."] + #[inline(always)] + pub fn vmvn_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vmvn_s8(a) } + } + #[doc = "See [`arch::vmvn_u16`]."] + #[inline(always)] + pub fn vmvn_u16(self, a: uint16x4_t) -> uint16x4_t { + unsafe { vmvn_u16(a) } + } + #[doc = "See [`arch::vmvn_u32`]."] + #[inline(always)] + pub fn vmvn_u32(self, a: uint32x2_t) -> uint32x2_t { + unsafe { vmvn_u32(a) } + } + #[doc = "See [`arch::vmvn_u8`]."] + #[inline(always)] + pub fn vmvn_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vmvn_u8(a) } + } + #[doc = "See [`arch::vmvnq_p8`]."] + #[inline(always)] + pub fn vmvnq_p8(self, a: poly8x16_t) -> poly8x16_t { + unsafe { vmvnq_p8(a) } + } + #[doc = "See [`arch::vmvnq_s16`]."] + #[inline(always)] + pub fn vmvnq_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vmvnq_s16(a) } + } + #[doc = "See [`arch::vmvnq_s32`]."] + #[inline(always)] + pub fn vmvnq_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vmvnq_s32(a) } + } + #[doc = "See [`arch::vmvnq_s8`]."] + #[inline(always)] + pub fn vmvnq_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vmvnq_s8(a) } + } + #[doc = "See [`arch::vmvnq_u16`]."] + #[inline(always)] + pub fn vmvnq_u16(self, a: uint16x8_t) -> uint16x8_t { + unsafe { vmvnq_u16(a) } + } + #[doc = "See [`arch::vmvnq_u32`]."] + #[inline(always)] + pub fn vmvnq_u32(self, a: uint32x4_t) -> uint32x4_t { + unsafe { vmvnq_u32(a) } + } + #[doc = 
"See [`arch::vmvnq_u8`]."] + #[inline(always)] + pub fn vmvnq_u8(self, a: uint8x16_t) -> uint8x16_t { + unsafe { vmvnq_u8(a) } + } + #[doc = "See [`arch::vneg_f32`]."] + #[inline(always)] + pub fn vneg_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vneg_f32(a) } + } + #[doc = "See [`arch::vnegq_f32`]."] + #[inline(always)] + pub fn vnegq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vnegq_f32(a) } + } + #[doc = "See [`arch::vneg_s8`]."] + #[inline(always)] + pub fn vneg_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vneg_s8(a) } + } + #[doc = "See [`arch::vnegq_s8`]."] + #[inline(always)] + pub fn vnegq_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vnegq_s8(a) } + } + #[doc = "See [`arch::vneg_s16`]."] + #[inline(always)] + pub fn vneg_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vneg_s16(a) } + } + #[doc = "See [`arch::vnegq_s16`]."] + #[inline(always)] + pub fn vnegq_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vnegq_s16(a) } + } + #[doc = "See [`arch::vneg_s32`]."] + #[inline(always)] + pub fn vneg_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vneg_s32(a) } + } + #[doc = "See [`arch::vnegq_s32`]."] + #[inline(always)] + pub fn vnegq_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vnegq_s32(a) } + } + #[doc = "See [`arch::vorn_s16`]."] + #[inline(always)] + pub fn vorn_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vorn_s16(a, b) } + } + #[doc = "See [`arch::vorn_s32`]."] + #[inline(always)] + pub fn vorn_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vorn_s32(a, b) } + } + #[doc = "See [`arch::vorn_s64`]."] + #[inline(always)] + pub fn vorn_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vorn_s64(a, b) } + } + #[doc = "See [`arch::vorn_s8`]."] + #[inline(always)] + pub fn vorn_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vorn_s8(a, b) } + } + #[doc = "See [`arch::vornq_s16`]."] + #[inline(always)] + pub fn vornq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { 
+ unsafe { vornq_s16(a, b) } + } + #[doc = "See [`arch::vornq_s32`]."] + #[inline(always)] + pub fn vornq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vornq_s32(a, b) } + } + #[doc = "See [`arch::vornq_s64`]."] + #[inline(always)] + pub fn vornq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vornq_s64(a, b) } + } + #[doc = "See [`arch::vornq_s8`]."] + #[inline(always)] + pub fn vornq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vornq_s8(a, b) } + } + #[doc = "See [`arch::vorn_u16`]."] + #[inline(always)] + pub fn vorn_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vorn_u16(a, b) } + } + #[doc = "See [`arch::vorn_u32`]."] + #[inline(always)] + pub fn vorn_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vorn_u32(a, b) } + } + #[doc = "See [`arch::vorn_u64`]."] + #[inline(always)] + pub fn vorn_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vorn_u64(a, b) } + } + #[doc = "See [`arch::vorn_u8`]."] + #[inline(always)] + pub fn vorn_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vorn_u8(a, b) } + } + #[doc = "See [`arch::vornq_u16`]."] + #[inline(always)] + pub fn vornq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vornq_u16(a, b) } + } + #[doc = "See [`arch::vornq_u32`]."] + #[inline(always)] + pub fn vornq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vornq_u32(a, b) } + } + #[doc = "See [`arch::vornq_u64`]."] + #[inline(always)] + pub fn vornq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vornq_u64(a, b) } + } + #[doc = "See [`arch::vornq_u8`]."] + #[inline(always)] + pub fn vornq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vornq_u8(a, b) } + } + #[doc = "See [`arch::vorr_s8`]."] + #[inline(always)] + pub fn vorr_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vorr_s8(a, b) } + } + #[doc = "See [`arch::vorrq_s8`]."] + #[inline(always)] + pub fn 
vorrq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vorrq_s8(a, b) } + } + #[doc = "See [`arch::vorr_s16`]."] + #[inline(always)] + pub fn vorr_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vorr_s16(a, b) } + } + #[doc = "See [`arch::vorrq_s16`]."] + #[inline(always)] + pub fn vorrq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vorrq_s16(a, b) } + } + #[doc = "See [`arch::vorr_s32`]."] + #[inline(always)] + pub fn vorr_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vorr_s32(a, b) } + } + #[doc = "See [`arch::vorrq_s32`]."] + #[inline(always)] + pub fn vorrq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vorrq_s32(a, b) } + } + #[doc = "See [`arch::vorr_s64`]."] + #[inline(always)] + pub fn vorr_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vorr_s64(a, b) } + } + #[doc = "See [`arch::vorrq_s64`]."] + #[inline(always)] + pub fn vorrq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vorrq_s64(a, b) } + } + #[doc = "See [`arch::vorr_u8`]."] + #[inline(always)] + pub fn vorr_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vorr_u8(a, b) } + } + #[doc = "See [`arch::vorrq_u8`]."] + #[inline(always)] + pub fn vorrq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vorrq_u8(a, b) } + } + #[doc = "See [`arch::vorr_u16`]."] + #[inline(always)] + pub fn vorr_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vorr_u16(a, b) } + } + #[doc = "See [`arch::vorrq_u16`]."] + #[inline(always)] + pub fn vorrq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vorrq_u16(a, b) } + } + #[doc = "See [`arch::vorr_u32`]."] + #[inline(always)] + pub fn vorr_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vorr_u32(a, b) } + } + #[doc = "See [`arch::vorrq_u32`]."] + #[inline(always)] + pub fn vorrq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vorrq_u32(a, b) } + } + #[doc = 
"See [`arch::vorr_u64`]."] + #[inline(always)] + pub fn vorr_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vorr_u64(a, b) } + } + #[doc = "See [`arch::vorrq_u64`]."] + #[inline(always)] + pub fn vorrq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vorrq_u64(a, b) } + } + #[doc = "See [`arch::vpadal_s8`]."] + #[inline(always)] + pub fn vpadal_s8(self, a: int16x4_t, b: int8x8_t) -> int16x4_t { + unsafe { vpadal_s8(a, b) } + } + #[doc = "See [`arch::vpadalq_s8`]."] + #[inline(always)] + pub fn vpadalq_s8(self, a: int16x8_t, b: int8x16_t) -> int16x8_t { + unsafe { vpadalq_s8(a, b) } + } + #[doc = "See [`arch::vpadal_s16`]."] + #[inline(always)] + pub fn vpadal_s16(self, a: int32x2_t, b: int16x4_t) -> int32x2_t { + unsafe { vpadal_s16(a, b) } + } + #[doc = "See [`arch::vpadalq_s16`]."] + #[inline(always)] + pub fn vpadalq_s16(self, a: int32x4_t, b: int16x8_t) -> int32x4_t { + unsafe { vpadalq_s16(a, b) } + } + #[doc = "See [`arch::vpadal_s32`]."] + #[inline(always)] + pub fn vpadal_s32(self, a: int64x1_t, b: int32x2_t) -> int64x1_t { + unsafe { vpadal_s32(a, b) } + } + #[doc = "See [`arch::vpadalq_s32`]."] + #[inline(always)] + pub fn vpadalq_s32(self, a: int64x2_t, b: int32x4_t) -> int64x2_t { + unsafe { vpadalq_s32(a, b) } + } + #[doc = "See [`arch::vpadal_u8`]."] + #[inline(always)] + pub fn vpadal_u8(self, a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { + unsafe { vpadal_u8(a, b) } + } + #[doc = "See [`arch::vpadalq_u8`]."] + #[inline(always)] + pub fn vpadalq_u8(self, a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { + unsafe { vpadalq_u8(a, b) } + } + #[doc = "See [`arch::vpadal_u16`]."] + #[inline(always)] + pub fn vpadal_u16(self, a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { + unsafe { vpadal_u16(a, b) } + } + #[doc = "See [`arch::vpadalq_u16`]."] + #[inline(always)] + pub fn vpadalq_u16(self, a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { + unsafe { vpadalq_u16(a, b) } + } + #[doc = "See [`arch::vpadal_u32`]."] + 
#[inline(always)] + pub fn vpadal_u32(self, a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { + unsafe { vpadal_u32(a, b) } + } + #[doc = "See [`arch::vpadalq_u32`]."] + #[inline(always)] + pub fn vpadalq_u32(self, a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { + unsafe { vpadalq_u32(a, b) } + } + #[doc = "See [`arch::vpadd_f32`]."] + #[inline(always)] + pub fn vpadd_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vpadd_f32(a, b) } + } + #[doc = "See [`arch::vpadd_s8`]."] + #[inline(always)] + pub fn vpadd_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vpadd_s8(a, b) } + } + #[doc = "See [`arch::vpadd_s16`]."] + #[inline(always)] + pub fn vpadd_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vpadd_s16(a, b) } + } + #[doc = "See [`arch::vpadd_s32`]."] + #[inline(always)] + pub fn vpadd_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vpadd_s32(a, b) } + } + #[doc = "See [`arch::vpadd_u8`]."] + #[inline(always)] + pub fn vpadd_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vpadd_u8(a, b) } + } + #[doc = "See [`arch::vpadd_u16`]."] + #[inline(always)] + pub fn vpadd_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vpadd_u16(a, b) } + } + #[doc = "See [`arch::vpadd_u32`]."] + #[inline(always)] + pub fn vpadd_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vpadd_u32(a, b) } + } + #[doc = "See [`arch::vpaddl_s8`]."] + #[inline(always)] + pub fn vpaddl_s8(self, a: int8x8_t) -> int16x4_t { + unsafe { vpaddl_s8(a) } + } + #[doc = "See [`arch::vpaddlq_s8`]."] + #[inline(always)] + pub fn vpaddlq_s8(self, a: int8x16_t) -> int16x8_t { + unsafe { vpaddlq_s8(a) } + } + #[doc = "See [`arch::vpaddl_s16`]."] + #[inline(always)] + pub fn vpaddl_s16(self, a: int16x4_t) -> int32x2_t { + unsafe { vpaddl_s16(a) } + } + #[doc = "See [`arch::vpaddlq_s16`]."] + #[inline(always)] + pub fn vpaddlq_s16(self, a: int16x8_t) -> int32x4_t { + unsafe { vpaddlq_s16(a) } + } + #[doc = 
"See [`arch::vpaddl_s32`]."] + #[inline(always)] + pub fn vpaddl_s32(self, a: int32x2_t) -> int64x1_t { + unsafe { vpaddl_s32(a) } + } + #[doc = "See [`arch::vpaddlq_s32`]."] + #[inline(always)] + pub fn vpaddlq_s32(self, a: int32x4_t) -> int64x2_t { + unsafe { vpaddlq_s32(a) } + } + #[doc = "See [`arch::vpaddl_u8`]."] + #[inline(always)] + pub fn vpaddl_u8(self, a: uint8x8_t) -> uint16x4_t { + unsafe { vpaddl_u8(a) } + } + #[doc = "See [`arch::vpaddlq_u8`]."] + #[inline(always)] + pub fn vpaddlq_u8(self, a: uint8x16_t) -> uint16x8_t { + unsafe { vpaddlq_u8(a) } + } + #[doc = "See [`arch::vpaddl_u16`]."] + #[inline(always)] + pub fn vpaddl_u16(self, a: uint16x4_t) -> uint32x2_t { + unsafe { vpaddl_u16(a) } + } + #[doc = "See [`arch::vpaddlq_u16`]."] + #[inline(always)] + pub fn vpaddlq_u16(self, a: uint16x8_t) -> uint32x4_t { + unsafe { vpaddlq_u16(a) } + } + #[doc = "See [`arch::vpaddl_u32`]."] + #[inline(always)] + pub fn vpaddl_u32(self, a: uint32x2_t) -> uint64x1_t { + unsafe { vpaddl_u32(a) } + } + #[doc = "See [`arch::vpaddlq_u32`]."] + #[inline(always)] + pub fn vpaddlq_u32(self, a: uint32x4_t) -> uint64x2_t { + unsafe { vpaddlq_u32(a) } + } + #[doc = "See [`arch::vpmax_f32`]."] + #[inline(always)] + pub fn vpmax_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vpmax_f32(a, b) } + } + #[doc = "See [`arch::vpmax_s8`]."] + #[inline(always)] + pub fn vpmax_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vpmax_s8(a, b) } + } + #[doc = "See [`arch::vpmax_s16`]."] + #[inline(always)] + pub fn vpmax_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vpmax_s16(a, b) } + } + #[doc = "See [`arch::vpmax_s32`]."] + #[inline(always)] + pub fn vpmax_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vpmax_s32(a, b) } + } + #[doc = "See [`arch::vpmax_u8`]."] + #[inline(always)] + pub fn vpmax_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vpmax_u8(a, b) } + } + #[doc = "See [`arch::vpmax_u16`]."] + 
#[inline(always)] + pub fn vpmax_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vpmax_u16(a, b) } + } + #[doc = "See [`arch::vpmax_u32`]."] + #[inline(always)] + pub fn vpmax_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vpmax_u32(a, b) } + } + #[doc = "See [`arch::vpmin_f32`]."] + #[inline(always)] + pub fn vpmin_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vpmin_f32(a, b) } + } + #[doc = "See [`arch::vpmin_s8`]."] + #[inline(always)] + pub fn vpmin_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vpmin_s8(a, b) } + } + #[doc = "See [`arch::vpmin_s16`]."] + #[inline(always)] + pub fn vpmin_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vpmin_s16(a, b) } + } + #[doc = "See [`arch::vpmin_s32`]."] + #[inline(always)] + pub fn vpmin_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vpmin_s32(a, b) } + } + #[doc = "See [`arch::vpmin_u8`]."] + #[inline(always)] + pub fn vpmin_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vpmin_u8(a, b) } + } + #[doc = "See [`arch::vpmin_u16`]."] + #[inline(always)] + pub fn vpmin_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vpmin_u16(a, b) } + } + #[doc = "See [`arch::vpmin_u32`]."] + #[inline(always)] + pub fn vpmin_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vpmin_u32(a, b) } + } + #[doc = "See [`arch::vqabs_s8`]."] + #[inline(always)] + pub fn vqabs_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vqabs_s8(a) } + } + #[doc = "See [`arch::vqabsq_s8`]."] + #[inline(always)] + pub fn vqabsq_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vqabsq_s8(a) } + } + #[doc = "See [`arch::vqabs_s16`]."] + #[inline(always)] + pub fn vqabs_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vqabs_s16(a) } + } + #[doc = "See [`arch::vqabsq_s16`]."] + #[inline(always)] + pub fn vqabsq_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vqabsq_s16(a) } + } + #[doc = "See 
[`arch::vqabs_s32`]."] + #[inline(always)] + pub fn vqabs_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vqabs_s32(a) } + } + #[doc = "See [`arch::vqabsq_s32`]."] + #[inline(always)] + pub fn vqabsq_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vqabsq_s32(a) } + } + #[doc = "See [`arch::vqadd_s8`]."] + #[inline(always)] + pub fn vqadd_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vqadd_s8(a, b) } + } + #[doc = "See [`arch::vqaddq_s8`]."] + #[inline(always)] + pub fn vqaddq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vqaddq_s8(a, b) } + } + #[doc = "See [`arch::vqadd_s16`]."] + #[inline(always)] + pub fn vqadd_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vqadd_s16(a, b) } + } + #[doc = "See [`arch::vqaddq_s16`]."] + #[inline(always)] + pub fn vqaddq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vqaddq_s16(a, b) } + } + #[doc = "See [`arch::vqadd_s32`]."] + #[inline(always)] + pub fn vqadd_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vqadd_s32(a, b) } + } + #[doc = "See [`arch::vqaddq_s32`]."] + #[inline(always)] + pub fn vqaddq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vqaddq_s32(a, b) } + } + #[doc = "See [`arch::vqadd_s64`]."] + #[inline(always)] + pub fn vqadd_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vqadd_s64(a, b) } + } + #[doc = "See [`arch::vqaddq_s64`]."] + #[inline(always)] + pub fn vqaddq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vqaddq_s64(a, b) } + } + #[doc = "See [`arch::vqadd_u8`]."] + #[inline(always)] + pub fn vqadd_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vqadd_u8(a, b) } + } + #[doc = "See [`arch::vqaddq_u8`]."] + #[inline(always)] + pub fn vqaddq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vqaddq_u8(a, b) } + } + #[doc = "See [`arch::vqadd_u16`]."] + #[inline(always)] + pub fn vqadd_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + 
unsafe { vqadd_u16(a, b) } + } + #[doc = "See [`arch::vqaddq_u16`]."] + #[inline(always)] + pub fn vqaddq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vqaddq_u16(a, b) } + } + #[doc = "See [`arch::vqadd_u32`]."] + #[inline(always)] + pub fn vqadd_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vqadd_u32(a, b) } + } + #[doc = "See [`arch::vqaddq_u32`]."] + #[inline(always)] + pub fn vqaddq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vqaddq_u32(a, b) } + } + #[doc = "See [`arch::vqadd_u64`]."] + #[inline(always)] + pub fn vqadd_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vqadd_u64(a, b) } + } + #[doc = "See [`arch::vqaddq_u64`]."] + #[inline(always)] + pub fn vqaddq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vqaddq_u64(a, b) } + } + #[doc = "See [`arch::vqdmlal_lane_s16`]."] + #[inline(always)] + pub fn vqdmlal_lane_s16( + self, + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, + ) -> int32x4_t { + unsafe { vqdmlal_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_lane_s32`]."] + #[inline(always)] + pub fn vqdmlal_lane_s32( + self, + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, + ) -> int64x2_t { + unsafe { vqdmlal_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_n_s16`]."] + #[inline(always)] + pub fn vqdmlal_n_s16(self, a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + unsafe { vqdmlal_n_s16(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_n_s32`]."] + #[inline(always)] + pub fn vqdmlal_n_s32(self, a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + unsafe { vqdmlal_n_s32(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_s16`]."] + #[inline(always)] + pub fn vqdmlal_s16(self, a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + unsafe { vqdmlal_s16(a, b, c) } + } + #[doc = "See [`arch::vqdmlal_s32`]."] + #[inline(always)] + pub fn vqdmlal_s32(self, a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + unsafe { vqdmlal_s32(a, b, c) } + 
} + #[doc = "See [`arch::vqdmlsl_lane_s16`]."] + #[inline(always)] + pub fn vqdmlsl_lane_s16( + self, + a: int32x4_t, + b: int16x4_t, + c: int16x4_t, + ) -> int32x4_t { + unsafe { vqdmlsl_lane_s16::(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_lane_s32`]."] + #[inline(always)] + pub fn vqdmlsl_lane_s32( + self, + a: int64x2_t, + b: int32x2_t, + c: int32x2_t, + ) -> int64x2_t { + unsafe { vqdmlsl_lane_s32::(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_n_s16`]."] + #[inline(always)] + pub fn vqdmlsl_n_s16(self, a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { + unsafe { vqdmlsl_n_s16(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_n_s32`]."] + #[inline(always)] + pub fn vqdmlsl_n_s32(self, a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { + unsafe { vqdmlsl_n_s32(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_s16`]."] + #[inline(always)] + pub fn vqdmlsl_s16(self, a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { + unsafe { vqdmlsl_s16(a, b, c) } + } + #[doc = "See [`arch::vqdmlsl_s32`]."] + #[inline(always)] + pub fn vqdmlsl_s32(self, a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { + unsafe { vqdmlsl_s32(a, b, c) } + } + #[doc = "See [`arch::vqdmulh_laneq_s16`]."] + #[inline(always)] + pub fn vqdmulh_laneq_s16(self, a: int16x4_t, b: int16x8_t) -> int16x4_t { + unsafe { vqdmulh_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vqdmulhq_laneq_s16`]."] + #[inline(always)] + pub fn vqdmulhq_laneq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vqdmulhq_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vqdmulh_laneq_s32`]."] + #[inline(always)] + pub fn vqdmulh_laneq_s32(self, a: int32x2_t, b: int32x4_t) -> int32x2_t { + unsafe { vqdmulh_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vqdmulhq_laneq_s32`]."] + #[inline(always)] + pub fn vqdmulhq_laneq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vqdmulhq_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vqdmulh_n_s16`]."] + #[inline(always)] + pub fn vqdmulh_n_s16(self, a: 
int16x4_t, b: i16) -> int16x4_t { + unsafe { vqdmulh_n_s16(a, b) } + } + #[doc = "See [`arch::vqdmulhq_n_s16`]."] + #[inline(always)] + pub fn vqdmulhq_n_s16(self, a: int16x8_t, b: i16) -> int16x8_t { + unsafe { vqdmulhq_n_s16(a, b) } + } + #[doc = "See [`arch::vqdmulh_n_s32`]."] + #[inline(always)] + pub fn vqdmulh_n_s32(self, a: int32x2_t, b: i32) -> int32x2_t { + unsafe { vqdmulh_n_s32(a, b) } + } + #[doc = "See [`arch::vqdmulhq_n_s32`]."] + #[inline(always)] + pub fn vqdmulhq_n_s32(self, a: int32x4_t, b: i32) -> int32x4_t { + unsafe { vqdmulhq_n_s32(a, b) } + } + #[doc = "See [`arch::vqdmulh_s16`]."] + #[inline(always)] + pub fn vqdmulh_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vqdmulh_s16(a, b) } + } + #[doc = "See [`arch::vqdmulhq_s16`]."] + #[inline(always)] + pub fn vqdmulhq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vqdmulhq_s16(a, b) } + } + #[doc = "See [`arch::vqdmulh_s32`]."] + #[inline(always)] + pub fn vqdmulh_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vqdmulh_s32(a, b) } + } + #[doc = "See [`arch::vqdmulhq_s32`]."] + #[inline(always)] + pub fn vqdmulhq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vqdmulhq_s32(a, b) } + } + #[doc = "See [`arch::vqdmull_lane_s16`]."] + #[inline(always)] + pub fn vqdmull_lane_s16(self, a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe { vqdmull_lane_s16::(a, b) } + } + #[doc = "See [`arch::vqdmull_lane_s32`]."] + #[inline(always)] + pub fn vqdmull_lane_s32(self, a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe { vqdmull_lane_s32::(a, b) } + } + #[doc = "See [`arch::vqdmull_n_s16`]."] + #[inline(always)] + pub fn vqdmull_n_s16(self, a: int16x4_t, b: i16) -> int32x4_t { + unsafe { vqdmull_n_s16(a, b) } + } + #[doc = "See [`arch::vqdmull_n_s32`]."] + #[inline(always)] + pub fn vqdmull_n_s32(self, a: int32x2_t, b: i32) -> int64x2_t { + unsafe { vqdmull_n_s32(a, b) } + } + #[doc = "See [`arch::vqdmull_s16`]."] + #[inline(always)] + pub 
fn vqdmull_s16(self, a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe { vqdmull_s16(a, b) } + } + #[doc = "See [`arch::vqdmull_s32`]."] + #[inline(always)] + pub fn vqdmull_s32(self, a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe { vqdmull_s32(a, b) } + } + #[doc = "See [`arch::vqmovn_s16`]."] + #[inline(always)] + pub fn vqmovn_s16(self, a: int16x8_t) -> int8x8_t { + unsafe { vqmovn_s16(a) } + } + #[doc = "See [`arch::vqmovn_s32`]."] + #[inline(always)] + pub fn vqmovn_s32(self, a: int32x4_t) -> int16x4_t { + unsafe { vqmovn_s32(a) } + } + #[doc = "See [`arch::vqmovn_s64`]."] + #[inline(always)] + pub fn vqmovn_s64(self, a: int64x2_t) -> int32x2_t { + unsafe { vqmovn_s64(a) } + } + #[doc = "See [`arch::vqmovn_u16`]."] + #[inline(always)] + pub fn vqmovn_u16(self, a: uint16x8_t) -> uint8x8_t { + unsafe { vqmovn_u16(a) } + } + #[doc = "See [`arch::vqmovn_u32`]."] + #[inline(always)] + pub fn vqmovn_u32(self, a: uint32x4_t) -> uint16x4_t { + unsafe { vqmovn_u32(a) } + } + #[doc = "See [`arch::vqmovn_u64`]."] + #[inline(always)] + pub fn vqmovn_u64(self, a: uint64x2_t) -> uint32x2_t { + unsafe { vqmovn_u64(a) } + } + #[doc = "See [`arch::vqmovun_s16`]."] + #[inline(always)] + pub fn vqmovun_s16(self, a: int16x8_t) -> uint8x8_t { + unsafe { vqmovun_s16(a) } + } + #[doc = "See [`arch::vqmovun_s32`]."] + #[inline(always)] + pub fn vqmovun_s32(self, a: int32x4_t) -> uint16x4_t { + unsafe { vqmovun_s32(a) } + } + #[doc = "See [`arch::vqmovun_s64`]."] + #[inline(always)] + pub fn vqmovun_s64(self, a: int64x2_t) -> uint32x2_t { + unsafe { vqmovun_s64(a) } + } + #[doc = "See [`arch::vqneg_s8`]."] + #[inline(always)] + pub fn vqneg_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vqneg_s8(a) } + } + #[doc = "See [`arch::vqnegq_s8`]."] + #[inline(always)] + pub fn vqnegq_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vqnegq_s8(a) } + } + #[doc = "See [`arch::vqneg_s16`]."] + #[inline(always)] + pub fn vqneg_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vqneg_s16(a) 
} + } + #[doc = "See [`arch::vqnegq_s16`]."] + #[inline(always)] + pub fn vqnegq_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vqnegq_s16(a) } + } + #[doc = "See [`arch::vqneg_s32`]."] + #[inline(always)] + pub fn vqneg_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vqneg_s32(a) } + } + #[doc = "See [`arch::vqnegq_s32`]."] + #[inline(always)] + pub fn vqnegq_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vqnegq_s32(a) } + } + #[doc = "See [`arch::vqrdmulh_lane_s16`]."] + #[inline(always)] + pub fn vqrdmulh_lane_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vqrdmulh_lane_s16::(a, b) } + } + #[doc = "See [`arch::vqrdmulh_lane_s32`]."] + #[inline(always)] + pub fn vqrdmulh_lane_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vqrdmulh_lane_s32::(a, b) } + } + #[doc = "See [`arch::vqrdmulh_laneq_s16`]."] + #[inline(always)] + pub fn vqrdmulh_laneq_s16(self, a: int16x4_t, b: int16x8_t) -> int16x4_t { + unsafe { vqrdmulh_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vqrdmulh_laneq_s32`]."] + #[inline(always)] + pub fn vqrdmulh_laneq_s32(self, a: int32x2_t, b: int32x4_t) -> int32x2_t { + unsafe { vqrdmulh_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vqrdmulhq_lane_s16`]."] + #[inline(always)] + pub fn vqrdmulhq_lane_s16(self, a: int16x8_t, b: int16x4_t) -> int16x8_t { + unsafe { vqrdmulhq_lane_s16::(a, b) } + } + #[doc = "See [`arch::vqrdmulhq_lane_s32`]."] + #[inline(always)] + pub fn vqrdmulhq_lane_s32(self, a: int32x4_t, b: int32x2_t) -> int32x4_t { + unsafe { vqrdmulhq_lane_s32::(a, b) } + } + #[doc = "See [`arch::vqrdmulhq_laneq_s16`]."] + #[inline(always)] + pub fn vqrdmulhq_laneq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vqrdmulhq_laneq_s16::(a, b) } + } + #[doc = "See [`arch::vqrdmulhq_laneq_s32`]."] + #[inline(always)] + pub fn vqrdmulhq_laneq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vqrdmulhq_laneq_s32::(a, b) } + } + #[doc = "See [`arch::vqrdmulh_n_s16`]."] + 
#[inline(always)] + pub fn vqrdmulh_n_s16(self, a: int16x4_t, b: i16) -> int16x4_t { + unsafe { vqrdmulh_n_s16(a, b) } + } + #[doc = "See [`arch::vqrdmulhq_n_s16`]."] + #[inline(always)] + pub fn vqrdmulhq_n_s16(self, a: int16x8_t, b: i16) -> int16x8_t { + unsafe { vqrdmulhq_n_s16(a, b) } + } + #[doc = "See [`arch::vqrdmulh_n_s32`]."] + #[inline(always)] + pub fn vqrdmulh_n_s32(self, a: int32x2_t, b: i32) -> int32x2_t { + unsafe { vqrdmulh_n_s32(a, b) } + } + #[doc = "See [`arch::vqrdmulhq_n_s32`]."] + #[inline(always)] + pub fn vqrdmulhq_n_s32(self, a: int32x4_t, b: i32) -> int32x4_t { + unsafe { vqrdmulhq_n_s32(a, b) } + } + #[doc = "See [`arch::vqrdmulh_s16`]."] + #[inline(always)] + pub fn vqrdmulh_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vqrdmulh_s16(a, b) } + } + #[doc = "See [`arch::vqrdmulhq_s16`]."] + #[inline(always)] + pub fn vqrdmulhq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vqrdmulhq_s16(a, b) } + } + #[doc = "See [`arch::vqrdmulh_s32`]."] + #[inline(always)] + pub fn vqrdmulh_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vqrdmulh_s32(a, b) } + } + #[doc = "See [`arch::vqrdmulhq_s32`]."] + #[inline(always)] + pub fn vqrdmulhq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vqrdmulhq_s32(a, b) } + } + #[doc = "See [`arch::vqrshl_s8`]."] + #[inline(always)] + pub fn vqrshl_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vqrshl_s8(a, b) } + } + #[doc = "See [`arch::vqrshlq_s8`]."] + #[inline(always)] + pub fn vqrshlq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vqrshlq_s8(a, b) } + } + #[doc = "See [`arch::vqrshl_s16`]."] + #[inline(always)] + pub fn vqrshl_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vqrshl_s16(a, b) } + } + #[doc = "See [`arch::vqrshlq_s16`]."] + #[inline(always)] + pub fn vqrshlq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vqrshlq_s16(a, b) } + } + #[doc = "See [`arch::vqrshl_s32`]."] + 
#[inline(always)] + pub fn vqrshl_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vqrshl_s32(a, b) } + } + #[doc = "See [`arch::vqrshlq_s32`]."] + #[inline(always)] + pub fn vqrshlq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vqrshlq_s32(a, b) } + } + #[doc = "See [`arch::vqrshl_s64`]."] + #[inline(always)] + pub fn vqrshl_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vqrshl_s64(a, b) } + } + #[doc = "See [`arch::vqrshlq_s64`]."] + #[inline(always)] + pub fn vqrshlq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vqrshlq_s64(a, b) } + } + #[doc = "See [`arch::vqrshl_u8`]."] + #[inline(always)] + pub fn vqrshl_u8(self, a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { vqrshl_u8(a, b) } + } + #[doc = "See [`arch::vqrshlq_u8`]."] + #[inline(always)] + pub fn vqrshlq_u8(self, a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { vqrshlq_u8(a, b) } + } + #[doc = "See [`arch::vqrshl_u16`]."] + #[inline(always)] + pub fn vqrshl_u16(self, a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { vqrshl_u16(a, b) } + } + #[doc = "See [`arch::vqrshlq_u16`]."] + #[inline(always)] + pub fn vqrshlq_u16(self, a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { vqrshlq_u16(a, b) } + } + #[doc = "See [`arch::vqrshl_u32`]."] + #[inline(always)] + pub fn vqrshl_u32(self, a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { vqrshl_u32(a, b) } + } + #[doc = "See [`arch::vqrshlq_u32`]."] + #[inline(always)] + pub fn vqrshlq_u32(self, a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { vqrshlq_u32(a, b) } + } + #[doc = "See [`arch::vqrshl_u64`]."] + #[inline(always)] + pub fn vqrshl_u64(self, a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { vqrshl_u64(a, b) } + } + #[doc = "See [`arch::vqrshlq_u64`]."] + #[inline(always)] + pub fn vqrshlq_u64(self, a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { vqrshlq_u64(a, b) } + } + #[doc = "See [`arch::vqshl_n_s8`]."] + #[inline(always)] + pub fn 
vqshl_n_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vqshl_n_s8::(a) } + } + #[doc = "See [`arch::vqshlq_n_s8`]."] + #[inline(always)] + pub fn vqshlq_n_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vqshlq_n_s8::(a) } + } + #[doc = "See [`arch::vqshl_n_s16`]."] + #[inline(always)] + pub fn vqshl_n_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vqshl_n_s16::(a) } + } + #[doc = "See [`arch::vqshlq_n_s16`]."] + #[inline(always)] + pub fn vqshlq_n_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vqshlq_n_s16::(a) } + } + #[doc = "See [`arch::vqshl_n_s32`]."] + #[inline(always)] + pub fn vqshl_n_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vqshl_n_s32::(a) } + } + #[doc = "See [`arch::vqshlq_n_s32`]."] + #[inline(always)] + pub fn vqshlq_n_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vqshlq_n_s32::(a) } + } + #[doc = "See [`arch::vqshl_n_s64`]."] + #[inline(always)] + pub fn vqshl_n_s64(self, a: int64x1_t) -> int64x1_t { + unsafe { vqshl_n_s64::(a) } + } + #[doc = "See [`arch::vqshlq_n_s64`]."] + #[inline(always)] + pub fn vqshlq_n_s64(self, a: int64x2_t) -> int64x2_t { + unsafe { vqshlq_n_s64::(a) } + } + #[doc = "See [`arch::vqshl_n_u8`]."] + #[inline(always)] + pub fn vqshl_n_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vqshl_n_u8::(a) } + } + #[doc = "See [`arch::vqshlq_n_u8`]."] + #[inline(always)] + pub fn vqshlq_n_u8(self, a: uint8x16_t) -> uint8x16_t { + unsafe { vqshlq_n_u8::(a) } + } + #[doc = "See [`arch::vqshl_n_u16`]."] + #[inline(always)] + pub fn vqshl_n_u16(self, a: uint16x4_t) -> uint16x4_t { + unsafe { vqshl_n_u16::(a) } + } + #[doc = "See [`arch::vqshlq_n_u16`]."] + #[inline(always)] + pub fn vqshlq_n_u16(self, a: uint16x8_t) -> uint16x8_t { + unsafe { vqshlq_n_u16::(a) } + } + #[doc = "See [`arch::vqshl_n_u32`]."] + #[inline(always)] + pub fn vqshl_n_u32(self, a: uint32x2_t) -> uint32x2_t { + unsafe { vqshl_n_u32::(a) } + } + #[doc = "See [`arch::vqshlq_n_u32`]."] + #[inline(always)] + pub fn vqshlq_n_u32(self, a: uint32x4_t) 
-> uint32x4_t { + unsafe { vqshlq_n_u32::(a) } + } + #[doc = "See [`arch::vqshl_n_u64`]."] + #[inline(always)] + pub fn vqshl_n_u64(self, a: uint64x1_t) -> uint64x1_t { + unsafe { vqshl_n_u64::(a) } + } + #[doc = "See [`arch::vqshlq_n_u64`]."] + #[inline(always)] + pub fn vqshlq_n_u64(self, a: uint64x2_t) -> uint64x2_t { + unsafe { vqshlq_n_u64::(a) } + } + #[doc = "See [`arch::vqshl_s8`]."] + #[inline(always)] + pub fn vqshl_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vqshl_s8(a, b) } + } + #[doc = "See [`arch::vqshlq_s8`]."] + #[inline(always)] + pub fn vqshlq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vqshlq_s8(a, b) } + } + #[doc = "See [`arch::vqshl_s16`]."] + #[inline(always)] + pub fn vqshl_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vqshl_s16(a, b) } + } + #[doc = "See [`arch::vqshlq_s16`]."] + #[inline(always)] + pub fn vqshlq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vqshlq_s16(a, b) } + } + #[doc = "See [`arch::vqshl_s32`]."] + #[inline(always)] + pub fn vqshl_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vqshl_s32(a, b) } + } + #[doc = "See [`arch::vqshlq_s32`]."] + #[inline(always)] + pub fn vqshlq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vqshlq_s32(a, b) } + } + #[doc = "See [`arch::vqshl_s64`]."] + #[inline(always)] + pub fn vqshl_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vqshl_s64(a, b) } + } + #[doc = "See [`arch::vqshlq_s64`]."] + #[inline(always)] + pub fn vqshlq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vqshlq_s64(a, b) } + } + #[doc = "See [`arch::vqshl_u8`]."] + #[inline(always)] + pub fn vqshl_u8(self, a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { vqshl_u8(a, b) } + } + #[doc = "See [`arch::vqshlq_u8`]."] + #[inline(always)] + pub fn vqshlq_u8(self, a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { vqshlq_u8(a, b) } + } + #[doc = "See [`arch::vqshl_u16`]."] + 
#[inline(always)] + pub fn vqshl_u16(self, a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { vqshl_u16(a, b) } + } + #[doc = "See [`arch::vqshlq_u16`]."] + #[inline(always)] + pub fn vqshlq_u16(self, a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { vqshlq_u16(a, b) } + } + #[doc = "See [`arch::vqshl_u32`]."] + #[inline(always)] + pub fn vqshl_u32(self, a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { vqshl_u32(a, b) } + } + #[doc = "See [`arch::vqshlq_u32`]."] + #[inline(always)] + pub fn vqshlq_u32(self, a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { vqshlq_u32(a, b) } + } + #[doc = "See [`arch::vqshl_u64`]."] + #[inline(always)] + pub fn vqshl_u64(self, a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { vqshl_u64(a, b) } + } + #[doc = "See [`arch::vqshlq_u64`]."] + #[inline(always)] + pub fn vqshlq_u64(self, a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { vqshlq_u64(a, b) } + } + #[doc = "See [`arch::vqsub_s8`]."] + #[inline(always)] + pub fn vqsub_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vqsub_s8(a, b) } + } + #[doc = "See [`arch::vqsubq_s8`]."] + #[inline(always)] + pub fn vqsubq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vqsubq_s8(a, b) } + } + #[doc = "See [`arch::vqsub_s16`]."] + #[inline(always)] + pub fn vqsub_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vqsub_s16(a, b) } + } + #[doc = "See [`arch::vqsubq_s16`]."] + #[inline(always)] + pub fn vqsubq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vqsubq_s16(a, b) } + } + #[doc = "See [`arch::vqsub_s32`]."] + #[inline(always)] + pub fn vqsub_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vqsub_s32(a, b) } + } + #[doc = "See [`arch::vqsubq_s32`]."] + #[inline(always)] + pub fn vqsubq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vqsubq_s32(a, b) } + } + #[doc = "See [`arch::vqsub_s64`]."] + #[inline(always)] + pub fn vqsub_s64(self, a: int64x1_t, b: int64x1_t) 
-> int64x1_t { + unsafe { vqsub_s64(a, b) } + } + #[doc = "See [`arch::vqsubq_s64`]."] + #[inline(always)] + pub fn vqsubq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vqsubq_s64(a, b) } + } + #[doc = "See [`arch::vqsub_u8`]."] + #[inline(always)] + pub fn vqsub_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vqsub_u8(a, b) } + } + #[doc = "See [`arch::vqsubq_u8`]."] + #[inline(always)] + pub fn vqsubq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vqsubq_u8(a, b) } + } + #[doc = "See [`arch::vqsub_u16`]."] + #[inline(always)] + pub fn vqsub_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vqsub_u16(a, b) } + } + #[doc = "See [`arch::vqsubq_u16`]."] + #[inline(always)] + pub fn vqsubq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vqsubq_u16(a, b) } + } + #[doc = "See [`arch::vqsub_u32`]."] + #[inline(always)] + pub fn vqsub_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vqsub_u32(a, b) } + } + #[doc = "See [`arch::vqsubq_u32`]."] + #[inline(always)] + pub fn vqsubq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vqsubq_u32(a, b) } + } + #[doc = "See [`arch::vqsub_u64`]."] + #[inline(always)] + pub fn vqsub_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vqsub_u64(a, b) } + } + #[doc = "See [`arch::vqsubq_u64`]."] + #[inline(always)] + pub fn vqsubq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vqsubq_u64(a, b) } + } + #[doc = "See [`arch::vraddhn_high_s16`]."] + #[inline(always)] + pub fn vraddhn_high_s16(self, a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + unsafe { vraddhn_high_s16(a, b, c) } + } + #[doc = "See [`arch::vraddhn_high_s32`]."] + #[inline(always)] + pub fn vraddhn_high_s32(self, a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + unsafe { vraddhn_high_s32(a, b, c) } + } + #[doc = "See [`arch::vraddhn_high_s64`]."] + #[inline(always)] + pub fn vraddhn_high_s64(self, 
a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + unsafe { vraddhn_high_s64(a, b, c) } + } + #[doc = "See [`arch::vraddhn_high_u16`]."] + #[inline(always)] + pub fn vraddhn_high_u16(self, a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + unsafe { vraddhn_high_u16(a, b, c) } + } + #[doc = "See [`arch::vraddhn_high_u32`]."] + #[inline(always)] + pub fn vraddhn_high_u32(self, a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + unsafe { vraddhn_high_u32(a, b, c) } + } + #[doc = "See [`arch::vraddhn_high_u64`]."] + #[inline(always)] + pub fn vraddhn_high_u64(self, a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + unsafe { vraddhn_high_u64(a, b, c) } + } + #[doc = "See [`arch::vraddhn_s16`]."] + #[inline(always)] + pub fn vraddhn_s16(self, a: int16x8_t, b: int16x8_t) -> int8x8_t { + unsafe { vraddhn_s16(a, b) } + } + #[doc = "See [`arch::vraddhn_s32`]."] + #[inline(always)] + pub fn vraddhn_s32(self, a: int32x4_t, b: int32x4_t) -> int16x4_t { + unsafe { vraddhn_s32(a, b) } + } + #[doc = "See [`arch::vraddhn_s64`]."] + #[inline(always)] + pub fn vraddhn_s64(self, a: int64x2_t, b: int64x2_t) -> int32x2_t { + unsafe { vraddhn_s64(a, b) } + } + #[doc = "See [`arch::vraddhn_u16`]."] + #[inline(always)] + pub fn vraddhn_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + unsafe { vraddhn_u16(a, b) } + } + #[doc = "See [`arch::vraddhn_u32`]."] + #[inline(always)] + pub fn vraddhn_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + unsafe { vraddhn_u32(a, b) } + } + #[doc = "See [`arch::vraddhn_u64`]."] + #[inline(always)] + pub fn vraddhn_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + unsafe { vraddhn_u64(a, b) } + } + #[doc = "See [`arch::vrecpe_f32`]."] + #[inline(always)] + pub fn vrecpe_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vrecpe_f32(a) } + } + #[doc = "See [`arch::vrecpeq_f32`]."] + #[inline(always)] + pub fn vrecpeq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vrecpeq_f32(a) } + } 
+ #[doc = "See [`arch::vrecpe_u32`]."] + #[inline(always)] + pub fn vrecpe_u32(self, a: uint32x2_t) -> uint32x2_t { + unsafe { vrecpe_u32(a) } + } + #[doc = "See [`arch::vrecpeq_u32`]."] + #[inline(always)] + pub fn vrecpeq_u32(self, a: uint32x4_t) -> uint32x4_t { + unsafe { vrecpeq_u32(a) } + } + #[doc = "See [`arch::vrecps_f32`]."] + #[inline(always)] + pub fn vrecps_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vrecps_f32(a, b) } + } + #[doc = "See [`arch::vrecpsq_f32`]."] + #[inline(always)] + pub fn vrecpsq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vrecpsq_f32(a, b) } + } + #[doc = "See [`arch::vreinterpretq_f32_p128`]."] + #[inline(always)] + pub fn vreinterpretq_f32_p128(self, a: p128) -> float32x4_t { + unsafe { vreinterpretq_f32_p128(a) } + } + #[doc = "See [`arch::vreinterpret_s8_f32`]."] + #[inline(always)] + pub fn vreinterpret_s8_f32(self, a: float32x2_t) -> int8x8_t { + unsafe { vreinterpret_s8_f32(a) } + } + #[doc = "See [`arch::vreinterpret_s16_f32`]."] + #[inline(always)] + pub fn vreinterpret_s16_f32(self, a: float32x2_t) -> int16x4_t { + unsafe { vreinterpret_s16_f32(a) } + } + #[doc = "See [`arch::vreinterpret_s32_f32`]."] + #[inline(always)] + pub fn vreinterpret_s32_f32(self, a: float32x2_t) -> int32x2_t { + unsafe { vreinterpret_s32_f32(a) } + } + #[doc = "See [`arch::vreinterpret_s64_f32`]."] + #[inline(always)] + pub fn vreinterpret_s64_f32(self, a: float32x2_t) -> int64x1_t { + unsafe { vreinterpret_s64_f32(a) } + } + #[doc = "See [`arch::vreinterpret_u8_f32`]."] + #[inline(always)] + pub fn vreinterpret_u8_f32(self, a: float32x2_t) -> uint8x8_t { + unsafe { vreinterpret_u8_f32(a) } + } + #[doc = "See [`arch::vreinterpret_u16_f32`]."] + #[inline(always)] + pub fn vreinterpret_u16_f32(self, a: float32x2_t) -> uint16x4_t { + unsafe { vreinterpret_u16_f32(a) } + } + #[doc = "See [`arch::vreinterpret_u32_f32`]."] + #[inline(always)] + pub fn vreinterpret_u32_f32(self, a: float32x2_t) -> 
uint32x2_t { + unsafe { vreinterpret_u32_f32(a) } + } + #[doc = "See [`arch::vreinterpret_u64_f32`]."] + #[inline(always)] + pub fn vreinterpret_u64_f32(self, a: float32x2_t) -> uint64x1_t { + unsafe { vreinterpret_u64_f32(a) } + } + #[doc = "See [`arch::vreinterpret_p8_f32`]."] + #[inline(always)] + pub fn vreinterpret_p8_f32(self, a: float32x2_t) -> poly8x8_t { + unsafe { vreinterpret_p8_f32(a) } + } + #[doc = "See [`arch::vreinterpret_p16_f32`]."] + #[inline(always)] + pub fn vreinterpret_p16_f32(self, a: float32x2_t) -> poly16x4_t { + unsafe { vreinterpret_p16_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_f32`]."] + #[inline(always)] + pub fn vreinterpretq_p128_f32(self, a: float32x4_t) -> p128 { + unsafe { vreinterpretq_p128_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_s8_f32`]."] + #[inline(always)] + pub fn vreinterpretq_s8_f32(self, a: float32x4_t) -> int8x16_t { + unsafe { vreinterpretq_s8_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_f32`]."] + #[inline(always)] + pub fn vreinterpretq_s16_f32(self, a: float32x4_t) -> int16x8_t { + unsafe { vreinterpretq_s16_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_f32`]."] + #[inline(always)] + pub fn vreinterpretq_s32_f32(self, a: float32x4_t) -> int32x4_t { + unsafe { vreinterpretq_s32_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_f32`]."] + #[inline(always)] + pub fn vreinterpretq_s64_f32(self, a: float32x4_t) -> int64x2_t { + unsafe { vreinterpretq_s64_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_f32`]."] + #[inline(always)] + pub fn vreinterpretq_u8_f32(self, a: float32x4_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_f32`]."] + #[inline(always)] + pub fn vreinterpretq_u16_f32(self, a: float32x4_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_f32`]."] + #[inline(always)] + pub fn vreinterpretq_u32_f32(self, a: float32x4_t) -> uint32x4_t { + unsafe { 
vreinterpretq_u32_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_f32`]."] + #[inline(always)] + pub fn vreinterpretq_u64_f32(self, a: float32x4_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_f32`]."] + #[inline(always)] + pub fn vreinterpretq_p8_f32(self, a: float32x4_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_f32(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_f32`]."] + #[inline(always)] + pub fn vreinterpretq_p16_f32(self, a: float32x4_t) -> poly16x8_t { + unsafe { vreinterpretq_p16_f32(a) } + } + #[doc = "See [`arch::vreinterpret_f32_s8`]."] + #[inline(always)] + pub fn vreinterpret_f32_s8(self, a: int8x8_t) -> float32x2_t { + unsafe { vreinterpret_f32_s8(a) } + } + #[doc = "See [`arch::vreinterpret_s16_s8`]."] + #[inline(always)] + pub fn vreinterpret_s16_s8(self, a: int8x8_t) -> int16x4_t { + unsafe { vreinterpret_s16_s8(a) } + } + #[doc = "See [`arch::vreinterpret_s32_s8`]."] + #[inline(always)] + pub fn vreinterpret_s32_s8(self, a: int8x8_t) -> int32x2_t { + unsafe { vreinterpret_s32_s8(a) } + } + #[doc = "See [`arch::vreinterpret_s64_s8`]."] + #[inline(always)] + pub fn vreinterpret_s64_s8(self, a: int8x8_t) -> int64x1_t { + unsafe { vreinterpret_s64_s8(a) } + } + #[doc = "See [`arch::vreinterpret_u8_s8`]."] + #[inline(always)] + pub fn vreinterpret_u8_s8(self, a: int8x8_t) -> uint8x8_t { + unsafe { vreinterpret_u8_s8(a) } + } + #[doc = "See [`arch::vreinterpret_u16_s8`]."] + #[inline(always)] + pub fn vreinterpret_u16_s8(self, a: int8x8_t) -> uint16x4_t { + unsafe { vreinterpret_u16_s8(a) } + } + #[doc = "See [`arch::vreinterpret_u32_s8`]."] + #[inline(always)] + pub fn vreinterpret_u32_s8(self, a: int8x8_t) -> uint32x2_t { + unsafe { vreinterpret_u32_s8(a) } + } + #[doc = "See [`arch::vreinterpret_u64_s8`]."] + #[inline(always)] + pub fn vreinterpret_u64_s8(self, a: int8x8_t) -> uint64x1_t { + unsafe { vreinterpret_u64_s8(a) } + } + #[doc = "See [`arch::vreinterpret_p8_s8`]."] + 
#[inline(always)] + pub fn vreinterpret_p8_s8(self, a: int8x8_t) -> poly8x8_t { + unsafe { vreinterpret_p8_s8(a) } + } + #[doc = "See [`arch::vreinterpret_p16_s8`]."] + #[inline(always)] + pub fn vreinterpret_p16_s8(self, a: int8x8_t) -> poly16x4_t { + unsafe { vreinterpret_p16_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_s8`]."] + #[inline(always)] + pub fn vreinterpretq_f32_s8(self, a: int8x16_t) -> float32x4_t { + unsafe { vreinterpretq_f32_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_s8`]."] + #[inline(always)] + pub fn vreinterpretq_s16_s8(self, a: int8x16_t) -> int16x8_t { + unsafe { vreinterpretq_s16_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_s8`]."] + #[inline(always)] + pub fn vreinterpretq_s32_s8(self, a: int8x16_t) -> int32x4_t { + unsafe { vreinterpretq_s32_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_s8`]."] + #[inline(always)] + pub fn vreinterpretq_s64_s8(self, a: int8x16_t) -> int64x2_t { + unsafe { vreinterpretq_s64_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_s8`]."] + #[inline(always)] + pub fn vreinterpretq_u8_s8(self, a: int8x16_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_s8`]."] + #[inline(always)] + pub fn vreinterpretq_u16_s8(self, a: int8x16_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_s8`]."] + #[inline(always)] + pub fn vreinterpretq_u32_s8(self, a: int8x16_t) -> uint32x4_t { + unsafe { vreinterpretq_u32_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_s8`]."] + #[inline(always)] + pub fn vreinterpretq_u64_s8(self, a: int8x16_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_s8`]."] + #[inline(always)] + pub fn vreinterpretq_p8_s8(self, a: int8x16_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_s8`]."] + #[inline(always)] + pub fn vreinterpretq_p16_s8(self, a: int8x16_t) -> poly16x8_t 
{ + unsafe { vreinterpretq_p16_s8(a) } + } + #[doc = "See [`arch::vreinterpret_f32_s16`]."] + #[inline(always)] + pub fn vreinterpret_f32_s16(self, a: int16x4_t) -> float32x2_t { + unsafe { vreinterpret_f32_s16(a) } + } + #[doc = "See [`arch::vreinterpret_s8_s16`]."] + #[inline(always)] + pub fn vreinterpret_s8_s16(self, a: int16x4_t) -> int8x8_t { + unsafe { vreinterpret_s8_s16(a) } + } + #[doc = "See [`arch::vreinterpret_s32_s16`]."] + #[inline(always)] + pub fn vreinterpret_s32_s16(self, a: int16x4_t) -> int32x2_t { + unsafe { vreinterpret_s32_s16(a) } + } + #[doc = "See [`arch::vreinterpret_s64_s16`]."] + #[inline(always)] + pub fn vreinterpret_s64_s16(self, a: int16x4_t) -> int64x1_t { + unsafe { vreinterpret_s64_s16(a) } + } + #[doc = "See [`arch::vreinterpret_u8_s16`]."] + #[inline(always)] + pub fn vreinterpret_u8_s16(self, a: int16x4_t) -> uint8x8_t { + unsafe { vreinterpret_u8_s16(a) } + } + #[doc = "See [`arch::vreinterpret_u16_s16`]."] + #[inline(always)] + pub fn vreinterpret_u16_s16(self, a: int16x4_t) -> uint16x4_t { + unsafe { vreinterpret_u16_s16(a) } + } + #[doc = "See [`arch::vreinterpret_u32_s16`]."] + #[inline(always)] + pub fn vreinterpret_u32_s16(self, a: int16x4_t) -> uint32x2_t { + unsafe { vreinterpret_u32_s16(a) } + } + #[doc = "See [`arch::vreinterpret_u64_s16`]."] + #[inline(always)] + pub fn vreinterpret_u64_s16(self, a: int16x4_t) -> uint64x1_t { + unsafe { vreinterpret_u64_s16(a) } + } + #[doc = "See [`arch::vreinterpret_p8_s16`]."] + #[inline(always)] + pub fn vreinterpret_p8_s16(self, a: int16x4_t) -> poly8x8_t { + unsafe { vreinterpret_p8_s16(a) } + } + #[doc = "See [`arch::vreinterpret_p16_s16`]."] + #[inline(always)] + pub fn vreinterpret_p16_s16(self, a: int16x4_t) -> poly16x4_t { + unsafe { vreinterpret_p16_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_s16`]."] + #[inline(always)] + pub fn vreinterpretq_f32_s16(self, a: int16x8_t) -> float32x4_t { + unsafe { vreinterpretq_f32_s16(a) } + } + #[doc = "See 
[`arch::vreinterpretq_s8_s16`]."] + #[inline(always)] + pub fn vreinterpretq_s8_s16(self, a: int16x8_t) -> int8x16_t { + unsafe { vreinterpretq_s8_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_s16`]."] + #[inline(always)] + pub fn vreinterpretq_s32_s16(self, a: int16x8_t) -> int32x4_t { + unsafe { vreinterpretq_s32_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_s16`]."] + #[inline(always)] + pub fn vreinterpretq_s64_s16(self, a: int16x8_t) -> int64x2_t { + unsafe { vreinterpretq_s64_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_s16`]."] + #[inline(always)] + pub fn vreinterpretq_u8_s16(self, a: int16x8_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_s16`]."] + #[inline(always)] + pub fn vreinterpretq_u16_s16(self, a: int16x8_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_s16`]."] + #[inline(always)] + pub fn vreinterpretq_u32_s16(self, a: int16x8_t) -> uint32x4_t { + unsafe { vreinterpretq_u32_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_s16`]."] + #[inline(always)] + pub fn vreinterpretq_u64_s16(self, a: int16x8_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_s16`]."] + #[inline(always)] + pub fn vreinterpretq_p8_s16(self, a: int16x8_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_s16`]."] + #[inline(always)] + pub fn vreinterpretq_p16_s16(self, a: int16x8_t) -> poly16x8_t { + unsafe { vreinterpretq_p16_s16(a) } + } + #[doc = "See [`arch::vreinterpret_f32_s32`]."] + #[inline(always)] + pub fn vreinterpret_f32_s32(self, a: int32x2_t) -> float32x2_t { + unsafe { vreinterpret_f32_s32(a) } + } + #[doc = "See [`arch::vreinterpret_s8_s32`]."] + #[inline(always)] + pub fn vreinterpret_s8_s32(self, a: int32x2_t) -> int8x8_t { + unsafe { vreinterpret_s8_s32(a) } + } + #[doc = "See [`arch::vreinterpret_s16_s32`]."] + 
#[inline(always)] + pub fn vreinterpret_s16_s32(self, a: int32x2_t) -> int16x4_t { + unsafe { vreinterpret_s16_s32(a) } + } + #[doc = "See [`arch::vreinterpret_s64_s32`]."] + #[inline(always)] + pub fn vreinterpret_s64_s32(self, a: int32x2_t) -> int64x1_t { + unsafe { vreinterpret_s64_s32(a) } + } + #[doc = "See [`arch::vreinterpret_u8_s32`]."] + #[inline(always)] + pub fn vreinterpret_u8_s32(self, a: int32x2_t) -> uint8x8_t { + unsafe { vreinterpret_u8_s32(a) } + } + #[doc = "See [`arch::vreinterpret_u16_s32`]."] + #[inline(always)] + pub fn vreinterpret_u16_s32(self, a: int32x2_t) -> uint16x4_t { + unsafe { vreinterpret_u16_s32(a) } + } + #[doc = "See [`arch::vreinterpret_u32_s32`]."] + #[inline(always)] + pub fn vreinterpret_u32_s32(self, a: int32x2_t) -> uint32x2_t { + unsafe { vreinterpret_u32_s32(a) } + } + #[doc = "See [`arch::vreinterpret_u64_s32`]."] + #[inline(always)] + pub fn vreinterpret_u64_s32(self, a: int32x2_t) -> uint64x1_t { + unsafe { vreinterpret_u64_s32(a) } + } + #[doc = "See [`arch::vreinterpret_p8_s32`]."] + #[inline(always)] + pub fn vreinterpret_p8_s32(self, a: int32x2_t) -> poly8x8_t { + unsafe { vreinterpret_p8_s32(a) } + } + #[doc = "See [`arch::vreinterpret_p16_s32`]."] + #[inline(always)] + pub fn vreinterpret_p16_s32(self, a: int32x2_t) -> poly16x4_t { + unsafe { vreinterpret_p16_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_s32`]."] + #[inline(always)] + pub fn vreinterpretq_f32_s32(self, a: int32x4_t) -> float32x4_t { + unsafe { vreinterpretq_f32_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_s8_s32`]."] + #[inline(always)] + pub fn vreinterpretq_s8_s32(self, a: int32x4_t) -> int8x16_t { + unsafe { vreinterpretq_s8_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_s32`]."] + #[inline(always)] + pub fn vreinterpretq_s16_s32(self, a: int32x4_t) -> int16x8_t { + unsafe { vreinterpretq_s16_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_s32`]."] + #[inline(always)] + pub fn vreinterpretq_s64_s32(self, a: 
int32x4_t) -> int64x2_t { + unsafe { vreinterpretq_s64_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_s32`]."] + #[inline(always)] + pub fn vreinterpretq_u8_s32(self, a: int32x4_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_s32`]."] + #[inline(always)] + pub fn vreinterpretq_u16_s32(self, a: int32x4_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_s32`]."] + #[inline(always)] + pub fn vreinterpretq_u32_s32(self, a: int32x4_t) -> uint32x4_t { + unsafe { vreinterpretq_u32_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_s32`]."] + #[inline(always)] + pub fn vreinterpretq_u64_s32(self, a: int32x4_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_s32`]."] + #[inline(always)] + pub fn vreinterpretq_p8_s32(self, a: int32x4_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_s32`]."] + #[inline(always)] + pub fn vreinterpretq_p16_s32(self, a: int32x4_t) -> poly16x8_t { + unsafe { vreinterpretq_p16_s32(a) } + } + #[doc = "See [`arch::vreinterpret_f32_s64`]."] + #[inline(always)] + pub fn vreinterpret_f32_s64(self, a: int64x1_t) -> float32x2_t { + unsafe { vreinterpret_f32_s64(a) } + } + #[doc = "See [`arch::vreinterpret_s8_s64`]."] + #[inline(always)] + pub fn vreinterpret_s8_s64(self, a: int64x1_t) -> int8x8_t { + unsafe { vreinterpret_s8_s64(a) } + } + #[doc = "See [`arch::vreinterpret_s16_s64`]."] + #[inline(always)] + pub fn vreinterpret_s16_s64(self, a: int64x1_t) -> int16x4_t { + unsafe { vreinterpret_s16_s64(a) } + } + #[doc = "See [`arch::vreinterpret_s32_s64`]."] + #[inline(always)] + pub fn vreinterpret_s32_s64(self, a: int64x1_t) -> int32x2_t { + unsafe { vreinterpret_s32_s64(a) } + } + #[doc = "See [`arch::vreinterpret_u8_s64`]."] + #[inline(always)] + pub fn vreinterpret_u8_s64(self, a: int64x1_t) -> uint8x8_t { + unsafe { vreinterpret_u8_s64(a) 
} + } + #[doc = "See [`arch::vreinterpret_u16_s64`]."] + #[inline(always)] + pub fn vreinterpret_u16_s64(self, a: int64x1_t) -> uint16x4_t { + unsafe { vreinterpret_u16_s64(a) } + } + #[doc = "See [`arch::vreinterpret_u32_s64`]."] + #[inline(always)] + pub fn vreinterpret_u32_s64(self, a: int64x1_t) -> uint32x2_t { + unsafe { vreinterpret_u32_s64(a) } + } + #[doc = "See [`arch::vreinterpret_u64_s64`]."] + #[inline(always)] + pub fn vreinterpret_u64_s64(self, a: int64x1_t) -> uint64x1_t { + unsafe { vreinterpret_u64_s64(a) } + } + #[doc = "See [`arch::vreinterpret_p8_s64`]."] + #[inline(always)] + pub fn vreinterpret_p8_s64(self, a: int64x1_t) -> poly8x8_t { + unsafe { vreinterpret_p8_s64(a) } + } + #[doc = "See [`arch::vreinterpret_p16_s64`]."] + #[inline(always)] + pub fn vreinterpret_p16_s64(self, a: int64x1_t) -> poly16x4_t { + unsafe { vreinterpret_p16_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_s64`]."] + #[inline(always)] + pub fn vreinterpretq_f32_s64(self, a: int64x2_t) -> float32x4_t { + unsafe { vreinterpretq_f32_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_s8_s64`]."] + #[inline(always)] + pub fn vreinterpretq_s8_s64(self, a: int64x2_t) -> int8x16_t { + unsafe { vreinterpretq_s8_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_s64`]."] + #[inline(always)] + pub fn vreinterpretq_s16_s64(self, a: int64x2_t) -> int16x8_t { + unsafe { vreinterpretq_s16_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_s64`]."] + #[inline(always)] + pub fn vreinterpretq_s32_s64(self, a: int64x2_t) -> int32x4_t { + unsafe { vreinterpretq_s32_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_s64`]."] + #[inline(always)] + pub fn vreinterpretq_u8_s64(self, a: int64x2_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_s64`]."] + #[inline(always)] + pub fn vreinterpretq_u16_s64(self, a: int64x2_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_s64`]."] + 
#[inline(always)] + pub fn vreinterpretq_u32_s64(self, a: int64x2_t) -> uint32x4_t { + unsafe { vreinterpretq_u32_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_s64`]."] + #[inline(always)] + pub fn vreinterpretq_u64_s64(self, a: int64x2_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_s64`]."] + #[inline(always)] + pub fn vreinterpretq_p8_s64(self, a: int64x2_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_s64(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_s64`]."] + #[inline(always)] + pub fn vreinterpretq_p16_s64(self, a: int64x2_t) -> poly16x8_t { + unsafe { vreinterpretq_p16_s64(a) } + } + #[doc = "See [`arch::vreinterpret_f32_u8`]."] + #[inline(always)] + pub fn vreinterpret_f32_u8(self, a: uint8x8_t) -> float32x2_t { + unsafe { vreinterpret_f32_u8(a) } + } + #[doc = "See [`arch::vreinterpret_s8_u8`]."] + #[inline(always)] + pub fn vreinterpret_s8_u8(self, a: uint8x8_t) -> int8x8_t { + unsafe { vreinterpret_s8_u8(a) } + } + #[doc = "See [`arch::vreinterpret_s16_u8`]."] + #[inline(always)] + pub fn vreinterpret_s16_u8(self, a: uint8x8_t) -> int16x4_t { + unsafe { vreinterpret_s16_u8(a) } + } + #[doc = "See [`arch::vreinterpret_s32_u8`]."] + #[inline(always)] + pub fn vreinterpret_s32_u8(self, a: uint8x8_t) -> int32x2_t { + unsafe { vreinterpret_s32_u8(a) } + } + #[doc = "See [`arch::vreinterpret_s64_u8`]."] + #[inline(always)] + pub fn vreinterpret_s64_u8(self, a: uint8x8_t) -> int64x1_t { + unsafe { vreinterpret_s64_u8(a) } + } + #[doc = "See [`arch::vreinterpret_u16_u8`]."] + #[inline(always)] + pub fn vreinterpret_u16_u8(self, a: uint8x8_t) -> uint16x4_t { + unsafe { vreinterpret_u16_u8(a) } + } + #[doc = "See [`arch::vreinterpret_u32_u8`]."] + #[inline(always)] + pub fn vreinterpret_u32_u8(self, a: uint8x8_t) -> uint32x2_t { + unsafe { vreinterpret_u32_u8(a) } + } + #[doc = "See [`arch::vreinterpret_u64_u8`]."] + #[inline(always)] + pub fn vreinterpret_u64_u8(self, a: uint8x8_t) -> uint64x1_t { + 
unsafe { vreinterpret_u64_u8(a) } + } + #[doc = "See [`arch::vreinterpret_p8_u8`]."] + #[inline(always)] + pub fn vreinterpret_p8_u8(self, a: uint8x8_t) -> poly8x8_t { + unsafe { vreinterpret_p8_u8(a) } + } + #[doc = "See [`arch::vreinterpret_p16_u8`]."] + #[inline(always)] + pub fn vreinterpret_p16_u8(self, a: uint8x8_t) -> poly16x4_t { + unsafe { vreinterpret_p16_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_u8`]."] + #[inline(always)] + pub fn vreinterpretq_f32_u8(self, a: uint8x16_t) -> float32x4_t { + unsafe { vreinterpretq_f32_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_s8_u8`]."] + #[inline(always)] + pub fn vreinterpretq_s8_u8(self, a: uint8x16_t) -> int8x16_t { + unsafe { vreinterpretq_s8_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_u8`]."] + #[inline(always)] + pub fn vreinterpretq_s16_u8(self, a: uint8x16_t) -> int16x8_t { + unsafe { vreinterpretq_s16_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_u8`]."] + #[inline(always)] + pub fn vreinterpretq_s32_u8(self, a: uint8x16_t) -> int32x4_t { + unsafe { vreinterpretq_s32_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_u8`]."] + #[inline(always)] + pub fn vreinterpretq_s64_u8(self, a: uint8x16_t) -> int64x2_t { + unsafe { vreinterpretq_s64_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_u8`]."] + #[inline(always)] + pub fn vreinterpretq_u16_u8(self, a: uint8x16_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_u8`]."] + #[inline(always)] + pub fn vreinterpretq_u32_u8(self, a: uint8x16_t) -> uint32x4_t { + unsafe { vreinterpretq_u32_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_u8`]."] + #[inline(always)] + pub fn vreinterpretq_u64_u8(self, a: uint8x16_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_u8`]."] + #[inline(always)] + pub fn vreinterpretq_p8_u8(self, a: uint8x16_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_u8(a) } + } + #[doc = "See 
[`arch::vreinterpretq_p16_u8`]."] + #[inline(always)] + pub fn vreinterpretq_p16_u8(self, a: uint8x16_t) -> poly16x8_t { + unsafe { vreinterpretq_p16_u8(a) } + } + #[doc = "See [`arch::vreinterpret_f32_u16`]."] + #[inline(always)] + pub fn vreinterpret_f32_u16(self, a: uint16x4_t) -> float32x2_t { + unsafe { vreinterpret_f32_u16(a) } + } + #[doc = "See [`arch::vreinterpret_s8_u16`]."] + #[inline(always)] + pub fn vreinterpret_s8_u16(self, a: uint16x4_t) -> int8x8_t { + unsafe { vreinterpret_s8_u16(a) } + } + #[doc = "See [`arch::vreinterpret_s16_u16`]."] + #[inline(always)] + pub fn vreinterpret_s16_u16(self, a: uint16x4_t) -> int16x4_t { + unsafe { vreinterpret_s16_u16(a) } + } + #[doc = "See [`arch::vreinterpret_s32_u16`]."] + #[inline(always)] + pub fn vreinterpret_s32_u16(self, a: uint16x4_t) -> int32x2_t { + unsafe { vreinterpret_s32_u16(a) } + } + #[doc = "See [`arch::vreinterpret_s64_u16`]."] + #[inline(always)] + pub fn vreinterpret_s64_u16(self, a: uint16x4_t) -> int64x1_t { + unsafe { vreinterpret_s64_u16(a) } + } + #[doc = "See [`arch::vreinterpret_u8_u16`]."] + #[inline(always)] + pub fn vreinterpret_u8_u16(self, a: uint16x4_t) -> uint8x8_t { + unsafe { vreinterpret_u8_u16(a) } + } + #[doc = "See [`arch::vreinterpret_u32_u16`]."] + #[inline(always)] + pub fn vreinterpret_u32_u16(self, a: uint16x4_t) -> uint32x2_t { + unsafe { vreinterpret_u32_u16(a) } + } + #[doc = "See [`arch::vreinterpret_u64_u16`]."] + #[inline(always)] + pub fn vreinterpret_u64_u16(self, a: uint16x4_t) -> uint64x1_t { + unsafe { vreinterpret_u64_u16(a) } + } + #[doc = "See [`arch::vreinterpret_p8_u16`]."] + #[inline(always)] + pub fn vreinterpret_p8_u16(self, a: uint16x4_t) -> poly8x8_t { + unsafe { vreinterpret_p8_u16(a) } + } + #[doc = "See [`arch::vreinterpret_p16_u16`]."] + #[inline(always)] + pub fn vreinterpret_p16_u16(self, a: uint16x4_t) -> poly16x4_t { + unsafe { vreinterpret_p16_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_u16`]."] + #[inline(always)] + pub fn 
vreinterpretq_f32_u16(self, a: uint16x8_t) -> float32x4_t { + unsafe { vreinterpretq_f32_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_s8_u16`]."] + #[inline(always)] + pub fn vreinterpretq_s8_u16(self, a: uint16x8_t) -> int8x16_t { + unsafe { vreinterpretq_s8_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_u16`]."] + #[inline(always)] + pub fn vreinterpretq_s16_u16(self, a: uint16x8_t) -> int16x8_t { + unsafe { vreinterpretq_s16_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_u16`]."] + #[inline(always)] + pub fn vreinterpretq_s32_u16(self, a: uint16x8_t) -> int32x4_t { + unsafe { vreinterpretq_s32_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_u16`]."] + #[inline(always)] + pub fn vreinterpretq_s64_u16(self, a: uint16x8_t) -> int64x2_t { + unsafe { vreinterpretq_s64_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_u16`]."] + #[inline(always)] + pub fn vreinterpretq_u8_u16(self, a: uint16x8_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_u16`]."] + #[inline(always)] + pub fn vreinterpretq_u32_u16(self, a: uint16x8_t) -> uint32x4_t { + unsafe { vreinterpretq_u32_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_u16`]."] + #[inline(always)] + pub fn vreinterpretq_u64_u16(self, a: uint16x8_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_u16`]."] + #[inline(always)] + pub fn vreinterpretq_p8_u16(self, a: uint16x8_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_u16`]."] + #[inline(always)] + pub fn vreinterpretq_p16_u16(self, a: uint16x8_t) -> poly16x8_t { + unsafe { vreinterpretq_p16_u16(a) } + } + #[doc = "See [`arch::vreinterpret_f32_u32`]."] + #[inline(always)] + pub fn vreinterpret_f32_u32(self, a: uint32x2_t) -> float32x2_t { + unsafe { vreinterpret_f32_u32(a) } + } + #[doc = "See [`arch::vreinterpret_s8_u32`]."] + #[inline(always)] + pub fn vreinterpret_s8_u32(self, a: 
uint32x2_t) -> int8x8_t { + unsafe { vreinterpret_s8_u32(a) } + } + #[doc = "See [`arch::vreinterpret_s16_u32`]."] + #[inline(always)] + pub fn vreinterpret_s16_u32(self, a: uint32x2_t) -> int16x4_t { + unsafe { vreinterpret_s16_u32(a) } + } + #[doc = "See [`arch::vreinterpret_s32_u32`]."] + #[inline(always)] + pub fn vreinterpret_s32_u32(self, a: uint32x2_t) -> int32x2_t { + unsafe { vreinterpret_s32_u32(a) } + } + #[doc = "See [`arch::vreinterpret_s64_u32`]."] + #[inline(always)] + pub fn vreinterpret_s64_u32(self, a: uint32x2_t) -> int64x1_t { + unsafe { vreinterpret_s64_u32(a) } + } + #[doc = "See [`arch::vreinterpret_u8_u32`]."] + #[inline(always)] + pub fn vreinterpret_u8_u32(self, a: uint32x2_t) -> uint8x8_t { + unsafe { vreinterpret_u8_u32(a) } + } + #[doc = "See [`arch::vreinterpret_u16_u32`]."] + #[inline(always)] + pub fn vreinterpret_u16_u32(self, a: uint32x2_t) -> uint16x4_t { + unsafe { vreinterpret_u16_u32(a) } + } + #[doc = "See [`arch::vreinterpret_u64_u32`]."] + #[inline(always)] + pub fn vreinterpret_u64_u32(self, a: uint32x2_t) -> uint64x1_t { + unsafe { vreinterpret_u64_u32(a) } + } + #[doc = "See [`arch::vreinterpret_p8_u32`]."] + #[inline(always)] + pub fn vreinterpret_p8_u32(self, a: uint32x2_t) -> poly8x8_t { + unsafe { vreinterpret_p8_u32(a) } + } + #[doc = "See [`arch::vreinterpret_p16_u32`]."] + #[inline(always)] + pub fn vreinterpret_p16_u32(self, a: uint32x2_t) -> poly16x4_t { + unsafe { vreinterpret_p16_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_u32`]."] + #[inline(always)] + pub fn vreinterpretq_f32_u32(self, a: uint32x4_t) -> float32x4_t { + unsafe { vreinterpretq_f32_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_s8_u32`]."] + #[inline(always)] + pub fn vreinterpretq_s8_u32(self, a: uint32x4_t) -> int8x16_t { + unsafe { vreinterpretq_s8_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_u32`]."] + #[inline(always)] + pub fn vreinterpretq_s16_u32(self, a: uint32x4_t) -> int16x8_t { + unsafe { 
vreinterpretq_s16_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_u32`]."] + #[inline(always)] + pub fn vreinterpretq_s32_u32(self, a: uint32x4_t) -> int32x4_t { + unsafe { vreinterpretq_s32_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_u32`]."] + #[inline(always)] + pub fn vreinterpretq_s64_u32(self, a: uint32x4_t) -> int64x2_t { + unsafe { vreinterpretq_s64_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_u32`]."] + #[inline(always)] + pub fn vreinterpretq_u8_u32(self, a: uint32x4_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_u32`]."] + #[inline(always)] + pub fn vreinterpretq_u16_u32(self, a: uint32x4_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_u32`]."] + #[inline(always)] + pub fn vreinterpretq_u64_u32(self, a: uint32x4_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_u32`]."] + #[inline(always)] + pub fn vreinterpretq_p8_u32(self, a: uint32x4_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_u32`]."] + #[inline(always)] + pub fn vreinterpretq_p16_u32(self, a: uint32x4_t) -> poly16x8_t { + unsafe { vreinterpretq_p16_u32(a) } + } + #[doc = "See [`arch::vreinterpret_f32_u64`]."] + #[inline(always)] + pub fn vreinterpret_f32_u64(self, a: uint64x1_t) -> float32x2_t { + unsafe { vreinterpret_f32_u64(a) } + } + #[doc = "See [`arch::vreinterpret_s8_u64`]."] + #[inline(always)] + pub fn vreinterpret_s8_u64(self, a: uint64x1_t) -> int8x8_t { + unsafe { vreinterpret_s8_u64(a) } + } + #[doc = "See [`arch::vreinterpret_s16_u64`]."] + #[inline(always)] + pub fn vreinterpret_s16_u64(self, a: uint64x1_t) -> int16x4_t { + unsafe { vreinterpret_s16_u64(a) } + } + #[doc = "See [`arch::vreinterpret_s32_u64`]."] + #[inline(always)] + pub fn vreinterpret_s32_u64(self, a: uint64x1_t) -> int32x2_t { + unsafe { vreinterpret_s32_u64(a) } + } + #[doc = "See 
[`arch::vreinterpret_s64_u64`]."] + #[inline(always)] + pub fn vreinterpret_s64_u64(self, a: uint64x1_t) -> int64x1_t { + unsafe { vreinterpret_s64_u64(a) } + } + #[doc = "See [`arch::vreinterpret_u8_u64`]."] + #[inline(always)] + pub fn vreinterpret_u8_u64(self, a: uint64x1_t) -> uint8x8_t { + unsafe { vreinterpret_u8_u64(a) } + } + #[doc = "See [`arch::vreinterpret_u16_u64`]."] + #[inline(always)] + pub fn vreinterpret_u16_u64(self, a: uint64x1_t) -> uint16x4_t { + unsafe { vreinterpret_u16_u64(a) } + } + #[doc = "See [`arch::vreinterpret_u32_u64`]."] + #[inline(always)] + pub fn vreinterpret_u32_u64(self, a: uint64x1_t) -> uint32x2_t { + unsafe { vreinterpret_u32_u64(a) } + } + #[doc = "See [`arch::vreinterpret_p8_u64`]."] + #[inline(always)] + pub fn vreinterpret_p8_u64(self, a: uint64x1_t) -> poly8x8_t { + unsafe { vreinterpret_p8_u64(a) } + } + #[doc = "See [`arch::vreinterpret_p16_u64`]."] + #[inline(always)] + pub fn vreinterpret_p16_u64(self, a: uint64x1_t) -> poly16x4_t { + unsafe { vreinterpret_p16_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_u64`]."] + #[inline(always)] + pub fn vreinterpretq_f32_u64(self, a: uint64x2_t) -> float32x4_t { + unsafe { vreinterpretq_f32_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_s8_u64`]."] + #[inline(always)] + pub fn vreinterpretq_s8_u64(self, a: uint64x2_t) -> int8x16_t { + unsafe { vreinterpretq_s8_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_u64`]."] + #[inline(always)] + pub fn vreinterpretq_s16_u64(self, a: uint64x2_t) -> int16x8_t { + unsafe { vreinterpretq_s16_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_u64`]."] + #[inline(always)] + pub fn vreinterpretq_s32_u64(self, a: uint64x2_t) -> int32x4_t { + unsafe { vreinterpretq_s32_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_u64`]."] + #[inline(always)] + pub fn vreinterpretq_s64_u64(self, a: uint64x2_t) -> int64x2_t { + unsafe { vreinterpretq_s64_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_u64`]."] + 
#[inline(always)] + pub fn vreinterpretq_u8_u64(self, a: uint64x2_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_u64`]."] + #[inline(always)] + pub fn vreinterpretq_u16_u64(self, a: uint64x2_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_u64`]."] + #[inline(always)] + pub fn vreinterpretq_u32_u64(self, a: uint64x2_t) -> uint32x4_t { + unsafe { vreinterpretq_u32_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_u64`]."] + #[inline(always)] + pub fn vreinterpretq_p8_u64(self, a: uint64x2_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_u64(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_u64`]."] + #[inline(always)] + pub fn vreinterpretq_p16_u64(self, a: uint64x2_t) -> poly16x8_t { + unsafe { vreinterpretq_p16_u64(a) } + } + #[doc = "See [`arch::vreinterpret_f32_p8`]."] + #[inline(always)] + pub fn vreinterpret_f32_p8(self, a: poly8x8_t) -> float32x2_t { + unsafe { vreinterpret_f32_p8(a) } + } + #[doc = "See [`arch::vreinterpret_s8_p8`]."] + #[inline(always)] + pub fn vreinterpret_s8_p8(self, a: poly8x8_t) -> int8x8_t { + unsafe { vreinterpret_s8_p8(a) } + } + #[doc = "See [`arch::vreinterpret_s16_p8`]."] + #[inline(always)] + pub fn vreinterpret_s16_p8(self, a: poly8x8_t) -> int16x4_t { + unsafe { vreinterpret_s16_p8(a) } + } + #[doc = "See [`arch::vreinterpret_s32_p8`]."] + #[inline(always)] + pub fn vreinterpret_s32_p8(self, a: poly8x8_t) -> int32x2_t { + unsafe { vreinterpret_s32_p8(a) } + } + #[doc = "See [`arch::vreinterpret_s64_p8`]."] + #[inline(always)] + pub fn vreinterpret_s64_p8(self, a: poly8x8_t) -> int64x1_t { + unsafe { vreinterpret_s64_p8(a) } + } + #[doc = "See [`arch::vreinterpret_u8_p8`]."] + #[inline(always)] + pub fn vreinterpret_u8_p8(self, a: poly8x8_t) -> uint8x8_t { + unsafe { vreinterpret_u8_p8(a) } + } + #[doc = "See [`arch::vreinterpret_u16_p8`]."] + #[inline(always)] + pub fn vreinterpret_u16_p8(self, a: poly8x8_t) -> 
uint16x4_t { + unsafe { vreinterpret_u16_p8(a) } + } + #[doc = "See [`arch::vreinterpret_u32_p8`]."] + #[inline(always)] + pub fn vreinterpret_u32_p8(self, a: poly8x8_t) -> uint32x2_t { + unsafe { vreinterpret_u32_p8(a) } + } + #[doc = "See [`arch::vreinterpret_u64_p8`]."] + #[inline(always)] + pub fn vreinterpret_u64_p8(self, a: poly8x8_t) -> uint64x1_t { + unsafe { vreinterpret_u64_p8(a) } + } + #[doc = "See [`arch::vreinterpret_p16_p8`]."] + #[inline(always)] + pub fn vreinterpret_p16_p8(self, a: poly8x8_t) -> poly16x4_t { + unsafe { vreinterpret_p16_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_p8`]."] + #[inline(always)] + pub fn vreinterpretq_f32_p8(self, a: poly8x16_t) -> float32x4_t { + unsafe { vreinterpretq_f32_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_s8_p8`]."] + #[inline(always)] + pub fn vreinterpretq_s8_p8(self, a: poly8x16_t) -> int8x16_t { + unsafe { vreinterpretq_s8_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_p8`]."] + #[inline(always)] + pub fn vreinterpretq_s16_p8(self, a: poly8x16_t) -> int16x8_t { + unsafe { vreinterpretq_s16_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_p8`]."] + #[inline(always)] + pub fn vreinterpretq_s32_p8(self, a: poly8x16_t) -> int32x4_t { + unsafe { vreinterpretq_s32_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_p8`]."] + #[inline(always)] + pub fn vreinterpretq_s64_p8(self, a: poly8x16_t) -> int64x2_t { + unsafe { vreinterpretq_s64_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_p8`]."] + #[inline(always)] + pub fn vreinterpretq_u8_p8(self, a: poly8x16_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_p8`]."] + #[inline(always)] + pub fn vreinterpretq_u16_p8(self, a: poly8x16_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_p8`]."] + #[inline(always)] + pub fn vreinterpretq_u32_p8(self, a: poly8x16_t) -> uint32x4_t { + unsafe { vreinterpretq_u32_p8(a) } + } + #[doc = "See 
[`arch::vreinterpretq_u64_p8`]."] + #[inline(always)] + pub fn vreinterpretq_u64_p8(self, a: poly8x16_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_p8`]."] + #[inline(always)] + pub fn vreinterpretq_p16_p8(self, a: poly8x16_t) -> poly16x8_t { + unsafe { vreinterpretq_p16_p8(a) } + } + #[doc = "See [`arch::vreinterpret_f32_p16`]."] + #[inline(always)] + pub fn vreinterpret_f32_p16(self, a: poly16x4_t) -> float32x2_t { + unsafe { vreinterpret_f32_p16(a) } + } + #[doc = "See [`arch::vreinterpret_s8_p16`]."] + #[inline(always)] + pub fn vreinterpret_s8_p16(self, a: poly16x4_t) -> int8x8_t { + unsafe { vreinterpret_s8_p16(a) } + } + #[doc = "See [`arch::vreinterpret_s16_p16`]."] + #[inline(always)] + pub fn vreinterpret_s16_p16(self, a: poly16x4_t) -> int16x4_t { + unsafe { vreinterpret_s16_p16(a) } + } + #[doc = "See [`arch::vreinterpret_s32_p16`]."] + #[inline(always)] + pub fn vreinterpret_s32_p16(self, a: poly16x4_t) -> int32x2_t { + unsafe { vreinterpret_s32_p16(a) } + } + #[doc = "See [`arch::vreinterpret_s64_p16`]."] + #[inline(always)] + pub fn vreinterpret_s64_p16(self, a: poly16x4_t) -> int64x1_t { + unsafe { vreinterpret_s64_p16(a) } + } + #[doc = "See [`arch::vreinterpret_u8_p16`]."] + #[inline(always)] + pub fn vreinterpret_u8_p16(self, a: poly16x4_t) -> uint8x8_t { + unsafe { vreinterpret_u8_p16(a) } + } + #[doc = "See [`arch::vreinterpret_u16_p16`]."] + #[inline(always)] + pub fn vreinterpret_u16_p16(self, a: poly16x4_t) -> uint16x4_t { + unsafe { vreinterpret_u16_p16(a) } + } + #[doc = "See [`arch::vreinterpret_u32_p16`]."] + #[inline(always)] + pub fn vreinterpret_u32_p16(self, a: poly16x4_t) -> uint32x2_t { + unsafe { vreinterpret_u32_p16(a) } + } + #[doc = "See [`arch::vreinterpret_u64_p16`]."] + #[inline(always)] + pub fn vreinterpret_u64_p16(self, a: poly16x4_t) -> uint64x1_t { + unsafe { vreinterpret_u64_p16(a) } + } + #[doc = "See [`arch::vreinterpret_p8_p16`]."] + #[inline(always)] + pub fn 
vreinterpret_p8_p16(self, a: poly16x4_t) -> poly8x8_t { + unsafe { vreinterpret_p8_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_f32_p16`]."] + #[inline(always)] + pub fn vreinterpretq_f32_p16(self, a: poly16x8_t) -> float32x4_t { + unsafe { vreinterpretq_f32_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_s8_p16`]."] + #[inline(always)] + pub fn vreinterpretq_s8_p16(self, a: poly16x8_t) -> int8x16_t { + unsafe { vreinterpretq_s8_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_p16`]."] + #[inline(always)] + pub fn vreinterpretq_s16_p16(self, a: poly16x8_t) -> int16x8_t { + unsafe { vreinterpretq_s16_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_p16`]."] + #[inline(always)] + pub fn vreinterpretq_s32_p16(self, a: poly16x8_t) -> int32x4_t { + unsafe { vreinterpretq_s32_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_p16`]."] + #[inline(always)] + pub fn vreinterpretq_s64_p16(self, a: poly16x8_t) -> int64x2_t { + unsafe { vreinterpretq_s64_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_p16`]."] + #[inline(always)] + pub fn vreinterpretq_u8_p16(self, a: poly16x8_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_p16`]."] + #[inline(always)] + pub fn vreinterpretq_u16_p16(self, a: poly16x8_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_p16`]."] + #[inline(always)] + pub fn vreinterpretq_u32_p16(self, a: poly16x8_t) -> uint32x4_t { + unsafe { vreinterpretq_u32_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_p16`]."] + #[inline(always)] + pub fn vreinterpretq_u64_p16(self, a: poly16x8_t) -> uint64x2_t { + unsafe { vreinterpretq_u64_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_p16`]."] + #[inline(always)] + pub fn vreinterpretq_p8_p16(self, a: poly16x8_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_s8_p128`]."] + #[inline(always)] + pub fn vreinterpretq_s8_p128(self, a: p128) 
-> int8x16_t { + unsafe { vreinterpretq_s8_p128(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_p128`]."] + #[inline(always)] + pub fn vreinterpretq_s16_p128(self, a: p128) -> int16x8_t { + unsafe { vreinterpretq_s16_p128(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_p128`]."] + #[inline(always)] + pub fn vreinterpretq_s32_p128(self, a: p128) -> int32x4_t { + unsafe { vreinterpretq_s32_p128(a) } + } + #[doc = "See [`arch::vreinterpretq_s64_p128`]."] + #[inline(always)] + pub fn vreinterpretq_s64_p128(self, a: p128) -> int64x2_t { + unsafe { vreinterpretq_s64_p128(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_p128`]."] + #[inline(always)] + pub fn vreinterpretq_u8_p128(self, a: p128) -> uint8x16_t { + unsafe { vreinterpretq_u8_p128(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_p128`]."] + #[inline(always)] + pub fn vreinterpretq_u16_p128(self, a: p128) -> uint16x8_t { + unsafe { vreinterpretq_u16_p128(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_p128`]."] + #[inline(always)] + pub fn vreinterpretq_u32_p128(self, a: p128) -> uint32x4_t { + unsafe { vreinterpretq_u32_p128(a) } + } + #[doc = "See [`arch::vreinterpretq_u64_p128`]."] + #[inline(always)] + pub fn vreinterpretq_u64_p128(self, a: p128) -> uint64x2_t { + unsafe { vreinterpretq_u64_p128(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_p128`]."] + #[inline(always)] + pub fn vreinterpretq_p8_p128(self, a: p128) -> poly8x16_t { + unsafe { vreinterpretq_p8_p128(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_p128`]."] + #[inline(always)] + pub fn vreinterpretq_p16_p128(self, a: p128) -> poly16x8_t { + unsafe { vreinterpretq_p16_p128(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_p128`]."] + #[inline(always)] + pub fn vreinterpretq_p64_p128(self, a: p128) -> poly64x2_t { + unsafe { vreinterpretq_p64_p128(a) } + } + #[doc = "See [`arch::vreinterpret_p64_s8`]."] + #[inline(always)] + pub fn vreinterpret_p64_s8(self, a: int8x8_t) -> poly64x1_t { + unsafe { vreinterpret_p64_s8(a) } + } + #[doc = 
"See [`arch::vreinterpretq_p128_s8`]."] + #[inline(always)] + pub fn vreinterpretq_p128_s8(self, a: int8x16_t) -> p128 { + unsafe { vreinterpretq_p128_s8(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_s8`]."] + #[inline(always)] + pub fn vreinterpretq_p64_s8(self, a: int8x16_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_s8(a) } + } + #[doc = "See [`arch::vreinterpret_p64_s16`]."] + #[inline(always)] + pub fn vreinterpret_p64_s16(self, a: int16x4_t) -> poly64x1_t { + unsafe { vreinterpret_p64_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_s16`]."] + #[inline(always)] + pub fn vreinterpretq_p128_s16(self, a: int16x8_t) -> p128 { + unsafe { vreinterpretq_p128_s16(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_s16`]."] + #[inline(always)] + pub fn vreinterpretq_p64_s16(self, a: int16x8_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_s16(a) } + } + #[doc = "See [`arch::vreinterpret_p64_s32`]."] + #[inline(always)] + pub fn vreinterpret_p64_s32(self, a: int32x2_t) -> poly64x1_t { + unsafe { vreinterpret_p64_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_s32`]."] + #[inline(always)] + pub fn vreinterpretq_p128_s32(self, a: int32x4_t) -> p128 { + unsafe { vreinterpretq_p128_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_s32`]."] + #[inline(always)] + pub fn vreinterpretq_p64_s32(self, a: int32x4_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_s32(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_s64`]."] + #[inline(always)] + pub fn vreinterpretq_p128_s64(self, a: int64x2_t) -> p128 { + unsafe { vreinterpretq_p128_s64(a) } + } + #[doc = "See [`arch::vreinterpret_p64_u8`]."] + #[inline(always)] + pub fn vreinterpret_p64_u8(self, a: uint8x8_t) -> poly64x1_t { + unsafe { vreinterpret_p64_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_u8`]."] + #[inline(always)] + pub fn vreinterpretq_p128_u8(self, a: uint8x16_t) -> p128 { + unsafe { vreinterpretq_p128_u8(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_u8`]."] + #[inline(always)] + pub 
fn vreinterpretq_p64_u8(self, a: uint8x16_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_u8(a) } + } + #[doc = "See [`arch::vreinterpret_p64_u16`]."] + #[inline(always)] + pub fn vreinterpret_p64_u16(self, a: uint16x4_t) -> poly64x1_t { + unsafe { vreinterpret_p64_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_u16`]."] + #[inline(always)] + pub fn vreinterpretq_p128_u16(self, a: uint16x8_t) -> p128 { + unsafe { vreinterpretq_p128_u16(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_u16`]."] + #[inline(always)] + pub fn vreinterpretq_p64_u16(self, a: uint16x8_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_u16(a) } + } + #[doc = "See [`arch::vreinterpret_p64_u32`]."] + #[inline(always)] + pub fn vreinterpret_p64_u32(self, a: uint32x2_t) -> poly64x1_t { + unsafe { vreinterpret_p64_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_u32`]."] + #[inline(always)] + pub fn vreinterpretq_p128_u32(self, a: uint32x4_t) -> p128 { + unsafe { vreinterpretq_p128_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_u32`]."] + #[inline(always)] + pub fn vreinterpretq_p64_u32(self, a: uint32x4_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_u32(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_u64`]."] + #[inline(always)] + pub fn vreinterpretq_p128_u64(self, a: uint64x2_t) -> p128 { + unsafe { vreinterpretq_p128_u64(a) } + } + #[doc = "See [`arch::vreinterpret_p64_p8`]."] + #[inline(always)] + pub fn vreinterpret_p64_p8(self, a: poly8x8_t) -> poly64x1_t { + unsafe { vreinterpret_p64_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_p8`]."] + #[inline(always)] + pub fn vreinterpretq_p128_p8(self, a: poly8x16_t) -> p128 { + unsafe { vreinterpretq_p128_p8(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_p8`]."] + #[inline(always)] + pub fn vreinterpretq_p64_p8(self, a: poly8x16_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_p8(a) } + } + #[doc = "See [`arch::vreinterpret_p64_p16`]."] + #[inline(always)] + pub fn vreinterpret_p64_p16(self, a: poly16x4_t) -> 
poly64x1_t { + unsafe { vreinterpret_p64_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_p16`]."] + #[inline(always)] + pub fn vreinterpretq_p128_p16(self, a: poly16x8_t) -> p128 { + unsafe { vreinterpretq_p128_p16(a) } + } + #[doc = "See [`arch::vreinterpretq_p64_p16`]."] + #[inline(always)] + pub fn vreinterpretq_p64_p16(self, a: poly16x8_t) -> poly64x2_t { + unsafe { vreinterpretq_p64_p16(a) } + } + #[doc = "See [`arch::vreinterpret_s8_p64`]."] + #[inline(always)] + pub fn vreinterpret_s8_p64(self, a: poly64x1_t) -> int8x8_t { + unsafe { vreinterpret_s8_p64(a) } + } + #[doc = "See [`arch::vreinterpret_s16_p64`]."] + #[inline(always)] + pub fn vreinterpret_s16_p64(self, a: poly64x1_t) -> int16x4_t { + unsafe { vreinterpret_s16_p64(a) } + } + #[doc = "See [`arch::vreinterpret_s32_p64`]."] + #[inline(always)] + pub fn vreinterpret_s32_p64(self, a: poly64x1_t) -> int32x2_t { + unsafe { vreinterpret_s32_p64(a) } + } + #[doc = "See [`arch::vreinterpret_u8_p64`]."] + #[inline(always)] + pub fn vreinterpret_u8_p64(self, a: poly64x1_t) -> uint8x8_t { + unsafe { vreinterpret_u8_p64(a) } + } + #[doc = "See [`arch::vreinterpret_u16_p64`]."] + #[inline(always)] + pub fn vreinterpret_u16_p64(self, a: poly64x1_t) -> uint16x4_t { + unsafe { vreinterpret_u16_p64(a) } + } + #[doc = "See [`arch::vreinterpret_u32_p64`]."] + #[inline(always)] + pub fn vreinterpret_u32_p64(self, a: poly64x1_t) -> uint32x2_t { + unsafe { vreinterpret_u32_p64(a) } + } + #[doc = "See [`arch::vreinterpret_p8_p64`]."] + #[inline(always)] + pub fn vreinterpret_p8_p64(self, a: poly64x1_t) -> poly8x8_t { + unsafe { vreinterpret_p8_p64(a) } + } + #[doc = "See [`arch::vreinterpret_p16_p64`]."] + #[inline(always)] + pub fn vreinterpret_p16_p64(self, a: poly64x1_t) -> poly16x4_t { + unsafe { vreinterpret_p16_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_p128_p64`]."] + #[inline(always)] + pub fn vreinterpretq_p128_p64(self, a: poly64x2_t) -> p128 { + unsafe { vreinterpretq_p128_p64(a) } + } + #[doc = 
"See [`arch::vreinterpretq_s8_p64`]."] + #[inline(always)] + pub fn vreinterpretq_s8_p64(self, a: poly64x2_t) -> int8x16_t { + unsafe { vreinterpretq_s8_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_s16_p64`]."] + #[inline(always)] + pub fn vreinterpretq_s16_p64(self, a: poly64x2_t) -> int16x8_t { + unsafe { vreinterpretq_s16_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_s32_p64`]."] + #[inline(always)] + pub fn vreinterpretq_s32_p64(self, a: poly64x2_t) -> int32x4_t { + unsafe { vreinterpretq_s32_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_u8_p64`]."] + #[inline(always)] + pub fn vreinterpretq_u8_p64(self, a: poly64x2_t) -> uint8x16_t { + unsafe { vreinterpretq_u8_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_u16_p64`]."] + #[inline(always)] + pub fn vreinterpretq_u16_p64(self, a: poly64x2_t) -> uint16x8_t { + unsafe { vreinterpretq_u16_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_u32_p64`]."] + #[inline(always)] + pub fn vreinterpretq_u32_p64(self, a: poly64x2_t) -> uint32x4_t { + unsafe { vreinterpretq_u32_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_p8_p64`]."] + #[inline(always)] + pub fn vreinterpretq_p8_p64(self, a: poly64x2_t) -> poly8x16_t { + unsafe { vreinterpretq_p8_p64(a) } + } + #[doc = "See [`arch::vreinterpretq_p16_p64`]."] + #[inline(always)] + pub fn vreinterpretq_p16_p64(self, a: poly64x2_t) -> poly16x8_t { + unsafe { vreinterpretq_p16_p64(a) } + } + #[doc = "See [`arch::vrev16_p8`]."] + #[inline(always)] + pub fn vrev16_p8(self, a: poly8x8_t) -> poly8x8_t { + unsafe { vrev16_p8(a) } + } + #[doc = "See [`arch::vrev16_s8`]."] + #[inline(always)] + pub fn vrev16_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vrev16_s8(a) } + } + #[doc = "See [`arch::vrev16_u8`]."] + #[inline(always)] + pub fn vrev16_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vrev16_u8(a) } + } + #[doc = "See [`arch::vrev16q_p8`]."] + #[inline(always)] + pub fn vrev16q_p8(self, a: poly8x16_t) -> poly8x16_t { + unsafe { vrev16q_p8(a) } + } + #[doc = 
"See [`arch::vrev16q_s8`]."] + #[inline(always)] + pub fn vrev16q_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vrev16q_s8(a) } + } + #[doc = "See [`arch::vrev16q_u8`]."] + #[inline(always)] + pub fn vrev16q_u8(self, a: uint8x16_t) -> uint8x16_t { + unsafe { vrev16q_u8(a) } + } + #[doc = "See [`arch::vrev32_p16`]."] + #[inline(always)] + pub fn vrev32_p16(self, a: poly16x4_t) -> poly16x4_t { + unsafe { vrev32_p16(a) } + } + #[doc = "See [`arch::vrev32_p8`]."] + #[inline(always)] + pub fn vrev32_p8(self, a: poly8x8_t) -> poly8x8_t { + unsafe { vrev32_p8(a) } + } + #[doc = "See [`arch::vrev32_s16`]."] + #[inline(always)] + pub fn vrev32_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vrev32_s16(a) } + } + #[doc = "See [`arch::vrev32_s8`]."] + #[inline(always)] + pub fn vrev32_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vrev32_s8(a) } + } + #[doc = "See [`arch::vrev32_u16`]."] + #[inline(always)] + pub fn vrev32_u16(self, a: uint16x4_t) -> uint16x4_t { + unsafe { vrev32_u16(a) } + } + #[doc = "See [`arch::vrev32_u8`]."] + #[inline(always)] + pub fn vrev32_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vrev32_u8(a) } + } + #[doc = "See [`arch::vrev32q_p16`]."] + #[inline(always)] + pub fn vrev32q_p16(self, a: poly16x8_t) -> poly16x8_t { + unsafe { vrev32q_p16(a) } + } + #[doc = "See [`arch::vrev32q_p8`]."] + #[inline(always)] + pub fn vrev32q_p8(self, a: poly8x16_t) -> poly8x16_t { + unsafe { vrev32q_p8(a) } + } + #[doc = "See [`arch::vrev32q_s16`]."] + #[inline(always)] + pub fn vrev32q_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vrev32q_s16(a) } + } + #[doc = "See [`arch::vrev32q_s8`]."] + #[inline(always)] + pub fn vrev32q_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vrev32q_s8(a) } + } + #[doc = "See [`arch::vrev32q_u16`]."] + #[inline(always)] + pub fn vrev32q_u16(self, a: uint16x8_t) -> uint16x8_t { + unsafe { vrev32q_u16(a) } + } + #[doc = "See [`arch::vrev32q_u8`]."] + #[inline(always)] + pub fn vrev32q_u8(self, a: uint8x16_t) -> uint8x16_t 
{ + unsafe { vrev32q_u8(a) } + } + #[doc = "See [`arch::vrev64_f32`]."] + #[inline(always)] + pub fn vrev64_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vrev64_f32(a) } + } + #[doc = "See [`arch::vrev64_p16`]."] + #[inline(always)] + pub fn vrev64_p16(self, a: poly16x4_t) -> poly16x4_t { + unsafe { vrev64_p16(a) } + } + #[doc = "See [`arch::vrev64_p8`]."] + #[inline(always)] + pub fn vrev64_p8(self, a: poly8x8_t) -> poly8x8_t { + unsafe { vrev64_p8(a) } + } + #[doc = "See [`arch::vrev64_s16`]."] + #[inline(always)] + pub fn vrev64_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vrev64_s16(a) } + } + #[doc = "See [`arch::vrev64_s32`]."] + #[inline(always)] + pub fn vrev64_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vrev64_s32(a) } + } + #[doc = "See [`arch::vrev64_s8`]."] + #[inline(always)] + pub fn vrev64_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vrev64_s8(a) } + } + #[doc = "See [`arch::vrev64_u16`]."] + #[inline(always)] + pub fn vrev64_u16(self, a: uint16x4_t) -> uint16x4_t { + unsafe { vrev64_u16(a) } + } + #[doc = "See [`arch::vrev64_u32`]."] + #[inline(always)] + pub fn vrev64_u32(self, a: uint32x2_t) -> uint32x2_t { + unsafe { vrev64_u32(a) } + } + #[doc = "See [`arch::vrev64_u8`]."] + #[inline(always)] + pub fn vrev64_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vrev64_u8(a) } + } + #[doc = "See [`arch::vrev64q_f32`]."] + #[inline(always)] + pub fn vrev64q_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vrev64q_f32(a) } + } + #[doc = "See [`arch::vrev64q_p16`]."] + #[inline(always)] + pub fn vrev64q_p16(self, a: poly16x8_t) -> poly16x8_t { + unsafe { vrev64q_p16(a) } + } + #[doc = "See [`arch::vrev64q_p8`]."] + #[inline(always)] + pub fn vrev64q_p8(self, a: poly8x16_t) -> poly8x16_t { + unsafe { vrev64q_p8(a) } + } + #[doc = "See [`arch::vrev64q_s16`]."] + #[inline(always)] + pub fn vrev64q_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vrev64q_s16(a) } + } + #[doc = "See [`arch::vrev64q_s32`]."] + #[inline(always)] + pub 
fn vrev64q_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vrev64q_s32(a) } + } + #[doc = "See [`arch::vrev64q_s8`]."] + #[inline(always)] + pub fn vrev64q_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vrev64q_s8(a) } + } + #[doc = "See [`arch::vrev64q_u16`]."] + #[inline(always)] + pub fn vrev64q_u16(self, a: uint16x8_t) -> uint16x8_t { + unsafe { vrev64q_u16(a) } + } + #[doc = "See [`arch::vrev64q_u32`]."] + #[inline(always)] + pub fn vrev64q_u32(self, a: uint32x4_t) -> uint32x4_t { + unsafe { vrev64q_u32(a) } + } + #[doc = "See [`arch::vrev64q_u8`]."] + #[inline(always)] + pub fn vrev64q_u8(self, a: uint8x16_t) -> uint8x16_t { + unsafe { vrev64q_u8(a) } + } + #[doc = "See [`arch::vrhadd_s8`]."] + #[inline(always)] + pub fn vrhadd_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vrhadd_s8(a, b) } + } + #[doc = "See [`arch::vrhaddq_s8`]."] + #[inline(always)] + pub fn vrhaddq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vrhaddq_s8(a, b) } + } + #[doc = "See [`arch::vrhadd_s16`]."] + #[inline(always)] + pub fn vrhadd_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vrhadd_s16(a, b) } + } + #[doc = "See [`arch::vrhaddq_s16`]."] + #[inline(always)] + pub fn vrhaddq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vrhaddq_s16(a, b) } + } + #[doc = "See [`arch::vrhadd_s32`]."] + #[inline(always)] + pub fn vrhadd_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vrhadd_s32(a, b) } + } + #[doc = "See [`arch::vrhaddq_s32`]."] + #[inline(always)] + pub fn vrhaddq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vrhaddq_s32(a, b) } + } + #[doc = "See [`arch::vrhadd_u8`]."] + #[inline(always)] + pub fn vrhadd_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vrhadd_u8(a, b) } + } + #[doc = "See [`arch::vrhaddq_u8`]."] + #[inline(always)] + pub fn vrhaddq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vrhaddq_u8(a, b) } + } + #[doc = "See 
[`arch::vrhadd_u16`]."] + #[inline(always)] + pub fn vrhadd_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vrhadd_u16(a, b) } + } + #[doc = "See [`arch::vrhaddq_u16`]."] + #[inline(always)] + pub fn vrhaddq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vrhaddq_u16(a, b) } + } + #[doc = "See [`arch::vrhadd_u32`]."] + #[inline(always)] + pub fn vrhadd_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vrhadd_u32(a, b) } + } + #[doc = "See [`arch::vrhaddq_u32`]."] + #[inline(always)] + pub fn vrhaddq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vrhaddq_u32(a, b) } + } + #[doc = "See [`arch::vrndn_f32`]."] + #[inline(always)] + pub fn vrndn_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vrndn_f32(a) } + } + #[doc = "See [`arch::vrndnq_f32`]."] + #[inline(always)] + pub fn vrndnq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vrndnq_f32(a) } + } + #[doc = "See [`arch::vrshl_s8`]."] + #[inline(always)] + pub fn vrshl_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vrshl_s8(a, b) } + } + #[doc = "See [`arch::vrshlq_s8`]."] + #[inline(always)] + pub fn vrshlq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vrshlq_s8(a, b) } + } + #[doc = "See [`arch::vrshl_s16`]."] + #[inline(always)] + pub fn vrshl_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vrshl_s16(a, b) } + } + #[doc = "See [`arch::vrshlq_s16`]."] + #[inline(always)] + pub fn vrshlq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vrshlq_s16(a, b) } + } + #[doc = "See [`arch::vrshl_s32`]."] + #[inline(always)] + pub fn vrshl_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vrshl_s32(a, b) } + } + #[doc = "See [`arch::vrshlq_s32`]."] + #[inline(always)] + pub fn vrshlq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vrshlq_s32(a, b) } + } + #[doc = "See [`arch::vrshl_s64`]."] + #[inline(always)] + pub fn vrshl_s64(self, a: int64x1_t, b: 
int64x1_t) -> int64x1_t { + unsafe { vrshl_s64(a, b) } + } + #[doc = "See [`arch::vrshlq_s64`]."] + #[inline(always)] + pub fn vrshlq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vrshlq_s64(a, b) } + } + #[doc = "See [`arch::vrshl_u8`]."] + #[inline(always)] + pub fn vrshl_u8(self, a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { vrshl_u8(a, b) } + } + #[doc = "See [`arch::vrshlq_u8`]."] + #[inline(always)] + pub fn vrshlq_u8(self, a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { vrshlq_u8(a, b) } + } + #[doc = "See [`arch::vrshl_u16`]."] + #[inline(always)] + pub fn vrshl_u16(self, a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { vrshl_u16(a, b) } + } + #[doc = "See [`arch::vrshlq_u16`]."] + #[inline(always)] + pub fn vrshlq_u16(self, a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { vrshlq_u16(a, b) } + } + #[doc = "See [`arch::vrshl_u32`]."] + #[inline(always)] + pub fn vrshl_u32(self, a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { vrshl_u32(a, b) } + } + #[doc = "See [`arch::vrshlq_u32`]."] + #[inline(always)] + pub fn vrshlq_u32(self, a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { vrshlq_u32(a, b) } + } + #[doc = "See [`arch::vrshl_u64`]."] + #[inline(always)] + pub fn vrshl_u64(self, a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { vrshl_u64(a, b) } + } + #[doc = "See [`arch::vrshlq_u64`]."] + #[inline(always)] + pub fn vrshlq_u64(self, a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { vrshlq_u64(a, b) } + } + #[doc = "See [`arch::vrshr_n_s8`]."] + #[inline(always)] + pub fn vrshr_n_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vrshr_n_s8::(a) } + } + #[doc = "See [`arch::vrshrq_n_s8`]."] + #[inline(always)] + pub fn vrshrq_n_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vrshrq_n_s8::(a) } + } + #[doc = "See [`arch::vrshr_n_s16`]."] + #[inline(always)] + pub fn vrshr_n_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vrshr_n_s16::(a) } + } + #[doc = "See [`arch::vrshrq_n_s16`]."] + 
#[inline(always)] + pub fn vrshrq_n_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vrshrq_n_s16::(a) } + } + #[doc = "See [`arch::vrshr_n_s32`]."] + #[inline(always)] + pub fn vrshr_n_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vrshr_n_s32::(a) } + } + #[doc = "See [`arch::vrshrq_n_s32`]."] + #[inline(always)] + pub fn vrshrq_n_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vrshrq_n_s32::(a) } + } + #[doc = "See [`arch::vrshr_n_s64`]."] + #[inline(always)] + pub fn vrshr_n_s64(self, a: int64x1_t) -> int64x1_t { + unsafe { vrshr_n_s64::(a) } + } + #[doc = "See [`arch::vrshrq_n_s64`]."] + #[inline(always)] + pub fn vrshrq_n_s64(self, a: int64x2_t) -> int64x2_t { + unsafe { vrshrq_n_s64::(a) } + } + #[doc = "See [`arch::vrshr_n_u8`]."] + #[inline(always)] + pub fn vrshr_n_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vrshr_n_u8::(a) } + } + #[doc = "See [`arch::vrshrq_n_u8`]."] + #[inline(always)] + pub fn vrshrq_n_u8(self, a: uint8x16_t) -> uint8x16_t { + unsafe { vrshrq_n_u8::(a) } + } + #[doc = "See [`arch::vrshr_n_u16`]."] + #[inline(always)] + pub fn vrshr_n_u16(self, a: uint16x4_t) -> uint16x4_t { + unsafe { vrshr_n_u16::(a) } + } + #[doc = "See [`arch::vrshrq_n_u16`]."] + #[inline(always)] + pub fn vrshrq_n_u16(self, a: uint16x8_t) -> uint16x8_t { + unsafe { vrshrq_n_u16::(a) } + } + #[doc = "See [`arch::vrshr_n_u32`]."] + #[inline(always)] + pub fn vrshr_n_u32(self, a: uint32x2_t) -> uint32x2_t { + unsafe { vrshr_n_u32::(a) } + } + #[doc = "See [`arch::vrshrq_n_u32`]."] + #[inline(always)] + pub fn vrshrq_n_u32(self, a: uint32x4_t) -> uint32x4_t { + unsafe { vrshrq_n_u32::(a) } + } + #[doc = "See [`arch::vrshr_n_u64`]."] + #[inline(always)] + pub fn vrshr_n_u64(self, a: uint64x1_t) -> uint64x1_t { + unsafe { vrshr_n_u64::(a) } + } + #[doc = "See [`arch::vrshrq_n_u64`]."] + #[inline(always)] + pub fn vrshrq_n_u64(self, a: uint64x2_t) -> uint64x2_t { + unsafe { vrshrq_n_u64::(a) } + } + #[doc = "See [`arch::vrshrn_n_u16`]."] + #[inline(always)] + 
pub fn vrshrn_n_u16(self, a: uint16x8_t) -> uint8x8_t { + unsafe { vrshrn_n_u16::(a) } + } + #[doc = "See [`arch::vrshrn_n_u32`]."] + #[inline(always)] + pub fn vrshrn_n_u32(self, a: uint32x4_t) -> uint16x4_t { + unsafe { vrshrn_n_u32::(a) } + } + #[doc = "See [`arch::vrshrn_n_u64`]."] + #[inline(always)] + pub fn vrshrn_n_u64(self, a: uint64x2_t) -> uint32x2_t { + unsafe { vrshrn_n_u64::(a) } + } + #[doc = "See [`arch::vrsqrte_f32`]."] + #[inline(always)] + pub fn vrsqrte_f32(self, a: float32x2_t) -> float32x2_t { + unsafe { vrsqrte_f32(a) } + } + #[doc = "See [`arch::vrsqrteq_f32`]."] + #[inline(always)] + pub fn vrsqrteq_f32(self, a: float32x4_t) -> float32x4_t { + unsafe { vrsqrteq_f32(a) } + } + #[doc = "See [`arch::vrsqrte_u32`]."] + #[inline(always)] + pub fn vrsqrte_u32(self, a: uint32x2_t) -> uint32x2_t { + unsafe { vrsqrte_u32(a) } + } + #[doc = "See [`arch::vrsqrteq_u32`]."] + #[inline(always)] + pub fn vrsqrteq_u32(self, a: uint32x4_t) -> uint32x4_t { + unsafe { vrsqrteq_u32(a) } + } + #[doc = "See [`arch::vrsqrts_f32`]."] + #[inline(always)] + pub fn vrsqrts_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vrsqrts_f32(a, b) } + } + #[doc = "See [`arch::vrsqrtsq_f32`]."] + #[inline(always)] + pub fn vrsqrtsq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vrsqrtsq_f32(a, b) } + } + #[doc = "See [`arch::vrsra_n_s8`]."] + #[inline(always)] + pub fn vrsra_n_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vrsra_n_s8::(a, b) } + } + #[doc = "See [`arch::vrsraq_n_s8`]."] + #[inline(always)] + pub fn vrsraq_n_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vrsraq_n_s8::(a, b) } + } + #[doc = "See [`arch::vrsra_n_s16`]."] + #[inline(always)] + pub fn vrsra_n_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vrsra_n_s16::(a, b) } + } + #[doc = "See [`arch::vrsraq_n_s16`]."] + #[inline(always)] + pub fn vrsraq_n_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { 
vrsraq_n_s16::(a, b) } + } + #[doc = "See [`arch::vrsra_n_s32`]."] + #[inline(always)] + pub fn vrsra_n_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vrsra_n_s32::(a, b) } + } + #[doc = "See [`arch::vrsraq_n_s32`]."] + #[inline(always)] + pub fn vrsraq_n_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vrsraq_n_s32::(a, b) } + } + #[doc = "See [`arch::vrsra_n_s64`]."] + #[inline(always)] + pub fn vrsra_n_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vrsra_n_s64::(a, b) } + } + #[doc = "See [`arch::vrsraq_n_s64`]."] + #[inline(always)] + pub fn vrsraq_n_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vrsraq_n_s64::(a, b) } + } + #[doc = "See [`arch::vrsra_n_u8`]."] + #[inline(always)] + pub fn vrsra_n_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vrsra_n_u8::(a, b) } + } + #[doc = "See [`arch::vrsraq_n_u8`]."] + #[inline(always)] + pub fn vrsraq_n_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vrsraq_n_u8::(a, b) } + } + #[doc = "See [`arch::vrsra_n_u16`]."] + #[inline(always)] + pub fn vrsra_n_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vrsra_n_u16::(a, b) } + } + #[doc = "See [`arch::vrsraq_n_u16`]."] + #[inline(always)] + pub fn vrsraq_n_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vrsraq_n_u16::(a, b) } + } + #[doc = "See [`arch::vrsra_n_u32`]."] + #[inline(always)] + pub fn vrsra_n_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vrsra_n_u32::(a, b) } + } + #[doc = "See [`arch::vrsraq_n_u32`]."] + #[inline(always)] + pub fn vrsraq_n_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vrsraq_n_u32::(a, b) } + } + #[doc = "See [`arch::vrsra_n_u64`]."] + #[inline(always)] + pub fn vrsra_n_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vrsra_n_u64::(a, b) } + } + #[doc = "See [`arch::vrsraq_n_u64`]."] + #[inline(always)] + pub fn vrsraq_n_u64(self, a: uint64x2_t, b: 
uint64x2_t) -> uint64x2_t { + unsafe { vrsraq_n_u64::(a, b) } + } + #[doc = "See [`arch::vrsubhn_s16`]."] + #[inline(always)] + pub fn vrsubhn_s16(self, a: int16x8_t, b: int16x8_t) -> int8x8_t { + unsafe { vrsubhn_s16(a, b) } + } + #[doc = "See [`arch::vrsubhn_s32`]."] + #[inline(always)] + pub fn vrsubhn_s32(self, a: int32x4_t, b: int32x4_t) -> int16x4_t { + unsafe { vrsubhn_s32(a, b) } + } + #[doc = "See [`arch::vrsubhn_s64`]."] + #[inline(always)] + pub fn vrsubhn_s64(self, a: int64x2_t, b: int64x2_t) -> int32x2_t { + unsafe { vrsubhn_s64(a, b) } + } + #[doc = "See [`arch::vrsubhn_u16`]."] + #[inline(always)] + pub fn vrsubhn_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + unsafe { vrsubhn_u16(a, b) } + } + #[doc = "See [`arch::vrsubhn_u32`]."] + #[inline(always)] + pub fn vrsubhn_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + unsafe { vrsubhn_u32(a, b) } + } + #[doc = "See [`arch::vrsubhn_u64`]."] + #[inline(always)] + pub fn vrsubhn_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + unsafe { vrsubhn_u64(a, b) } + } + #[doc = "See [`arch::vset_lane_f32`]."] + #[inline(always)] + pub fn vset_lane_f32(self, a: f32, b: float32x2_t) -> float32x2_t { + unsafe { vset_lane_f32::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_f32`]."] + #[inline(always)] + pub fn vsetq_lane_f32(self, a: f32, b: float32x4_t) -> float32x4_t { + unsafe { vsetq_lane_f32::(a, b) } + } + #[doc = "See [`arch::vset_lane_s8`]."] + #[inline(always)] + pub fn vset_lane_s8(self, a: i8, b: int8x8_t) -> int8x8_t { + unsafe { vset_lane_s8::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_s8`]."] + #[inline(always)] + pub fn vsetq_lane_s8(self, a: i8, b: int8x16_t) -> int8x16_t { + unsafe { vsetq_lane_s8::(a, b) } + } + #[doc = "See [`arch::vset_lane_s16`]."] + #[inline(always)] + pub fn vset_lane_s16(self, a: i16, b: int16x4_t) -> int16x4_t { + unsafe { vset_lane_s16::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_s16`]."] + #[inline(always)] + pub fn vsetq_lane_s16(self, a: 
i16, b: int16x8_t) -> int16x8_t { + unsafe { vsetq_lane_s16::(a, b) } + } + #[doc = "See [`arch::vset_lane_s32`]."] + #[inline(always)] + pub fn vset_lane_s32(self, a: i32, b: int32x2_t) -> int32x2_t { + unsafe { vset_lane_s32::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_s32`]."] + #[inline(always)] + pub fn vsetq_lane_s32(self, a: i32, b: int32x4_t) -> int32x4_t { + unsafe { vsetq_lane_s32::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_s64`]."] + #[inline(always)] + pub fn vsetq_lane_s64(self, a: i64, b: int64x2_t) -> int64x2_t { + unsafe { vsetq_lane_s64::(a, b) } + } + #[doc = "See [`arch::vset_lane_u8`]."] + #[inline(always)] + pub fn vset_lane_u8(self, a: u8, b: uint8x8_t) -> uint8x8_t { + unsafe { vset_lane_u8::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_u8`]."] + #[inline(always)] + pub fn vsetq_lane_u8(self, a: u8, b: uint8x16_t) -> uint8x16_t { + unsafe { vsetq_lane_u8::(a, b) } + } + #[doc = "See [`arch::vset_lane_u16`]."] + #[inline(always)] + pub fn vset_lane_u16(self, a: u16, b: uint16x4_t) -> uint16x4_t { + unsafe { vset_lane_u16::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_u16`]."] + #[inline(always)] + pub fn vsetq_lane_u16(self, a: u16, b: uint16x8_t) -> uint16x8_t { + unsafe { vsetq_lane_u16::(a, b) } + } + #[doc = "See [`arch::vset_lane_u32`]."] + #[inline(always)] + pub fn vset_lane_u32(self, a: u32, b: uint32x2_t) -> uint32x2_t { + unsafe { vset_lane_u32::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_u32`]."] + #[inline(always)] + pub fn vsetq_lane_u32(self, a: u32, b: uint32x4_t) -> uint32x4_t { + unsafe { vsetq_lane_u32::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_u64`]."] + #[inline(always)] + pub fn vsetq_lane_u64(self, a: u64, b: uint64x2_t) -> uint64x2_t { + unsafe { vsetq_lane_u64::(a, b) } + } + #[doc = "See [`arch::vset_lane_p8`]."] + #[inline(always)] + pub fn vset_lane_p8(self, a: p8, b: poly8x8_t) -> poly8x8_t { + unsafe { vset_lane_p8::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_p8`]."] + #[inline(always)] + pub fn 
vsetq_lane_p8(self, a: p8, b: poly8x16_t) -> poly8x16_t { + unsafe { vsetq_lane_p8::(a, b) } + } + #[doc = "See [`arch::vset_lane_p16`]."] + #[inline(always)] + pub fn vset_lane_p16(self, a: p16, b: poly16x4_t) -> poly16x4_t { + unsafe { vset_lane_p16::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_p16`]."] + #[inline(always)] + pub fn vsetq_lane_p16(self, a: p16, b: poly16x8_t) -> poly16x8_t { + unsafe { vsetq_lane_p16::(a, b) } + } + #[doc = "See [`arch::vset_lane_p64`]."] + #[inline(always)] + pub fn vset_lane_p64(self, a: p64, b: poly64x1_t) -> poly64x1_t { + unsafe { vset_lane_p64::(a, b) } + } + #[doc = "See [`arch::vset_lane_s64`]."] + #[inline(always)] + pub fn vset_lane_s64(self, a: i64, b: int64x1_t) -> int64x1_t { + unsafe { vset_lane_s64::(a, b) } + } + #[doc = "See [`arch::vset_lane_u64`]."] + #[inline(always)] + pub fn vset_lane_u64(self, a: u64, b: uint64x1_t) -> uint64x1_t { + unsafe { vset_lane_u64::(a, b) } + } + #[doc = "See [`arch::vsetq_lane_p64`]."] + #[inline(always)] + pub fn vsetq_lane_p64(self, a: p64, b: poly64x2_t) -> poly64x2_t { + unsafe { vsetq_lane_p64::(a, b) } + } + #[doc = "See [`arch::vshl_n_s8`]."] + #[inline(always)] + pub fn vshl_n_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vshl_n_s8::(a) } + } + #[doc = "See [`arch::vshlq_n_s8`]."] + #[inline(always)] + pub fn vshlq_n_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vshlq_n_s8::(a) } + } + #[doc = "See [`arch::vshl_n_s16`]."] + #[inline(always)] + pub fn vshl_n_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vshl_n_s16::(a) } + } + #[doc = "See [`arch::vshlq_n_s16`]."] + #[inline(always)] + pub fn vshlq_n_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vshlq_n_s16::(a) } + } + #[doc = "See [`arch::vshl_n_s32`]."] + #[inline(always)] + pub fn vshl_n_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vshl_n_s32::(a) } + } + #[doc = "See [`arch::vshlq_n_s32`]."] + #[inline(always)] + pub fn vshlq_n_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vshlq_n_s32::(a) } + } + 
#[doc = "See [`arch::vshl_n_s64`]."] + #[inline(always)] + pub fn vshl_n_s64(self, a: int64x1_t) -> int64x1_t { + unsafe { vshl_n_s64::(a) } + } + #[doc = "See [`arch::vshlq_n_s64`]."] + #[inline(always)] + pub fn vshlq_n_s64(self, a: int64x2_t) -> int64x2_t { + unsafe { vshlq_n_s64::(a) } + } + #[doc = "See [`arch::vshl_n_u8`]."] + #[inline(always)] + pub fn vshl_n_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vshl_n_u8::(a) } + } + #[doc = "See [`arch::vshlq_n_u8`]."] + #[inline(always)] + pub fn vshlq_n_u8(self, a: uint8x16_t) -> uint8x16_t { + unsafe { vshlq_n_u8::(a) } + } + #[doc = "See [`arch::vshl_n_u16`]."] + #[inline(always)] + pub fn vshl_n_u16(self, a: uint16x4_t) -> uint16x4_t { + unsafe { vshl_n_u16::(a) } + } + #[doc = "See [`arch::vshlq_n_u16`]."] + #[inline(always)] + pub fn vshlq_n_u16(self, a: uint16x8_t) -> uint16x8_t { + unsafe { vshlq_n_u16::(a) } + } + #[doc = "See [`arch::vshl_n_u32`]."] + #[inline(always)] + pub fn vshl_n_u32(self, a: uint32x2_t) -> uint32x2_t { + unsafe { vshl_n_u32::(a) } + } + #[doc = "See [`arch::vshlq_n_u32`]."] + #[inline(always)] + pub fn vshlq_n_u32(self, a: uint32x4_t) -> uint32x4_t { + unsafe { vshlq_n_u32::(a) } + } + #[doc = "See [`arch::vshl_n_u64`]."] + #[inline(always)] + pub fn vshl_n_u64(self, a: uint64x1_t) -> uint64x1_t { + unsafe { vshl_n_u64::(a) } + } + #[doc = "See [`arch::vshlq_n_u64`]."] + #[inline(always)] + pub fn vshlq_n_u64(self, a: uint64x2_t) -> uint64x2_t { + unsafe { vshlq_n_u64::(a) } + } + #[doc = "See [`arch::vshl_s8`]."] + #[inline(always)] + pub fn vshl_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vshl_s8(a, b) } + } + #[doc = "See [`arch::vshlq_s8`]."] + #[inline(always)] + pub fn vshlq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vshlq_s8(a, b) } + } + #[doc = "See [`arch::vshl_s16`]."] + #[inline(always)] + pub fn vshl_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vshl_s16(a, b) } + } + #[doc = "See [`arch::vshlq_s16`]."] + 
#[inline(always)] + pub fn vshlq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vshlq_s16(a, b) } + } + #[doc = "See [`arch::vshl_s32`]."] + #[inline(always)] + pub fn vshl_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vshl_s32(a, b) } + } + #[doc = "See [`arch::vshlq_s32`]."] + #[inline(always)] + pub fn vshlq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vshlq_s32(a, b) } + } + #[doc = "See [`arch::vshl_s64`]."] + #[inline(always)] + pub fn vshl_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vshl_s64(a, b) } + } + #[doc = "See [`arch::vshlq_s64`]."] + #[inline(always)] + pub fn vshlq_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vshlq_s64(a, b) } + } + #[doc = "See [`arch::vshl_u8`]."] + #[inline(always)] + pub fn vshl_u8(self, a: uint8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { vshl_u8(a, b) } + } + #[doc = "See [`arch::vshlq_u8`]."] + #[inline(always)] + pub fn vshlq_u8(self, a: uint8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { vshlq_u8(a, b) } + } + #[doc = "See [`arch::vshl_u16`]."] + #[inline(always)] + pub fn vshl_u16(self, a: uint16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { vshl_u16(a, b) } + } + #[doc = "See [`arch::vshlq_u16`]."] + #[inline(always)] + pub fn vshlq_u16(self, a: uint16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { vshlq_u16(a, b) } + } + #[doc = "See [`arch::vshl_u32`]."] + #[inline(always)] + pub fn vshl_u32(self, a: uint32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { vshl_u32(a, b) } + } + #[doc = "See [`arch::vshlq_u32`]."] + #[inline(always)] + pub fn vshlq_u32(self, a: uint32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { vshlq_u32(a, b) } + } + #[doc = "See [`arch::vshl_u64`]."] + #[inline(always)] + pub fn vshl_u64(self, a: uint64x1_t, b: int64x1_t) -> uint64x1_t { + unsafe { vshl_u64(a, b) } + } + #[doc = "See [`arch::vshlq_u64`]."] + #[inline(always)] + pub fn vshlq_u64(self, a: uint64x2_t, b: int64x2_t) -> uint64x2_t { + unsafe { 
vshlq_u64(a, b) } + } + #[doc = "See [`arch::vshll_n_s16`]."] + #[inline(always)] + pub fn vshll_n_s16(self, a: int16x4_t) -> int32x4_t { + unsafe { vshll_n_s16::(a) } + } + #[doc = "See [`arch::vshll_n_s32`]."] + #[inline(always)] + pub fn vshll_n_s32(self, a: int32x2_t) -> int64x2_t { + unsafe { vshll_n_s32::(a) } + } + #[doc = "See [`arch::vshll_n_s8`]."] + #[inline(always)] + pub fn vshll_n_s8(self, a: int8x8_t) -> int16x8_t { + unsafe { vshll_n_s8::(a) } + } + #[doc = "See [`arch::vshll_n_u16`]."] + #[inline(always)] + pub fn vshll_n_u16(self, a: uint16x4_t) -> uint32x4_t { + unsafe { vshll_n_u16::(a) } + } + #[doc = "See [`arch::vshll_n_u32`]."] + #[inline(always)] + pub fn vshll_n_u32(self, a: uint32x2_t) -> uint64x2_t { + unsafe { vshll_n_u32::(a) } + } + #[doc = "See [`arch::vshll_n_u8`]."] + #[inline(always)] + pub fn vshll_n_u8(self, a: uint8x8_t) -> uint16x8_t { + unsafe { vshll_n_u8::(a) } + } + #[doc = "See [`arch::vshr_n_s8`]."] + #[inline(always)] + pub fn vshr_n_s8(self, a: int8x8_t) -> int8x8_t { + unsafe { vshr_n_s8::(a) } + } + #[doc = "See [`arch::vshrq_n_s8`]."] + #[inline(always)] + pub fn vshrq_n_s8(self, a: int8x16_t) -> int8x16_t { + unsafe { vshrq_n_s8::(a) } + } + #[doc = "See [`arch::vshr_n_s16`]."] + #[inline(always)] + pub fn vshr_n_s16(self, a: int16x4_t) -> int16x4_t { + unsafe { vshr_n_s16::(a) } + } + #[doc = "See [`arch::vshrq_n_s16`]."] + #[inline(always)] + pub fn vshrq_n_s16(self, a: int16x8_t) -> int16x8_t { + unsafe { vshrq_n_s16::(a) } + } + #[doc = "See [`arch::vshr_n_s32`]."] + #[inline(always)] + pub fn vshr_n_s32(self, a: int32x2_t) -> int32x2_t { + unsafe { vshr_n_s32::(a) } + } + #[doc = "See [`arch::vshrq_n_s32`]."] + #[inline(always)] + pub fn vshrq_n_s32(self, a: int32x4_t) -> int32x4_t { + unsafe { vshrq_n_s32::(a) } + } + #[doc = "See [`arch::vshr_n_s64`]."] + #[inline(always)] + pub fn vshr_n_s64(self, a: int64x1_t) -> int64x1_t { + unsafe { vshr_n_s64::(a) } + } + #[doc = "See [`arch::vshrq_n_s64`]."] + 
#[inline(always)] + pub fn vshrq_n_s64(self, a: int64x2_t) -> int64x2_t { + unsafe { vshrq_n_s64::(a) } + } + #[doc = "See [`arch::vshr_n_u8`]."] + #[inline(always)] + pub fn vshr_n_u8(self, a: uint8x8_t) -> uint8x8_t { + unsafe { vshr_n_u8::(a) } + } + #[doc = "See [`arch::vshrq_n_u8`]."] + #[inline(always)] + pub fn vshrq_n_u8(self, a: uint8x16_t) -> uint8x16_t { + unsafe { vshrq_n_u8::(a) } + } + #[doc = "See [`arch::vshr_n_u16`]."] + #[inline(always)] + pub fn vshr_n_u16(self, a: uint16x4_t) -> uint16x4_t { + unsafe { vshr_n_u16::(a) } + } + #[doc = "See [`arch::vshrq_n_u16`]."] + #[inline(always)] + pub fn vshrq_n_u16(self, a: uint16x8_t) -> uint16x8_t { + unsafe { vshrq_n_u16::(a) } + } + #[doc = "See [`arch::vshr_n_u32`]."] + #[inline(always)] + pub fn vshr_n_u32(self, a: uint32x2_t) -> uint32x2_t { + unsafe { vshr_n_u32::(a) } + } + #[doc = "See [`arch::vshrq_n_u32`]."] + #[inline(always)] + pub fn vshrq_n_u32(self, a: uint32x4_t) -> uint32x4_t { + unsafe { vshrq_n_u32::(a) } + } + #[doc = "See [`arch::vshr_n_u64`]."] + #[inline(always)] + pub fn vshr_n_u64(self, a: uint64x1_t) -> uint64x1_t { + unsafe { vshr_n_u64::(a) } + } + #[doc = "See [`arch::vshrq_n_u64`]."] + #[inline(always)] + pub fn vshrq_n_u64(self, a: uint64x2_t) -> uint64x2_t { + unsafe { vshrq_n_u64::(a) } + } + #[doc = "See [`arch::vshrn_n_s16`]."] + #[inline(always)] + pub fn vshrn_n_s16(self, a: int16x8_t) -> int8x8_t { + unsafe { vshrn_n_s16::(a) } + } + #[doc = "See [`arch::vshrn_n_s32`]."] + #[inline(always)] + pub fn vshrn_n_s32(self, a: int32x4_t) -> int16x4_t { + unsafe { vshrn_n_s32::(a) } + } + #[doc = "See [`arch::vshrn_n_s64`]."] + #[inline(always)] + pub fn vshrn_n_s64(self, a: int64x2_t) -> int32x2_t { + unsafe { vshrn_n_s64::(a) } + } + #[doc = "See [`arch::vshrn_n_u16`]."] + #[inline(always)] + pub fn vshrn_n_u16(self, a: uint16x8_t) -> uint8x8_t { + unsafe { vshrn_n_u16::(a) } + } + #[doc = "See [`arch::vshrn_n_u32`]."] + #[inline(always)] + pub fn vshrn_n_u32(self, a: 
uint32x4_t) -> uint16x4_t { + unsafe { vshrn_n_u32::(a) } + } + #[doc = "See [`arch::vshrn_n_u64`]."] + #[inline(always)] + pub fn vshrn_n_u64(self, a: uint64x2_t) -> uint32x2_t { + unsafe { vshrn_n_u64::(a) } + } + #[doc = "See [`arch::vsra_n_s8`]."] + #[inline(always)] + pub fn vsra_n_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vsra_n_s8::(a, b) } + } + #[doc = "See [`arch::vsraq_n_s8`]."] + #[inline(always)] + pub fn vsraq_n_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vsraq_n_s8::(a, b) } + } + #[doc = "See [`arch::vsra_n_s16`]."] + #[inline(always)] + pub fn vsra_n_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vsra_n_s16::(a, b) } + } + #[doc = "See [`arch::vsraq_n_s16`]."] + #[inline(always)] + pub fn vsraq_n_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vsraq_n_s16::(a, b) } + } + #[doc = "See [`arch::vsra_n_s32`]."] + #[inline(always)] + pub fn vsra_n_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vsra_n_s32::(a, b) } + } + #[doc = "See [`arch::vsraq_n_s32`]."] + #[inline(always)] + pub fn vsraq_n_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vsraq_n_s32::(a, b) } + } + #[doc = "See [`arch::vsra_n_s64`]."] + #[inline(always)] + pub fn vsra_n_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vsra_n_s64::(a, b) } + } + #[doc = "See [`arch::vsraq_n_s64`]."] + #[inline(always)] + pub fn vsraq_n_s64(self, a: int64x2_t, b: int64x2_t) -> int64x2_t { + unsafe { vsraq_n_s64::(a, b) } + } + #[doc = "See [`arch::vsra_n_u8`]."] + #[inline(always)] + pub fn vsra_n_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vsra_n_u8::(a, b) } + } + #[doc = "See [`arch::vsraq_n_u8`]."] + #[inline(always)] + pub fn vsraq_n_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vsraq_n_u8::(a, b) } + } + #[doc = "See [`arch::vsra_n_u16`]."] + #[inline(always)] + pub fn vsra_n_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { 
vsra_n_u16::(a, b) } + } + #[doc = "See [`arch::vsraq_n_u16`]."] + #[inline(always)] + pub fn vsraq_n_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vsraq_n_u16::(a, b) } + } + #[doc = "See [`arch::vsra_n_u32`]."] + #[inline(always)] + pub fn vsra_n_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vsra_n_u32::(a, b) } + } + #[doc = "See [`arch::vsraq_n_u32`]."] + #[inline(always)] + pub fn vsraq_n_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vsraq_n_u32::(a, b) } + } + #[doc = "See [`arch::vsra_n_u64`]."] + #[inline(always)] + pub fn vsra_n_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vsra_n_u64::(a, b) } + } + #[doc = "See [`arch::vsraq_n_u64`]."] + #[inline(always)] + pub fn vsraq_n_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vsraq_n_u64::(a, b) } + } + #[doc = "See [`arch::vst1_f32_x3`]."] + #[inline(always)] + pub unsafe fn vst1_f32_x3(self, a: *mut f32, b: float32x2x3_t) { + unsafe { vst1_f32_x3(a, b) } + } + #[doc = "See [`arch::vst1q_f32_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_f32_x3(self, a: *mut f32, b: float32x4x3_t) { + unsafe { vst1q_f32_x3(a, b) } + } + #[doc = "See [`arch::vst1_lane_f32`]."] + #[inline(always)] + pub unsafe fn vst1_lane_f32(self, a: *mut f32, b: float32x2_t) { + unsafe { vst1_lane_f32::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_f32`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_f32(self, a: *mut f32, b: float32x4_t) { + unsafe { vst1q_lane_f32::(a, b) } + } + #[doc = "See [`arch::vst1_lane_s8`]."] + #[inline(always)] + pub unsafe fn vst1_lane_s8(self, a: *mut i8, b: int8x8_t) { + unsafe { vst1_lane_s8::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_s8`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_s8(self, a: *mut i8, b: int8x16_t) { + unsafe { vst1q_lane_s8::(a, b) } + } + #[doc = "See [`arch::vst1_lane_s16`]."] + #[inline(always)] + pub unsafe fn vst1_lane_s16(self, a: *mut i16, b: int16x4_t) { + unsafe { 
vst1_lane_s16::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_s16`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_s16(self, a: *mut i16, b: int16x8_t) { + unsafe { vst1q_lane_s16::(a, b) } + } + #[doc = "See [`arch::vst1_lane_s32`]."] + #[inline(always)] + pub unsafe fn vst1_lane_s32(self, a: *mut i32, b: int32x2_t) { + unsafe { vst1_lane_s32::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_s32`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_s32(self, a: *mut i32, b: int32x4_t) { + unsafe { vst1q_lane_s32::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_s64`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_s64(self, a: *mut i64, b: int64x2_t) { + unsafe { vst1q_lane_s64::(a, b) } + } + #[doc = "See [`arch::vst1_lane_u8`]."] + #[inline(always)] + pub unsafe fn vst1_lane_u8(self, a: *mut u8, b: uint8x8_t) { + unsafe { vst1_lane_u8::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_u8`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_u8(self, a: *mut u8, b: uint8x16_t) { + unsafe { vst1q_lane_u8::(a, b) } + } + #[doc = "See [`arch::vst1_lane_u16`]."] + #[inline(always)] + pub unsafe fn vst1_lane_u16(self, a: *mut u16, b: uint16x4_t) { + unsafe { vst1_lane_u16::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_u16`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_u16(self, a: *mut u16, b: uint16x8_t) { + unsafe { vst1q_lane_u16::(a, b) } + } + #[doc = "See [`arch::vst1_lane_u32`]."] + #[inline(always)] + pub unsafe fn vst1_lane_u32(self, a: *mut u32, b: uint32x2_t) { + unsafe { vst1_lane_u32::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_u32`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_u32(self, a: *mut u32, b: uint32x4_t) { + unsafe { vst1q_lane_u32::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_u64`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_u64(self, a: *mut u64, b: uint64x2_t) { + unsafe { vst1q_lane_u64::(a, b) } + } + #[doc = "See [`arch::vst1_lane_p8`]."] + #[inline(always)] + pub unsafe fn vst1_lane_p8(self, a: *mut p8, b: poly8x8_t) { + 
unsafe { vst1_lane_p8::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_p8`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_p8(self, a: *mut p8, b: poly8x16_t) { + unsafe { vst1q_lane_p8::(a, b) } + } + #[doc = "See [`arch::vst1_lane_p16`]."] + #[inline(always)] + pub unsafe fn vst1_lane_p16(self, a: *mut p16, b: poly16x4_t) { + unsafe { vst1_lane_p16::(a, b) } + } + #[doc = "See [`arch::vst1q_lane_p16`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_p16(self, a: *mut p16, b: poly16x8_t) { + unsafe { vst1q_lane_p16::(a, b) } + } + #[doc = "See [`arch::vst1_lane_p64`]."] + #[inline(always)] + pub unsafe fn vst1_lane_p64(self, a: *mut p64, b: poly64x1_t) { + unsafe { vst1_lane_p64::(a, b) } + } + #[doc = "See [`arch::vst1_lane_s64`]."] + #[inline(always)] + pub unsafe fn vst1_lane_s64(self, a: *mut i64, b: int64x1_t) { + unsafe { vst1_lane_s64::(a, b) } + } + #[doc = "See [`arch::vst1_lane_u64`]."] + #[inline(always)] + pub unsafe fn vst1_lane_u64(self, a: *mut u64, b: uint64x1_t) { + unsafe { vst1_lane_u64::(a, b) } + } + #[doc = "See [`arch::vst1_p64_x2`]."] + #[inline(always)] + pub unsafe fn vst1_p64_x2(self, a: *mut p64, b: poly64x1x2_t) { + unsafe { vst1_p64_x2(a, b) } + } + #[doc = "See [`arch::vst1_p64_x3`]."] + #[inline(always)] + pub unsafe fn vst1_p64_x3(self, a: *mut p64, b: poly64x1x3_t) { + unsafe { vst1_p64_x3(a, b) } + } + #[doc = "See [`arch::vst1_p64_x4`]."] + #[inline(always)] + pub unsafe fn vst1_p64_x4(self, a: *mut p64, b: poly64x1x4_t) { + unsafe { vst1_p64_x4(a, b) } + } + #[doc = "See [`arch::vst1q_p64_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_p64_x2(self, a: *mut p64, b: poly64x2x2_t) { + unsafe { vst1q_p64_x2(a, b) } + } + #[doc = "See [`arch::vst1q_p64_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_p64_x3(self, a: *mut p64, b: poly64x2x3_t) { + unsafe { vst1q_p64_x3(a, b) } + } + #[doc = "See [`arch::vst1q_p64_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_p64_x4(self, a: *mut p64, b: poly64x2x4_t) { + unsafe { 
vst1q_p64_x4(a, b) } + } + #[doc = "See [`arch::vst1_s8_x2`]."] + #[inline(always)] + pub unsafe fn vst1_s8_x2(self, a: *mut i8, b: int8x8x2_t) { + unsafe { vst1_s8_x2(a, b) } + } + #[doc = "See [`arch::vst1q_s8_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_s8_x2(self, a: *mut i8, b: int8x16x2_t) { + unsafe { vst1q_s8_x2(a, b) } + } + #[doc = "See [`arch::vst1_s16_x2`]."] + #[inline(always)] + pub unsafe fn vst1_s16_x2(self, a: *mut i16, b: int16x4x2_t) { + unsafe { vst1_s16_x2(a, b) } + } + #[doc = "See [`arch::vst1q_s16_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_s16_x2(self, a: *mut i16, b: int16x8x2_t) { + unsafe { vst1q_s16_x2(a, b) } + } + #[doc = "See [`arch::vst1_s32_x2`]."] + #[inline(always)] + pub unsafe fn vst1_s32_x2(self, a: *mut i32, b: int32x2x2_t) { + unsafe { vst1_s32_x2(a, b) } + } + #[doc = "See [`arch::vst1q_s32_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_s32_x2(self, a: *mut i32, b: int32x4x2_t) { + unsafe { vst1q_s32_x2(a, b) } + } + #[doc = "See [`arch::vst1_s64_x2`]."] + #[inline(always)] + pub unsafe fn vst1_s64_x2(self, a: *mut i64, b: int64x1x2_t) { + unsafe { vst1_s64_x2(a, b) } + } + #[doc = "See [`arch::vst1q_s64_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_s64_x2(self, a: *mut i64, b: int64x2x2_t) { + unsafe { vst1q_s64_x2(a, b) } + } + #[doc = "See [`arch::vst1_s8_x3`]."] + #[inline(always)] + pub unsafe fn vst1_s8_x3(self, a: *mut i8, b: int8x8x3_t) { + unsafe { vst1_s8_x3(a, b) } + } + #[doc = "See [`arch::vst1q_s8_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_s8_x3(self, a: *mut i8, b: int8x16x3_t) { + unsafe { vst1q_s8_x3(a, b) } + } + #[doc = "See [`arch::vst1_s16_x3`]."] + #[inline(always)] + pub unsafe fn vst1_s16_x3(self, a: *mut i16, b: int16x4x3_t) { + unsafe { vst1_s16_x3(a, b) } + } + #[doc = "See [`arch::vst1q_s16_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_s16_x3(self, a: *mut i16, b: int16x8x3_t) { + unsafe { vst1q_s16_x3(a, b) } + } + #[doc = "See [`arch::vst1_s32_x3`]."] + 
#[inline(always)] + pub unsafe fn vst1_s32_x3(self, a: *mut i32, b: int32x2x3_t) { + unsafe { vst1_s32_x3(a, b) } + } + #[doc = "See [`arch::vst1q_s32_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_s32_x3(self, a: *mut i32, b: int32x4x3_t) { + unsafe { vst1q_s32_x3(a, b) } + } + #[doc = "See [`arch::vst1_s64_x3`]."] + #[inline(always)] + pub unsafe fn vst1_s64_x3(self, a: *mut i64, b: int64x1x3_t) { + unsafe { vst1_s64_x3(a, b) } + } + #[doc = "See [`arch::vst1q_s64_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_s64_x3(self, a: *mut i64, b: int64x2x3_t) { + unsafe { vst1q_s64_x3(a, b) } + } + #[doc = "See [`arch::vst1_s8_x4`]."] + #[inline(always)] + pub unsafe fn vst1_s8_x4(self, a: *mut i8, b: int8x8x4_t) { + unsafe { vst1_s8_x4(a, b) } + } + #[doc = "See [`arch::vst1q_s8_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_s8_x4(self, a: *mut i8, b: int8x16x4_t) { + unsafe { vst1q_s8_x4(a, b) } + } + #[doc = "See [`arch::vst1_s16_x4`]."] + #[inline(always)] + pub unsafe fn vst1_s16_x4(self, a: *mut i16, b: int16x4x4_t) { + unsafe { vst1_s16_x4(a, b) } + } + #[doc = "See [`arch::vst1q_s16_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_s16_x4(self, a: *mut i16, b: int16x8x4_t) { + unsafe { vst1q_s16_x4(a, b) } + } + #[doc = "See [`arch::vst1_s32_x4`]."] + #[inline(always)] + pub unsafe fn vst1_s32_x4(self, a: *mut i32, b: int32x2x4_t) { + unsafe { vst1_s32_x4(a, b) } + } + #[doc = "See [`arch::vst1q_s32_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_s32_x4(self, a: *mut i32, b: int32x4x4_t) { + unsafe { vst1q_s32_x4(a, b) } + } + #[doc = "See [`arch::vst1_s64_x4`]."] + #[inline(always)] + pub unsafe fn vst1_s64_x4(self, a: *mut i64, b: int64x1x4_t) { + unsafe { vst1_s64_x4(a, b) } + } + #[doc = "See [`arch::vst1q_s64_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_s64_x4(self, a: *mut i64, b: int64x2x4_t) { + unsafe { vst1q_s64_x4(a, b) } + } + #[doc = "See [`arch::vst1_u8_x2`]."] + #[inline(always)] + pub unsafe fn vst1_u8_x2(self, a: *mut u8, b: 
uint8x8x2_t) { + unsafe { vst1_u8_x2(a, b) } + } + #[doc = "See [`arch::vst1_u8_x3`]."] + #[inline(always)] + pub unsafe fn vst1_u8_x3(self, a: *mut u8, b: uint8x8x3_t) { + unsafe { vst1_u8_x3(a, b) } + } + #[doc = "See [`arch::vst1_u8_x4`]."] + #[inline(always)] + pub unsafe fn vst1_u8_x4(self, a: *mut u8, b: uint8x8x4_t) { + unsafe { vst1_u8_x4(a, b) } + } + #[doc = "See [`arch::vst1q_u8_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_u8_x2(self, a: *mut u8, b: uint8x16x2_t) { + unsafe { vst1q_u8_x2(a, b) } + } + #[doc = "See [`arch::vst1q_u8_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_u8_x3(self, a: *mut u8, b: uint8x16x3_t) { + unsafe { vst1q_u8_x3(a, b) } + } + #[doc = "See [`arch::vst1q_u8_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_u8_x4(self, a: *mut u8, b: uint8x16x4_t) { + unsafe { vst1q_u8_x4(a, b) } + } + #[doc = "See [`arch::vst1_u16_x2`]."] + #[inline(always)] + pub unsafe fn vst1_u16_x2(self, a: *mut u16, b: uint16x4x2_t) { + unsafe { vst1_u16_x2(a, b) } + } + #[doc = "See [`arch::vst1_u16_x3`]."] + #[inline(always)] + pub unsafe fn vst1_u16_x3(self, a: *mut u16, b: uint16x4x3_t) { + unsafe { vst1_u16_x3(a, b) } + } + #[doc = "See [`arch::vst1_u16_x4`]."] + #[inline(always)] + pub unsafe fn vst1_u16_x4(self, a: *mut u16, b: uint16x4x4_t) { + unsafe { vst1_u16_x4(a, b) } + } + #[doc = "See [`arch::vst1q_u16_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_u16_x2(self, a: *mut u16, b: uint16x8x2_t) { + unsafe { vst1q_u16_x2(a, b) } + } + #[doc = "See [`arch::vst1q_u16_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_u16_x3(self, a: *mut u16, b: uint16x8x3_t) { + unsafe { vst1q_u16_x3(a, b) } + } + #[doc = "See [`arch::vst1q_u16_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_u16_x4(self, a: *mut u16, b: uint16x8x4_t) { + unsafe { vst1q_u16_x4(a, b) } + } + #[doc = "See [`arch::vst1_u32_x2`]."] + #[inline(always)] + pub unsafe fn vst1_u32_x2(self, a: *mut u32, b: uint32x2x2_t) { + unsafe { vst1_u32_x2(a, b) } + } + #[doc = "See 
[`arch::vst1_u32_x3`]."] + #[inline(always)] + pub unsafe fn vst1_u32_x3(self, a: *mut u32, b: uint32x2x3_t) { + unsafe { vst1_u32_x3(a, b) } + } + #[doc = "See [`arch::vst1_u32_x4`]."] + #[inline(always)] + pub unsafe fn vst1_u32_x4(self, a: *mut u32, b: uint32x2x4_t) { + unsafe { vst1_u32_x4(a, b) } + } + #[doc = "See [`arch::vst1q_u32_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_u32_x2(self, a: *mut u32, b: uint32x4x2_t) { + unsafe { vst1q_u32_x2(a, b) } + } + #[doc = "See [`arch::vst1q_u32_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_u32_x3(self, a: *mut u32, b: uint32x4x3_t) { + unsafe { vst1q_u32_x3(a, b) } + } + #[doc = "See [`arch::vst1q_u32_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_u32_x4(self, a: *mut u32, b: uint32x4x4_t) { + unsafe { vst1q_u32_x4(a, b) } + } + #[doc = "See [`arch::vst1_u64_x2`]."] + #[inline(always)] + pub unsafe fn vst1_u64_x2(self, a: *mut u64, b: uint64x1x2_t) { + unsafe { vst1_u64_x2(a, b) } + } + #[doc = "See [`arch::vst1_u64_x3`]."] + #[inline(always)] + pub unsafe fn vst1_u64_x3(self, a: *mut u64, b: uint64x1x3_t) { + unsafe { vst1_u64_x3(a, b) } + } + #[doc = "See [`arch::vst1_u64_x4`]."] + #[inline(always)] + pub unsafe fn vst1_u64_x4(self, a: *mut u64, b: uint64x1x4_t) { + unsafe { vst1_u64_x4(a, b) } + } + #[doc = "See [`arch::vst1q_u64_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_u64_x2(self, a: *mut u64, b: uint64x2x2_t) { + unsafe { vst1q_u64_x2(a, b) } + } + #[doc = "See [`arch::vst1q_u64_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_u64_x3(self, a: *mut u64, b: uint64x2x3_t) { + unsafe { vst1q_u64_x3(a, b) } + } + #[doc = "See [`arch::vst1q_u64_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_u64_x4(self, a: *mut u64, b: uint64x2x4_t) { + unsafe { vst1q_u64_x4(a, b) } + } + #[doc = "See [`arch::vst1_p8_x2`]."] + #[inline(always)] + pub unsafe fn vst1_p8_x2(self, a: *mut p8, b: poly8x8x2_t) { + unsafe { vst1_p8_x2(a, b) } + } + #[doc = "See [`arch::vst1_p8_x3`]."] + #[inline(always)] + pub 
unsafe fn vst1_p8_x3(self, a: *mut p8, b: poly8x8x3_t) { + unsafe { vst1_p8_x3(a, b) } + } + #[doc = "See [`arch::vst1_p8_x4`]."] + #[inline(always)] + pub unsafe fn vst1_p8_x4(self, a: *mut p8, b: poly8x8x4_t) { + unsafe { vst1_p8_x4(a, b) } + } + #[doc = "See [`arch::vst1q_p8_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_p8_x2(self, a: *mut p8, b: poly8x16x2_t) { + unsafe { vst1q_p8_x2(a, b) } + } + #[doc = "See [`arch::vst1q_p8_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_p8_x3(self, a: *mut p8, b: poly8x16x3_t) { + unsafe { vst1q_p8_x3(a, b) } + } + #[doc = "See [`arch::vst1q_p8_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_p8_x4(self, a: *mut p8, b: poly8x16x4_t) { + unsafe { vst1q_p8_x4(a, b) } + } + #[doc = "See [`arch::vst1_p16_x2`]."] + #[inline(always)] + pub unsafe fn vst1_p16_x2(self, a: *mut p16, b: poly16x4x2_t) { + unsafe { vst1_p16_x2(a, b) } + } + #[doc = "See [`arch::vst1_p16_x3`]."] + #[inline(always)] + pub unsafe fn vst1_p16_x3(self, a: *mut p16, b: poly16x4x3_t) { + unsafe { vst1_p16_x3(a, b) } + } + #[doc = "See [`arch::vst1_p16_x4`]."] + #[inline(always)] + pub unsafe fn vst1_p16_x4(self, a: *mut p16, b: poly16x4x4_t) { + unsafe { vst1_p16_x4(a, b) } + } + #[doc = "See [`arch::vst1q_p16_x2`]."] + #[inline(always)] + pub unsafe fn vst1q_p16_x2(self, a: *mut p16, b: poly16x8x2_t) { + unsafe { vst1q_p16_x2(a, b) } + } + #[doc = "See [`arch::vst1q_p16_x3`]."] + #[inline(always)] + pub unsafe fn vst1q_p16_x3(self, a: *mut p16, b: poly16x8x3_t) { + unsafe { vst1q_p16_x3(a, b) } + } + #[doc = "See [`arch::vst1q_p16_x4`]."] + #[inline(always)] + pub unsafe fn vst1q_p16_x4(self, a: *mut p16, b: poly16x8x4_t) { + unsafe { vst1q_p16_x4(a, b) } + } + #[doc = "See [`arch::vst1q_lane_p64`]."] + #[inline(always)] + pub unsafe fn vst1q_lane_p64(self, a: *mut p64, b: poly64x2_t) { + unsafe { vst1q_lane_p64::(a, b) } + } + #[doc = "See [`arch::vst2_f32`]."] + #[inline(always)] + pub unsafe fn vst2_f32(self, a: *mut f32, b: float32x2x2_t) { + 
unsafe { vst2_f32(a, b) } + } + #[doc = "See [`arch::vst2q_f32`]."] + #[inline(always)] + pub unsafe fn vst2q_f32(self, a: *mut f32, b: float32x4x2_t) { + unsafe { vst2q_f32(a, b) } + } + #[doc = "See [`arch::vst2_s8`]."] + #[inline(always)] + pub unsafe fn vst2_s8(self, a: *mut i8, b: int8x8x2_t) { + unsafe { vst2_s8(a, b) } + } + #[doc = "See [`arch::vst2q_s8`]."] + #[inline(always)] + pub unsafe fn vst2q_s8(self, a: *mut i8, b: int8x16x2_t) { + unsafe { vst2q_s8(a, b) } + } + #[doc = "See [`arch::vst2_s16`]."] + #[inline(always)] + pub unsafe fn vst2_s16(self, a: *mut i16, b: int16x4x2_t) { + unsafe { vst2_s16(a, b) } + } + #[doc = "See [`arch::vst2q_s16`]."] + #[inline(always)] + pub unsafe fn vst2q_s16(self, a: *mut i16, b: int16x8x2_t) { + unsafe { vst2q_s16(a, b) } + } + #[doc = "See [`arch::vst2_s32`]."] + #[inline(always)] + pub unsafe fn vst2_s32(self, a: *mut i32, b: int32x2x2_t) { + unsafe { vst2_s32(a, b) } + } + #[doc = "See [`arch::vst2q_s32`]."] + #[inline(always)] + pub unsafe fn vst2q_s32(self, a: *mut i32, b: int32x4x2_t) { + unsafe { vst2q_s32(a, b) } + } + #[doc = "See [`arch::vst2_lane_f32`]."] + #[inline(always)] + pub unsafe fn vst2_lane_f32(self, a: *mut f32, b: float32x2x2_t) { + unsafe { vst2_lane_f32::(a, b) } + } + #[doc = "See [`arch::vst2q_lane_f32`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_f32(self, a: *mut f32, b: float32x4x2_t) { + unsafe { vst2q_lane_f32::(a, b) } + } + #[doc = "See [`arch::vst2_lane_s8`]."] + #[inline(always)] + pub unsafe fn vst2_lane_s8(self, a: *mut i8, b: int8x8x2_t) { + unsafe { vst2_lane_s8::(a, b) } + } + #[doc = "See [`arch::vst2_lane_s16`]."] + #[inline(always)] + pub unsafe fn vst2_lane_s16(self, a: *mut i16, b: int16x4x2_t) { + unsafe { vst2_lane_s16::(a, b) } + } + #[doc = "See [`arch::vst2q_lane_s16`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_s16(self, a: *mut i16, b: int16x8x2_t) { + unsafe { vst2q_lane_s16::(a, b) } + } + #[doc = "See [`arch::vst2_lane_s32`]."] + #[inline(always)] 
+ pub unsafe fn vst2_lane_s32(self, a: *mut i32, b: int32x2x2_t) { + unsafe { vst2_lane_s32::(a, b) } + } + #[doc = "See [`arch::vst2q_lane_s32`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_s32(self, a: *mut i32, b: int32x4x2_t) { + unsafe { vst2q_lane_s32::(a, b) } + } + #[doc = "See [`arch::vst2_lane_u8`]."] + #[inline(always)] + pub unsafe fn vst2_lane_u8(self, a: *mut u8, b: uint8x8x2_t) { + unsafe { vst2_lane_u8::(a, b) } + } + #[doc = "See [`arch::vst2_lane_u16`]."] + #[inline(always)] + pub unsafe fn vst2_lane_u16(self, a: *mut u16, b: uint16x4x2_t) { + unsafe { vst2_lane_u16::(a, b) } + } + #[doc = "See [`arch::vst2q_lane_u16`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_u16(self, a: *mut u16, b: uint16x8x2_t) { + unsafe { vst2q_lane_u16::(a, b) } + } + #[doc = "See [`arch::vst2_lane_u32`]."] + #[inline(always)] + pub unsafe fn vst2_lane_u32(self, a: *mut u32, b: uint32x2x2_t) { + unsafe { vst2_lane_u32::(a, b) } + } + #[doc = "See [`arch::vst2q_lane_u32`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_u32(self, a: *mut u32, b: uint32x4x2_t) { + unsafe { vst2q_lane_u32::(a, b) } + } + #[doc = "See [`arch::vst2_lane_p8`]."] + #[inline(always)] + pub unsafe fn vst2_lane_p8(self, a: *mut p8, b: poly8x8x2_t) { + unsafe { vst2_lane_p8::(a, b) } + } + #[doc = "See [`arch::vst2_lane_p16`]."] + #[inline(always)] + pub unsafe fn vst2_lane_p16(self, a: *mut p16, b: poly16x4x2_t) { + unsafe { vst2_lane_p16::(a, b) } + } + #[doc = "See [`arch::vst2q_lane_p16`]."] + #[inline(always)] + pub unsafe fn vst2q_lane_p16(self, a: *mut p16, b: poly16x8x2_t) { + unsafe { vst2q_lane_p16::(a, b) } + } + #[doc = "See [`arch::vst2_p64`]."] + #[inline(always)] + pub unsafe fn vst2_p64(self, a: *mut p64, b: poly64x1x2_t) { + unsafe { vst2_p64(a, b) } + } + #[doc = "See [`arch::vst2_u64`]."] + #[inline(always)] + pub unsafe fn vst2_u64(self, a: *mut u64, b: uint64x1x2_t) { + unsafe { vst2_u64(a, b) } + } + #[doc = "See [`arch::vst2_u8`]."] + #[inline(always)] + pub 
unsafe fn vst2_u8(self, a: *mut u8, b: uint8x8x2_t) { + unsafe { vst2_u8(a, b) } + } + #[doc = "See [`arch::vst2q_u8`]."] + #[inline(always)] + pub unsafe fn vst2q_u8(self, a: *mut u8, b: uint8x16x2_t) { + unsafe { vst2q_u8(a, b) } + } + #[doc = "See [`arch::vst2_u16`]."] + #[inline(always)] + pub unsafe fn vst2_u16(self, a: *mut u16, b: uint16x4x2_t) { + unsafe { vst2_u16(a, b) } + } + #[doc = "See [`arch::vst2q_u16`]."] + #[inline(always)] + pub unsafe fn vst2q_u16(self, a: *mut u16, b: uint16x8x2_t) { + unsafe { vst2q_u16(a, b) } + } + #[doc = "See [`arch::vst2_u32`]."] + #[inline(always)] + pub unsafe fn vst2_u32(self, a: *mut u32, b: uint32x2x2_t) { + unsafe { vst2_u32(a, b) } + } + #[doc = "See [`arch::vst2q_u32`]."] + #[inline(always)] + pub unsafe fn vst2q_u32(self, a: *mut u32, b: uint32x4x2_t) { + unsafe { vst2q_u32(a, b) } + } + #[doc = "See [`arch::vst2_p8`]."] + #[inline(always)] + pub unsafe fn vst2_p8(self, a: *mut p8, b: poly8x8x2_t) { + unsafe { vst2_p8(a, b) } + } + #[doc = "See [`arch::vst2q_p8`]."] + #[inline(always)] + pub unsafe fn vst2q_p8(self, a: *mut p8, b: poly8x16x2_t) { + unsafe { vst2q_p8(a, b) } + } + #[doc = "See [`arch::vst2_p16`]."] + #[inline(always)] + pub unsafe fn vst2_p16(self, a: *mut p16, b: poly16x4x2_t) { + unsafe { vst2_p16(a, b) } + } + #[doc = "See [`arch::vst2q_p16`]."] + #[inline(always)] + pub unsafe fn vst2q_p16(self, a: *mut p16, b: poly16x8x2_t) { + unsafe { vst2q_p16(a, b) } + } + #[doc = "See [`arch::vst3_lane_u8`]."] + #[inline(always)] + pub unsafe fn vst3_lane_u8(self, a: *mut u8, b: uint8x8x3_t) { + unsafe { vst3_lane_u8::(a, b) } + } + #[doc = "See [`arch::vst3_lane_u16`]."] + #[inline(always)] + pub unsafe fn vst3_lane_u16(self, a: *mut u16, b: uint16x4x3_t) { + unsafe { vst3_lane_u16::(a, b) } + } + #[doc = "See [`arch::vst3q_lane_u16`]."] + #[inline(always)] + pub unsafe fn vst3q_lane_u16(self, a: *mut u16, b: uint16x8x3_t) { + unsafe { vst3q_lane_u16::(a, b) } + } + #[doc = "See 
[`arch::vst3_lane_u32`]."] + #[inline(always)] + pub unsafe fn vst3_lane_u32(self, a: *mut u32, b: uint32x2x3_t) { + unsafe { vst3_lane_u32::(a, b) } + } + #[doc = "See [`arch::vst3q_lane_u32`]."] + #[inline(always)] + pub unsafe fn vst3q_lane_u32(self, a: *mut u32, b: uint32x4x3_t) { + unsafe { vst3q_lane_u32::(a, b) } + } + #[doc = "See [`arch::vst3_lane_p8`]."] + #[inline(always)] + pub unsafe fn vst3_lane_p8(self, a: *mut p8, b: poly8x8x3_t) { + unsafe { vst3_lane_p8::(a, b) } + } + #[doc = "See [`arch::vst3_lane_p16`]."] + #[inline(always)] + pub unsafe fn vst3_lane_p16(self, a: *mut p16, b: poly16x4x3_t) { + unsafe { vst3_lane_p16::(a, b) } + } + #[doc = "See [`arch::vst3q_lane_p16`]."] + #[inline(always)] + pub unsafe fn vst3q_lane_p16(self, a: *mut p16, b: poly16x8x3_t) { + unsafe { vst3q_lane_p16::(a, b) } + } + #[doc = "See [`arch::vst3_p64`]."] + #[inline(always)] + pub unsafe fn vst3_p64(self, a: *mut p64, b: poly64x1x3_t) { + unsafe { vst3_p64(a, b) } + } + #[doc = "See [`arch::vst3_s64`]."] + #[inline(always)] + pub unsafe fn vst3_s64(self, a: *mut i64, b: int64x1x3_t) { + unsafe { vst3_s64(a, b) } + } + #[doc = "See [`arch::vst3_u64`]."] + #[inline(always)] + pub unsafe fn vst3_u64(self, a: *mut u64, b: uint64x1x3_t) { + unsafe { vst3_u64(a, b) } + } + #[doc = "See [`arch::vst3_u8`]."] + #[inline(always)] + pub unsafe fn vst3_u8(self, a: *mut u8, b: uint8x8x3_t) { + unsafe { vst3_u8(a, b) } + } + #[doc = "See [`arch::vst3q_u8`]."] + #[inline(always)] + pub unsafe fn vst3q_u8(self, a: *mut u8, b: uint8x16x3_t) { + unsafe { vst3q_u8(a, b) } + } + #[doc = "See [`arch::vst3_u16`]."] + #[inline(always)] + pub unsafe fn vst3_u16(self, a: *mut u16, b: uint16x4x3_t) { + unsafe { vst3_u16(a, b) } + } + #[doc = "See [`arch::vst3q_u16`]."] + #[inline(always)] + pub unsafe fn vst3q_u16(self, a: *mut u16, b: uint16x8x3_t) { + unsafe { vst3q_u16(a, b) } + } + #[doc = "See [`arch::vst3_u32`]."] + #[inline(always)] + pub unsafe fn vst3_u32(self, a: *mut u32, b: 
uint32x2x3_t) { + unsafe { vst3_u32(a, b) } + } + #[doc = "See [`arch::vst3q_u32`]."] + #[inline(always)] + pub unsafe fn vst3q_u32(self, a: *mut u32, b: uint32x4x3_t) { + unsafe { vst3q_u32(a, b) } + } + #[doc = "See [`arch::vst3_p8`]."] + #[inline(always)] + pub unsafe fn vst3_p8(self, a: *mut p8, b: poly8x8x3_t) { + unsafe { vst3_p8(a, b) } + } + #[doc = "See [`arch::vst3q_p8`]."] + #[inline(always)] + pub unsafe fn vst3q_p8(self, a: *mut p8, b: poly8x16x3_t) { + unsafe { vst3q_p8(a, b) } + } + #[doc = "See [`arch::vst3_p16`]."] + #[inline(always)] + pub unsafe fn vst3_p16(self, a: *mut p16, b: poly16x4x3_t) { + unsafe { vst3_p16(a, b) } + } + #[doc = "See [`arch::vst3q_p16`]."] + #[inline(always)] + pub unsafe fn vst3q_p16(self, a: *mut p16, b: poly16x8x3_t) { + unsafe { vst3q_p16(a, b) } + } + #[doc = "See [`arch::vst4_lane_u8`]."] + #[inline(always)] + pub unsafe fn vst4_lane_u8(self, a: *mut u8, b: uint8x8x4_t) { + unsafe { vst4_lane_u8::(a, b) } + } + #[doc = "See [`arch::vst4_lane_u16`]."] + #[inline(always)] + pub unsafe fn vst4_lane_u16(self, a: *mut u16, b: uint16x4x4_t) { + unsafe { vst4_lane_u16::(a, b) } + } + #[doc = "See [`arch::vst4q_lane_u16`]."] + #[inline(always)] + pub unsafe fn vst4q_lane_u16(self, a: *mut u16, b: uint16x8x4_t) { + unsafe { vst4q_lane_u16::(a, b) } + } + #[doc = "See [`arch::vst4_lane_u32`]."] + #[inline(always)] + pub unsafe fn vst4_lane_u32(self, a: *mut u32, b: uint32x2x4_t) { + unsafe { vst4_lane_u32::(a, b) } + } + #[doc = "See [`arch::vst4q_lane_u32`]."] + #[inline(always)] + pub unsafe fn vst4q_lane_u32(self, a: *mut u32, b: uint32x4x4_t) { + unsafe { vst4q_lane_u32::(a, b) } + } + #[doc = "See [`arch::vst4_lane_p8`]."] + #[inline(always)] + pub unsafe fn vst4_lane_p8(self, a: *mut p8, b: poly8x8x4_t) { + unsafe { vst4_lane_p8::(a, b) } + } + #[doc = "See [`arch::vst4_lane_p16`]."] + #[inline(always)] + pub unsafe fn vst4_lane_p16(self, a: *mut p16, b: poly16x4x4_t) { + unsafe { vst4_lane_p16::(a, b) } + } + #[doc = 
"See [`arch::vst4q_lane_p16`]."] + #[inline(always)] + pub unsafe fn vst4q_lane_p16(self, a: *mut p16, b: poly16x8x4_t) { + unsafe { vst4q_lane_p16::(a, b) } + } + #[doc = "See [`arch::vst4_p64`]."] + #[inline(always)] + pub unsafe fn vst4_p64(self, a: *mut p64, b: poly64x1x4_t) { + unsafe { vst4_p64(a, b) } + } + #[doc = "See [`arch::vst4_u64`]."] + #[inline(always)] + pub unsafe fn vst4_u64(self, a: *mut u64, b: uint64x1x4_t) { + unsafe { vst4_u64(a, b) } + } + #[doc = "See [`arch::vst4_u8`]."] + #[inline(always)] + pub unsafe fn vst4_u8(self, a: *mut u8, b: uint8x8x4_t) { + unsafe { vst4_u8(a, b) } + } + #[doc = "See [`arch::vst4q_u8`]."] + #[inline(always)] + pub unsafe fn vst4q_u8(self, a: *mut u8, b: uint8x16x4_t) { + unsafe { vst4q_u8(a, b) } + } + #[doc = "See [`arch::vst4_u16`]."] + #[inline(always)] + pub unsafe fn vst4_u16(self, a: *mut u16, b: uint16x4x4_t) { + unsafe { vst4_u16(a, b) } + } + #[doc = "See [`arch::vst4q_u16`]."] + #[inline(always)] + pub unsafe fn vst4q_u16(self, a: *mut u16, b: uint16x8x4_t) { + unsafe { vst4q_u16(a, b) } + } + #[doc = "See [`arch::vst4_u32`]."] + #[inline(always)] + pub unsafe fn vst4_u32(self, a: *mut u32, b: uint32x2x4_t) { + unsafe { vst4_u32(a, b) } + } + #[doc = "See [`arch::vst4q_u32`]."] + #[inline(always)] + pub unsafe fn vst4q_u32(self, a: *mut u32, b: uint32x4x4_t) { + unsafe { vst4q_u32(a, b) } + } + #[doc = "See [`arch::vst4_p8`]."] + #[inline(always)] + pub unsafe fn vst4_p8(self, a: *mut p8, b: poly8x8x4_t) { + unsafe { vst4_p8(a, b) } + } + #[doc = "See [`arch::vst4q_p8`]."] + #[inline(always)] + pub unsafe fn vst4q_p8(self, a: *mut p8, b: poly8x16x4_t) { + unsafe { vst4q_p8(a, b) } + } + #[doc = "See [`arch::vst4_p16`]."] + #[inline(always)] + pub unsafe fn vst4_p16(self, a: *mut p16, b: poly16x4x4_t) { + unsafe { vst4_p16(a, b) } + } + #[doc = "See [`arch::vst4q_p16`]."] + #[inline(always)] + pub unsafe fn vst4q_p16(self, a: *mut p16, b: poly16x8x4_t) { + unsafe { vst4q_p16(a, b) } + } + #[doc = "See 
[`arch::vstrq_p128`]."] + #[inline(always)] + pub unsafe fn vstrq_p128(self, a: *mut p128, b: p128) { + unsafe { vstrq_p128(a, b) } + } + #[doc = "See [`arch::vsub_f32`]."] + #[inline(always)] + pub fn vsub_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2_t { + unsafe { vsub_f32(a, b) } + } + #[doc = "See [`arch::vsubq_f32`]."] + #[inline(always)] + pub fn vsubq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4_t { + unsafe { vsubq_f32(a, b) } + } + #[doc = "See [`arch::vsub_s16`]."] + #[inline(always)] + pub fn vsub_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4_t { + unsafe { vsub_s16(a, b) } + } + #[doc = "See [`arch::vsubq_s16`]."] + #[inline(always)] + pub fn vsubq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8_t { + unsafe { vsubq_s16(a, b) } + } + #[doc = "See [`arch::vsub_u16`]."] + #[inline(always)] + pub fn vsub_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + unsafe { vsub_u16(a, b) } + } + #[doc = "See [`arch::vsubq_u16`]."] + #[inline(always)] + pub fn vsubq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vsubq_u16(a, b) } + } + #[doc = "See [`arch::vsub_s32`]."] + #[inline(always)] + pub fn vsub_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2_t { + unsafe { vsub_s32(a, b) } + } + #[doc = "See [`arch::vsubq_s32`]."] + #[inline(always)] + pub fn vsubq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4_t { + unsafe { vsubq_s32(a, b) } + } + #[doc = "See [`arch::vsub_u32`]."] + #[inline(always)] + pub fn vsub_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vsub_u32(a, b) } + } + #[doc = "See [`arch::vsubq_u32`]."] + #[inline(always)] + pub fn vsubq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vsubq_u32(a, b) } + } + #[doc = "See [`arch::vsub_s64`]."] + #[inline(always)] + pub fn vsub_s64(self, a: int64x1_t, b: int64x1_t) -> int64x1_t { + unsafe { vsub_s64(a, b) } + } + #[doc = "See [`arch::vsubq_s64`]."] + #[inline(always)] + pub fn vsubq_s64(self, a: int64x2_t, b: 
int64x2_t) -> int64x2_t { + unsafe { vsubq_s64(a, b) } + } + #[doc = "See [`arch::vsub_u64`]."] + #[inline(always)] + pub fn vsub_u64(self, a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + unsafe { vsub_u64(a, b) } + } + #[doc = "See [`arch::vsubq_u64`]."] + #[inline(always)] + pub fn vsubq_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + unsafe { vsubq_u64(a, b) } + } + #[doc = "See [`arch::vsub_s8`]."] + #[inline(always)] + pub fn vsub_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8_t { + unsafe { vsub_s8(a, b) } + } + #[doc = "See [`arch::vsubq_s8`]."] + #[inline(always)] + pub fn vsubq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16_t { + unsafe { vsubq_s8(a, b) } + } + #[doc = "See [`arch::vsub_u8`]."] + #[inline(always)] + pub fn vsub_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vsub_u8(a, b) } + } + #[doc = "See [`arch::vsubq_u8`]."] + #[inline(always)] + pub fn vsubq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vsubq_u8(a, b) } + } + #[doc = "See [`arch::vsubhn_high_s16`]."] + #[inline(always)] + pub fn vsubhn_high_s16(self, a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { + unsafe { vsubhn_high_s16(a, b, c) } + } + #[doc = "See [`arch::vsubhn_high_s32`]."] + #[inline(always)] + pub fn vsubhn_high_s32(self, a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { + unsafe { vsubhn_high_s32(a, b, c) } + } + #[doc = "See [`arch::vsubhn_high_s64`]."] + #[inline(always)] + pub fn vsubhn_high_s64(self, a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { + unsafe { vsubhn_high_s64(a, b, c) } + } + #[doc = "See [`arch::vsubhn_high_u16`]."] + #[inline(always)] + pub fn vsubhn_high_u16(self, a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t { + unsafe { vsubhn_high_u16(a, b, c) } + } + #[doc = "See [`arch::vsubhn_high_u32`]."] + #[inline(always)] + pub fn vsubhn_high_u32(self, a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t { + unsafe { vsubhn_high_u32(a, b, c) } + } + #[doc = "See 
[`arch::vsubhn_high_u64`]."] + #[inline(always)] + pub fn vsubhn_high_u64(self, a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t { + unsafe { vsubhn_high_u64(a, b, c) } + } + #[doc = "See [`arch::vsubhn_s16`]."] + #[inline(always)] + pub fn vsubhn_s16(self, a: int16x8_t, b: int16x8_t) -> int8x8_t { + unsafe { vsubhn_s16(a, b) } + } + #[doc = "See [`arch::vsubhn_s32`]."] + #[inline(always)] + pub fn vsubhn_s32(self, a: int32x4_t, b: int32x4_t) -> int16x4_t { + unsafe { vsubhn_s32(a, b) } + } + #[doc = "See [`arch::vsubhn_s64`]."] + #[inline(always)] + pub fn vsubhn_s64(self, a: int64x2_t, b: int64x2_t) -> int32x2_t { + unsafe { vsubhn_s64(a, b) } + } + #[doc = "See [`arch::vsubhn_u16`]."] + #[inline(always)] + pub fn vsubhn_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { + unsafe { vsubhn_u16(a, b) } + } + #[doc = "See [`arch::vsubhn_u32`]."] + #[inline(always)] + pub fn vsubhn_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { + unsafe { vsubhn_u32(a, b) } + } + #[doc = "See [`arch::vsubhn_u64`]."] + #[inline(always)] + pub fn vsubhn_u64(self, a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { + unsafe { vsubhn_u64(a, b) } + } + #[doc = "See [`arch::vsubl_s8`]."] + #[inline(always)] + pub fn vsubl_s8(self, a: int8x8_t, b: int8x8_t) -> int16x8_t { + unsafe { vsubl_s8(a, b) } + } + #[doc = "See [`arch::vsubl_s16`]."] + #[inline(always)] + pub fn vsubl_s16(self, a: int16x4_t, b: int16x4_t) -> int32x4_t { + unsafe { vsubl_s16(a, b) } + } + #[doc = "See [`arch::vsubl_s32`]."] + #[inline(always)] + pub fn vsubl_s32(self, a: int32x2_t, b: int32x2_t) -> int64x2_t { + unsafe { vsubl_s32(a, b) } + } + #[doc = "See [`arch::vsubl_u8`]."] + #[inline(always)] + pub fn vsubl_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe { vsubl_u8(a, b) } + } + #[doc = "See [`arch::vsubl_u16`]."] + #[inline(always)] + pub fn vsubl_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { vsubl_u16(a, b) } + } + #[doc = "See [`arch::vsubl_u32`]."] + 
#[inline(always)] + pub fn vsubl_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { vsubl_u32(a, b) } + } + #[doc = "See [`arch::vsubw_s8`]."] + #[inline(always)] + pub fn vsubw_s8(self, a: int16x8_t, b: int8x8_t) -> int16x8_t { + unsafe { vsubw_s8(a, b) } + } + #[doc = "See [`arch::vsubw_s16`]."] + #[inline(always)] + pub fn vsubw_s16(self, a: int32x4_t, b: int16x4_t) -> int32x4_t { + unsafe { vsubw_s16(a, b) } + } + #[doc = "See [`arch::vsubw_s32`]."] + #[inline(always)] + pub fn vsubw_s32(self, a: int64x2_t, b: int32x2_t) -> int64x2_t { + unsafe { vsubw_s32(a, b) } + } + #[doc = "See [`arch::vsubw_u8`]."] + #[inline(always)] + pub fn vsubw_u8(self, a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { + unsafe { vsubw_u8(a, b) } + } + #[doc = "See [`arch::vsubw_u16`]."] + #[inline(always)] + pub fn vsubw_u16(self, a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { + unsafe { vsubw_u16(a, b) } + } + #[doc = "See [`arch::vsubw_u32`]."] + #[inline(always)] + pub fn vsubw_u32(self, a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { + unsafe { vsubw_u32(a, b) } + } + #[doc = "See [`arch::vtrn_f32`]."] + #[inline(always)] + pub fn vtrn_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + unsafe { vtrn_f32(a, b) } + } + #[doc = "See [`arch::vtrn_s32`]."] + #[inline(always)] + pub fn vtrn_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + unsafe { vtrn_s32(a, b) } + } + #[doc = "See [`arch::vtrn_u32`]."] + #[inline(always)] + pub fn vtrn_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + unsafe { vtrn_u32(a, b) } + } + #[doc = "See [`arch::vtrnq_f32`]."] + #[inline(always)] + pub fn vtrnq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + unsafe { vtrnq_f32(a, b) } + } + #[doc = "See [`arch::vtrn_s8`]."] + #[inline(always)] + pub fn vtrn_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + unsafe { vtrn_s8(a, b) } + } + #[doc = "See [`arch::vtrnq_s8`]."] + #[inline(always)] + pub fn vtrnq_s8(self, a: int8x16_t, b: int8x16_t) -> 
int8x16x2_t { + unsafe { vtrnq_s8(a, b) } + } + #[doc = "See [`arch::vtrn_s16`]."] + #[inline(always)] + pub fn vtrn_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + unsafe { vtrn_s16(a, b) } + } + #[doc = "See [`arch::vtrnq_s16`]."] + #[inline(always)] + pub fn vtrnq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + unsafe { vtrnq_s16(a, b) } + } + #[doc = "See [`arch::vtrnq_s32`]."] + #[inline(always)] + pub fn vtrnq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + unsafe { vtrnq_s32(a, b) } + } + #[doc = "See [`arch::vtrn_u8`]."] + #[inline(always)] + pub fn vtrn_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + unsafe { vtrn_u8(a, b) } + } + #[doc = "See [`arch::vtrnq_u8`]."] + #[inline(always)] + pub fn vtrnq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + unsafe { vtrnq_u8(a, b) } + } + #[doc = "See [`arch::vtrn_u16`]."] + #[inline(always)] + pub fn vtrn_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + unsafe { vtrn_u16(a, b) } + } + #[doc = "See [`arch::vtrnq_u16`]."] + #[inline(always)] + pub fn vtrnq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + unsafe { vtrnq_u16(a, b) } + } + #[doc = "See [`arch::vtrnq_u32`]."] + #[inline(always)] + pub fn vtrnq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + unsafe { vtrnq_u32(a, b) } + } + #[doc = "See [`arch::vtrn_p8`]."] + #[inline(always)] + pub fn vtrn_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + unsafe { vtrn_p8(a, b) } + } + #[doc = "See [`arch::vtrnq_p8`]."] + #[inline(always)] + pub fn vtrnq_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + unsafe { vtrnq_p8(a, b) } + } + #[doc = "See [`arch::vtrn_p16`]."] + #[inline(always)] + pub fn vtrn_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + unsafe { vtrn_p16(a, b) } + } + #[doc = "See [`arch::vtrnq_p16`]."] + #[inline(always)] + pub fn vtrnq_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + unsafe { vtrnq_p16(a, b) } + } + #[doc = "See 
[`arch::vtst_s8`]."] + #[inline(always)] + pub fn vtst_s8(self, a: int8x8_t, b: int8x8_t) -> uint8x8_t { + unsafe { vtst_s8(a, b) } + } + #[doc = "See [`arch::vtstq_s8`]."] + #[inline(always)] + pub fn vtstq_s8(self, a: int8x16_t, b: int8x16_t) -> uint8x16_t { + unsafe { vtstq_s8(a, b) } + } + #[doc = "See [`arch::vtst_s16`]."] + #[inline(always)] + pub fn vtst_s16(self, a: int16x4_t, b: int16x4_t) -> uint16x4_t { + unsafe { vtst_s16(a, b) } + } + #[doc = "See [`arch::vtstq_s16`]."] + #[inline(always)] + pub fn vtstq_s16(self, a: int16x8_t, b: int16x8_t) -> uint16x8_t { + unsafe { vtstq_s16(a, b) } + } + #[doc = "See [`arch::vtst_s32`]."] + #[inline(always)] + pub fn vtst_s32(self, a: int32x2_t, b: int32x2_t) -> uint32x2_t { + unsafe { vtst_s32(a, b) } + } + #[doc = "See [`arch::vtstq_s32`]."] + #[inline(always)] + pub fn vtstq_s32(self, a: int32x4_t, b: int32x4_t) -> uint32x4_t { + unsafe { vtstq_s32(a, b) } + } + #[doc = "See [`arch::vtst_p8`]."] + #[inline(always)] + pub fn vtst_p8(self, a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { + unsafe { vtst_p8(a, b) } + } + #[doc = "See [`arch::vtstq_p8`]."] + #[inline(always)] + pub fn vtstq_p8(self, a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { + unsafe { vtstq_p8(a, b) } + } + #[doc = "See [`arch::vtst_p16`]."] + #[inline(always)] + pub fn vtst_p16(self, a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { + unsafe { vtst_p16(a, b) } + } + #[doc = "See [`arch::vtstq_p16`]."] + #[inline(always)] + pub fn vtstq_p16(self, a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { + unsafe { vtstq_p16(a, b) } + } + #[doc = "See [`arch::vtst_u8`]."] + #[inline(always)] + pub fn vtst_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + unsafe { vtst_u8(a, b) } + } + #[doc = "See [`arch::vtstq_u8`]."] + #[inline(always)] + pub fn vtstq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + unsafe { vtstq_u8(a, b) } + } + #[doc = "See [`arch::vtst_u16`]."] + #[inline(always)] + pub fn vtst_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { 
+ unsafe { vtst_u16(a, b) } + } + #[doc = "See [`arch::vtstq_u16`]."] + #[inline(always)] + pub fn vtstq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + unsafe { vtstq_u16(a, b) } + } + #[doc = "See [`arch::vtst_u32`]."] + #[inline(always)] + pub fn vtst_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + unsafe { vtst_u32(a, b) } + } + #[doc = "See [`arch::vtstq_u32`]."] + #[inline(always)] + pub fn vtstq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + unsafe { vtstq_u32(a, b) } + } + #[doc = "See [`arch::vuzp_f32`]."] + #[inline(always)] + pub fn vuzp_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + unsafe { vuzp_f32(a, b) } + } + #[doc = "See [`arch::vuzp_s32`]."] + #[inline(always)] + pub fn vuzp_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + unsafe { vuzp_s32(a, b) } + } + #[doc = "See [`arch::vuzp_u32`]."] + #[inline(always)] + pub fn vuzp_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + unsafe { vuzp_u32(a, b) } + } + #[doc = "See [`arch::vuzpq_f32`]."] + #[inline(always)] + pub fn vuzpq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + unsafe { vuzpq_f32(a, b) } + } + #[doc = "See [`arch::vuzp_s8`]."] + #[inline(always)] + pub fn vuzp_s8(self, a: int8x8_t, b: int8x8_t) -> int8x8x2_t { + unsafe { vuzp_s8(a, b) } + } + #[doc = "See [`arch::vuzpq_s8`]."] + #[inline(always)] + pub fn vuzpq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + unsafe { vuzpq_s8(a, b) } + } + #[doc = "See [`arch::vuzp_s16`]."] + #[inline(always)] + pub fn vuzp_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + unsafe { vuzp_s16(a, b) } + } + #[doc = "See [`arch::vuzpq_s16`]."] + #[inline(always)] + pub fn vuzpq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + unsafe { vuzpq_s16(a, b) } + } + #[doc = "See [`arch::vuzpq_s32`]."] + #[inline(always)] + pub fn vuzpq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + unsafe { vuzpq_s32(a, b) } + } + #[doc = "See [`arch::vuzp_u8`]."] + 
#[inline(always)] + pub fn vuzp_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + unsafe { vuzp_u8(a, b) } + } + #[doc = "See [`arch::vuzpq_u8`]."] + #[inline(always)] + pub fn vuzpq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + unsafe { vuzpq_u8(a, b) } + } + #[doc = "See [`arch::vuzp_u16`]."] + #[inline(always)] + pub fn vuzp_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + unsafe { vuzp_u16(a, b) } + } + #[doc = "See [`arch::vuzpq_u16`]."] + #[inline(always)] + pub fn vuzpq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + unsafe { vuzpq_u16(a, b) } + } + #[doc = "See [`arch::vuzpq_u32`]."] + #[inline(always)] + pub fn vuzpq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + unsafe { vuzpq_u32(a, b) } + } + #[doc = "See [`arch::vuzp_p8`]."] + #[inline(always)] + pub fn vuzp_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + unsafe { vuzp_p8(a, b) } + } + #[doc = "See [`arch::vuzpq_p8`]."] + #[inline(always)] + pub fn vuzpq_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + unsafe { vuzpq_p8(a, b) } + } + #[doc = "See [`arch::vuzp_p16`]."] + #[inline(always)] + pub fn vuzp_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + unsafe { vuzp_p16(a, b) } + } + #[doc = "See [`arch::vuzpq_p16`]."] + #[inline(always)] + pub fn vuzpq_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + unsafe { vuzpq_p16(a, b) } + } + #[doc = "See [`arch::vzip_f32`]."] + #[inline(always)] + pub fn vzip_f32(self, a: float32x2_t, b: float32x2_t) -> float32x2x2_t { + unsafe { vzip_f32(a, b) } + } + #[doc = "See [`arch::vzip_s32`]."] + #[inline(always)] + pub fn vzip_s32(self, a: int32x2_t, b: int32x2_t) -> int32x2x2_t { + unsafe { vzip_s32(a, b) } + } + #[doc = "See [`arch::vzip_u32`]."] + #[inline(always)] + pub fn vzip_u32(self, a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { + unsafe { vzip_u32(a, b) } + } + #[doc = "See [`arch::vzip_s8`]."] + #[inline(always)] + pub fn vzip_s8(self, a: int8x8_t, b: int8x8_t) -> 
int8x8x2_t { + unsafe { vzip_s8(a, b) } + } + #[doc = "See [`arch::vzip_s16`]."] + #[inline(always)] + pub fn vzip_s16(self, a: int16x4_t, b: int16x4_t) -> int16x4x2_t { + unsafe { vzip_s16(a, b) } + } + #[doc = "See [`arch::vzip_u8`]."] + #[inline(always)] + pub fn vzip_u8(self, a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { + unsafe { vzip_u8(a, b) } + } + #[doc = "See [`arch::vzip_u16`]."] + #[inline(always)] + pub fn vzip_u16(self, a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { + unsafe { vzip_u16(a, b) } + } + #[doc = "See [`arch::vzip_p8`]."] + #[inline(always)] + pub fn vzip_p8(self, a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { + unsafe { vzip_p8(a, b) } + } + #[doc = "See [`arch::vzip_p16`]."] + #[inline(always)] + pub fn vzip_p16(self, a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { + unsafe { vzip_p16(a, b) } + } + #[doc = "See [`arch::vzipq_f32`]."] + #[inline(always)] + pub fn vzipq_f32(self, a: float32x4_t, b: float32x4_t) -> float32x4x2_t { + unsafe { vzipq_f32(a, b) } + } + #[doc = "See [`arch::vzipq_s8`]."] + #[inline(always)] + pub fn vzipq_s8(self, a: int8x16_t, b: int8x16_t) -> int8x16x2_t { + unsafe { vzipq_s8(a, b) } + } + #[doc = "See [`arch::vzipq_s16`]."] + #[inline(always)] + pub fn vzipq_s16(self, a: int16x8_t, b: int16x8_t) -> int16x8x2_t { + unsafe { vzipq_s16(a, b) } + } + #[doc = "See [`arch::vzipq_s32`]."] + #[inline(always)] + pub fn vzipq_s32(self, a: int32x4_t, b: int32x4_t) -> int32x4x2_t { + unsafe { vzipq_s32(a, b) } + } + #[doc = "See [`arch::vzipq_u8`]."] + #[inline(always)] + pub fn vzipq_u8(self, a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { + unsafe { vzipq_u8(a, b) } + } + #[doc = "See [`arch::vzipq_u16`]."] + #[inline(always)] + pub fn vzipq_u16(self, a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { + unsafe { vzipq_u16(a, b) } + } + #[doc = "See [`arch::vzipq_u32`]."] + #[inline(always)] + pub fn vzipq_u32(self, a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { + unsafe { vzipq_u32(a, b) } + } + #[doc = "See 
[`arch::vzipq_p8`]."] + #[inline(always)] + pub fn vzipq_p8(self, a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { + unsafe { vzipq_p8(a, b) } + } + #[doc = "See [`arch::vzipq_p16`]."] + #[inline(always)] + pub fn vzipq_p16(self, a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { + unsafe { vzipq_p16(a, b) } + } +} diff --git a/fearless_simd/src/core_arch/fallback.rs b/fearless_simd/src/core_arch/fallback.rs index 2ef9ee058..8414d4e29 100644 --- a/fearless_simd/src/core_arch/fallback.rs +++ b/fearless_simd/src/core_arch/fallback.rs @@ -10,6 +10,10 @@ pub struct Fallback { impl Fallback { /// Create a SIMD token. #[inline] + #[expect( + clippy::new_without_default, + reason = "other architectures have unsafe `new_unchecked` constructors and cannot implement `Default`; for symmetry, we do not do so either" + )] pub const fn new() -> Self { Self { _private: () } } diff --git a/fearless_simd/src/core_arch/mod.rs b/fearless_simd/src/core_arch/mod.rs index 51599cd56..08ee86397 100644 --- a/fearless_simd/src/core_arch/mod.rs +++ b/fearless_simd/src/core_arch/mod.rs @@ -5,9 +5,23 @@ #![expect( missing_docs, - clippy::new_without_default, reason = "TODO: https://github.com/linebender/fearless_simd/issues/40" )] +#![allow( + clippy::allow_attributes_without_reason, + reason = "these attributes are copied from stdarch" +)] +#![allow( + deprecated, + reason = "some intrinsics are deprecated, and hence their wrappers call deprecated functions" +)] +#![cfg_attr( + any(target_arch = "x86", target_arch = "x86_64"), + expect( + clippy::not_unsafe_ptr_arg_deref, + reason = "_mm_prefetch is safe to call, but clippy thinks it dereferences the pointer for some reason" + ) +)] #[cfg(target_arch = "aarch64")] pub mod aarch64; diff --git a/fearless_simd/src/core_arch/wasm32/mod.rs b/fearless_simd/src/core_arch/wasm32/mod.rs index 4636eeebe..dea521a6e 100644 --- a/fearless_simd/src/core_arch/wasm32/mod.rs +++ b/fearless_simd/src/core_arch/wasm32/mod.rs @@ -1,18 +1,8 @@ -// Copyright 2025 the 
Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -/// A token for WASM SIMD128. -#[derive(Clone, Copy, Debug)] -pub struct WasmSimd128 { - _private: (), -} +// This file is autogenerated by fearless_simd_gen -// There is intentionally no method delegation here because all the WASM SIMD128 methods are enabled or disabled -// statically--there is no feature detection. -impl WasmSimd128 { - /// Create a SIMD token. - #[inline] - pub const fn new() -> Self { - Self { _private: () } - } -} +#![doc = "Access to intrinsics on `wasm32`."] +mod simd128; +pub use simd128::WasmSimd128; diff --git a/fearless_simd/src/core_arch/wasm32/simd128.rs b/fearless_simd/src/core_arch/wasm32/simd128.rs new file mode 100644 index 000000000..8f901ec09 --- /dev/null +++ b/fearless_simd/src/core_arch/wasm32/simd128.rs @@ -0,0 +1,1423 @@ +// Copyright 2026 the Fearless_SIMD Authors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// This file is autogenerated by fearless_simd_gen + +use arch::*; +use core::arch::wasm32 as arch; +#[doc = "A token for `WasmSimd128` intrinsics on `wasm32`."] +#[derive(Clone, Copy, Debug)] +pub struct WasmSimd128 { + _private: (), +} +#[allow( + clippy::missing_safety_doc, + reason = "The underlying functions have their own safety docs" +)] +impl WasmSimd128 { + #[doc = r" Create a SIMD token."] + #[inline] + #[expect( + clippy::new_without_default, + reason = "other architectures have unsafe `new_unchecked` constructors and cannot implement `Default`; for symmetry, we do not do so either" + )] + pub const fn new() -> Self { + Self { _private: () } + } + #[doc = "See [`arch::v128_load`]."] + #[inline(always)] + pub unsafe fn v128_load(self, m: *const v128) -> v128 { + unsafe { v128_load(m) } + } + #[doc = "See [`arch::i16x8_load_extend_i8x8`]."] + #[inline(always)] + pub unsafe fn i16x8_load_extend_i8x8(self, m: *const i8) -> v128 { + unsafe { i16x8_load_extend_i8x8(m) } + } + #[doc = "See 
[`arch::i16x8_load_extend_u8x8`]."] + #[inline(always)] + pub unsafe fn i16x8_load_extend_u8x8(self, m: *const u8) -> v128 { + unsafe { i16x8_load_extend_u8x8(m) } + } + #[doc = "See [`arch::i32x4_load_extend_i16x4`]."] + #[inline(always)] + pub unsafe fn i32x4_load_extend_i16x4(self, m: *const i16) -> v128 { + unsafe { i32x4_load_extend_i16x4(m) } + } + #[doc = "See [`arch::i32x4_load_extend_u16x4`]."] + #[inline(always)] + pub unsafe fn i32x4_load_extend_u16x4(self, m: *const u16) -> v128 { + unsafe { i32x4_load_extend_u16x4(m) } + } + #[doc = "See [`arch::i64x2_load_extend_i32x2`]."] + #[inline(always)] + pub unsafe fn i64x2_load_extend_i32x2(self, m: *const i32) -> v128 { + unsafe { i64x2_load_extend_i32x2(m) } + } + #[doc = "See [`arch::i64x2_load_extend_u32x2`]."] + #[inline(always)] + pub unsafe fn i64x2_load_extend_u32x2(self, m: *const u32) -> v128 { + unsafe { i64x2_load_extend_u32x2(m) } + } + #[doc = "See [`arch::v128_load8_splat`]."] + #[inline(always)] + pub unsafe fn v128_load8_splat(self, m: *const u8) -> v128 { + unsafe { v128_load8_splat(m) } + } + #[doc = "See [`arch::v128_load16_splat`]."] + #[inline(always)] + pub unsafe fn v128_load16_splat(self, m: *const u16) -> v128 { + unsafe { v128_load16_splat(m) } + } + #[doc = "See [`arch::v128_load32_splat`]."] + #[inline(always)] + pub unsafe fn v128_load32_splat(self, m: *const u32) -> v128 { + unsafe { v128_load32_splat(m) } + } + #[doc = "See [`arch::v128_load64_splat`]."] + #[inline(always)] + pub unsafe fn v128_load64_splat(self, m: *const u64) -> v128 { + unsafe { v128_load64_splat(m) } + } + #[doc = "See [`arch::v128_load32_zero`]."] + #[inline(always)] + pub unsafe fn v128_load32_zero(self, m: *const u32) -> v128 { + unsafe { v128_load32_zero(m) } + } + #[doc = "See [`arch::v128_load64_zero`]."] + #[inline(always)] + pub unsafe fn v128_load64_zero(self, m: *const u64) -> v128 { + unsafe { v128_load64_zero(m) } + } + #[doc = "See [`arch::v128_store`]."] + #[inline(always)] + pub unsafe fn 
v128_store(self, m: *mut v128, a: v128) { + unsafe { v128_store(m, a) } + } + #[doc = "See [`arch::v128_load8_lane`]."] + #[inline(always)] + pub unsafe fn v128_load8_lane(self, v: v128, m: *const u8) -> v128 { + unsafe { v128_load8_lane::(v, m) } + } + #[doc = "See [`arch::v128_load16_lane`]."] + #[inline(always)] + pub unsafe fn v128_load16_lane(self, v: v128, m: *const u16) -> v128 { + unsafe { v128_load16_lane::(v, m) } + } + #[doc = "See [`arch::v128_load32_lane`]."] + #[inline(always)] + pub unsafe fn v128_load32_lane(self, v: v128, m: *const u32) -> v128 { + unsafe { v128_load32_lane::(v, m) } + } + #[doc = "See [`arch::v128_load64_lane`]."] + #[inline(always)] + pub unsafe fn v128_load64_lane(self, v: v128, m: *const u64) -> v128 { + unsafe { v128_load64_lane::(v, m) } + } + #[doc = "See [`arch::v128_store8_lane`]."] + #[inline(always)] + pub unsafe fn v128_store8_lane(self, v: v128, m: *mut u8) { + unsafe { v128_store8_lane::(v, m) } + } + #[doc = "See [`arch::v128_store16_lane`]."] + #[inline(always)] + pub unsafe fn v128_store16_lane(self, v: v128, m: *mut u16) { + unsafe { v128_store16_lane::(v, m) } + } + #[doc = "See [`arch::v128_store32_lane`]."] + #[inline(always)] + pub unsafe fn v128_store32_lane(self, v: v128, m: *mut u32) { + unsafe { v128_store32_lane::(v, m) } + } + #[doc = "See [`arch::v128_store64_lane`]."] + #[inline(always)] + pub unsafe fn v128_store64_lane(self, v: v128, m: *mut u64) { + unsafe { v128_store64_lane::(v, m) } + } + #[doc = "See [`arch::i8x16`]."] + #[inline(always)] + pub fn i8x16( + self, + a0: i8, + a1: i8, + a2: i8, + a3: i8, + a4: i8, + a5: i8, + a6: i8, + a7: i8, + a8: i8, + a9: i8, + a10: i8, + a11: i8, + a12: i8, + a13: i8, + a14: i8, + a15: i8, + ) -> v128 { + unsafe { + i8x16( + a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, + ) + } + } + #[doc = "See [`arch::u8x16`]."] + #[inline(always)] + pub fn u8x16( + self, + a0: u8, + a1: u8, + a2: u8, + a3: u8, + a4: u8, + a5: u8, + a6: u8, + a7: u8, 
+ a8: u8, + a9: u8, + a10: u8, + a11: u8, + a12: u8, + a13: u8, + a14: u8, + a15: u8, + ) -> v128 { + unsafe { + u8x16( + a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, + ) + } + } + #[doc = "See [`arch::i16x8`]."] + #[inline(always)] + pub fn i16x8( + self, + a0: i16, + a1: i16, + a2: i16, + a3: i16, + a4: i16, + a5: i16, + a6: i16, + a7: i16, + ) -> v128 { + unsafe { i16x8(a0, a1, a2, a3, a4, a5, a6, a7) } + } + #[doc = "See [`arch::u16x8`]."] + #[inline(always)] + pub fn u16x8( + self, + a0: u16, + a1: u16, + a2: u16, + a3: u16, + a4: u16, + a5: u16, + a6: u16, + a7: u16, + ) -> v128 { + unsafe { u16x8(a0, a1, a2, a3, a4, a5, a6, a7) } + } + #[doc = "See [`arch::i32x4`]."] + #[inline(always)] + pub fn i32x4(self, a0: i32, a1: i32, a2: i32, a3: i32) -> v128 { + unsafe { i32x4(a0, a1, a2, a3) } + } + #[doc = "See [`arch::u32x4`]."] + #[inline(always)] + pub fn u32x4(self, a0: u32, a1: u32, a2: u32, a3: u32) -> v128 { + unsafe { u32x4(a0, a1, a2, a3) } + } + #[doc = "See [`arch::i64x2`]."] + #[inline(always)] + pub fn i64x2(self, a0: i64, a1: i64) -> v128 { + unsafe { i64x2(a0, a1) } + } + #[doc = "See [`arch::u64x2`]."] + #[inline(always)] + pub fn u64x2(self, a0: u64, a1: u64) -> v128 { + unsafe { u64x2(a0, a1) } + } + #[doc = "See [`arch::f32x4`]."] + #[inline(always)] + pub fn f32x4(self, a0: f32, a1: f32, a2: f32, a3: f32) -> v128 { + unsafe { f32x4(a0, a1, a2, a3) } + } + #[doc = "See [`arch::f64x2`]."] + #[inline(always)] + pub fn f64x2(self, a0: f64, a1: f64) -> v128 { + unsafe { f64x2(a0, a1) } + } + #[doc = "See [`arch::i8x16_shuffle`]."] + #[inline(always)] + pub fn i8x16_shuffle< + const I0: usize, + const I1: usize, + const I2: usize, + const I3: usize, + const I4: usize, + const I5: usize, + const I6: usize, + const I7: usize, + const I8: usize, + const I9: usize, + const I10: usize, + const I11: usize, + const I12: usize, + const I13: usize, + const I14: usize, + const I15: usize, + >( + self, + a: v128, + b: v128, + ) -> v128 
{ + unsafe { + i8x16_shuffle::( + a, b, + ) + } + } + #[doc = "See [`arch::i16x8_shuffle`]."] + #[inline(always)] + pub fn i16x8_shuffle< + const I0: usize, + const I1: usize, + const I2: usize, + const I3: usize, + const I4: usize, + const I5: usize, + const I6: usize, + const I7: usize, + >( + self, + a: v128, + b: v128, + ) -> v128 { + unsafe { i16x8_shuffle::(a, b) } + } + #[doc = "See [`arch::i32x4_shuffle`]."] + #[inline(always)] + pub fn i32x4_shuffle( + self, + a: v128, + b: v128, + ) -> v128 { + unsafe { i32x4_shuffle::(a, b) } + } + #[doc = "See [`arch::i64x2_shuffle`]."] + #[inline(always)] + pub fn i64x2_shuffle(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_shuffle::(a, b) } + } + #[doc = "See [`arch::i8x16_extract_lane`]."] + #[inline(always)] + pub fn i8x16_extract_lane(self, a: v128) -> i8 { + unsafe { i8x16_extract_lane::(a) } + } + #[doc = "See [`arch::u8x16_extract_lane`]."] + #[inline(always)] + pub fn u8x16_extract_lane(self, a: v128) -> u8 { + unsafe { u8x16_extract_lane::(a) } + } + #[doc = "See [`arch::i8x16_replace_lane`]."] + #[inline(always)] + pub fn i8x16_replace_lane(self, a: v128, val: i8) -> v128 { + unsafe { i8x16_replace_lane::(a, val) } + } + #[doc = "See [`arch::u8x16_replace_lane`]."] + #[inline(always)] + pub fn u8x16_replace_lane(self, a: v128, val: u8) -> v128 { + unsafe { u8x16_replace_lane::(a, val) } + } + #[doc = "See [`arch::i16x8_extract_lane`]."] + #[inline(always)] + pub fn i16x8_extract_lane(self, a: v128) -> i16 { + unsafe { i16x8_extract_lane::(a) } + } + #[doc = "See [`arch::u16x8_extract_lane`]."] + #[inline(always)] + pub fn u16x8_extract_lane(self, a: v128) -> u16 { + unsafe { u16x8_extract_lane::(a) } + } + #[doc = "See [`arch::i16x8_replace_lane`]."] + #[inline(always)] + pub fn i16x8_replace_lane(self, a: v128, val: i16) -> v128 { + unsafe { i16x8_replace_lane::(a, val) } + } + #[doc = "See [`arch::u16x8_replace_lane`]."] + #[inline(always)] + pub fn u16x8_replace_lane(self, a: v128, val: u16) -> v128 { 
+ unsafe { u16x8_replace_lane::(a, val) } + } + #[doc = "See [`arch::i32x4_extract_lane`]."] + #[inline(always)] + pub fn i32x4_extract_lane(self, a: v128) -> i32 { + unsafe { i32x4_extract_lane::(a) } + } + #[doc = "See [`arch::u32x4_extract_lane`]."] + #[inline(always)] + pub fn u32x4_extract_lane(self, a: v128) -> u32 { + unsafe { u32x4_extract_lane::(a) } + } + #[doc = "See [`arch::i32x4_replace_lane`]."] + #[inline(always)] + pub fn i32x4_replace_lane(self, a: v128, val: i32) -> v128 { + unsafe { i32x4_replace_lane::(a, val) } + } + #[doc = "See [`arch::u32x4_replace_lane`]."] + #[inline(always)] + pub fn u32x4_replace_lane(self, a: v128, val: u32) -> v128 { + unsafe { u32x4_replace_lane::(a, val) } + } + #[doc = "See [`arch::i64x2_extract_lane`]."] + #[inline(always)] + pub fn i64x2_extract_lane(self, a: v128) -> i64 { + unsafe { i64x2_extract_lane::(a) } + } + #[doc = "See [`arch::u64x2_extract_lane`]."] + #[inline(always)] + pub fn u64x2_extract_lane(self, a: v128) -> u64 { + unsafe { u64x2_extract_lane::(a) } + } + #[doc = "See [`arch::i64x2_replace_lane`]."] + #[inline(always)] + pub fn i64x2_replace_lane(self, a: v128, val: i64) -> v128 { + unsafe { i64x2_replace_lane::(a, val) } + } + #[doc = "See [`arch::u64x2_replace_lane`]."] + #[inline(always)] + pub fn u64x2_replace_lane(self, a: v128, val: u64) -> v128 { + unsafe { u64x2_replace_lane::(a, val) } + } + #[doc = "See [`arch::f32x4_extract_lane`]."] + #[inline(always)] + pub fn f32x4_extract_lane(self, a: v128) -> f32 { + unsafe { f32x4_extract_lane::(a) } + } + #[doc = "See [`arch::f32x4_replace_lane`]."] + #[inline(always)] + pub fn f32x4_replace_lane(self, a: v128, val: f32) -> v128 { + unsafe { f32x4_replace_lane::(a, val) } + } + #[doc = "See [`arch::f64x2_extract_lane`]."] + #[inline(always)] + pub fn f64x2_extract_lane(self, a: v128) -> f64 { + unsafe { f64x2_extract_lane::(a) } + } + #[doc = "See [`arch::f64x2_replace_lane`]."] + #[inline(always)] + pub fn f64x2_replace_lane(self, a: v128, 
val: f64) -> v128 { + unsafe { f64x2_replace_lane::(a, val) } + } + #[doc = "See [`arch::i8x16_swizzle`]."] + #[inline(always)] + pub fn i8x16_swizzle(self, a: v128, s: v128) -> v128 { + unsafe { i8x16_swizzle(a, s) } + } + #[doc = "See [`arch::i8x16_splat`]."] + #[inline(always)] + pub fn i8x16_splat(self, a: i8) -> v128 { + unsafe { i8x16_splat(a) } + } + #[doc = "See [`arch::u8x16_splat`]."] + #[inline(always)] + pub fn u8x16_splat(self, a: u8) -> v128 { + unsafe { u8x16_splat(a) } + } + #[doc = "See [`arch::i16x8_splat`]."] + #[inline(always)] + pub fn i16x8_splat(self, a: i16) -> v128 { + unsafe { i16x8_splat(a) } + } + #[doc = "See [`arch::u16x8_splat`]."] + #[inline(always)] + pub fn u16x8_splat(self, a: u16) -> v128 { + unsafe { u16x8_splat(a) } + } + #[doc = "See [`arch::i32x4_splat`]."] + #[inline(always)] + pub fn i32x4_splat(self, a: i32) -> v128 { + unsafe { i32x4_splat(a) } + } + #[doc = "See [`arch::u32x4_splat`]."] + #[inline(always)] + pub fn u32x4_splat(self, a: u32) -> v128 { + unsafe { u32x4_splat(a) } + } + #[doc = "See [`arch::i64x2_splat`]."] + #[inline(always)] + pub fn i64x2_splat(self, a: i64) -> v128 { + unsafe { i64x2_splat(a) } + } + #[doc = "See [`arch::u64x2_splat`]."] + #[inline(always)] + pub fn u64x2_splat(self, a: u64) -> v128 { + unsafe { u64x2_splat(a) } + } + #[doc = "See [`arch::f32x4_splat`]."] + #[inline(always)] + pub fn f32x4_splat(self, a: f32) -> v128 { + unsafe { f32x4_splat(a) } + } + #[doc = "See [`arch::f64x2_splat`]."] + #[inline(always)] + pub fn f64x2_splat(self, a: f64) -> v128 { + unsafe { f64x2_splat(a) } + } + #[doc = "See [`arch::i8x16_eq`]."] + #[inline(always)] + pub fn i8x16_eq(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_eq(a, b) } + } + #[doc = "See [`arch::i8x16_ne`]."] + #[inline(always)] + pub fn i8x16_ne(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_ne(a, b) } + } + #[doc = "See [`arch::i8x16_lt`]."] + #[inline(always)] + pub fn i8x16_lt(self, a: v128, b: v128) -> v128 { + unsafe { 
i8x16_lt(a, b) } + } + #[doc = "See [`arch::u8x16_lt`]."] + #[inline(always)] + pub fn u8x16_lt(self, a: v128, b: v128) -> v128 { + unsafe { u8x16_lt(a, b) } + } + #[doc = "See [`arch::i8x16_gt`]."] + #[inline(always)] + pub fn i8x16_gt(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_gt(a, b) } + } + #[doc = "See [`arch::u8x16_gt`]."] + #[inline(always)] + pub fn u8x16_gt(self, a: v128, b: v128) -> v128 { + unsafe { u8x16_gt(a, b) } + } + #[doc = "See [`arch::i8x16_le`]."] + #[inline(always)] + pub fn i8x16_le(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_le(a, b) } + } + #[doc = "See [`arch::u8x16_le`]."] + #[inline(always)] + pub fn u8x16_le(self, a: v128, b: v128) -> v128 { + unsafe { u8x16_le(a, b) } + } + #[doc = "See [`arch::i8x16_ge`]."] + #[inline(always)] + pub fn i8x16_ge(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_ge(a, b) } + } + #[doc = "See [`arch::u8x16_ge`]."] + #[inline(always)] + pub fn u8x16_ge(self, a: v128, b: v128) -> v128 { + unsafe { u8x16_ge(a, b) } + } + #[doc = "See [`arch::i16x8_eq`]."] + #[inline(always)] + pub fn i16x8_eq(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_eq(a, b) } + } + #[doc = "See [`arch::i16x8_ne`]."] + #[inline(always)] + pub fn i16x8_ne(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_ne(a, b) } + } + #[doc = "See [`arch::i16x8_lt`]."] + #[inline(always)] + pub fn i16x8_lt(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_lt(a, b) } + } + #[doc = "See [`arch::u16x8_lt`]."] + #[inline(always)] + pub fn u16x8_lt(self, a: v128, b: v128) -> v128 { + unsafe { u16x8_lt(a, b) } + } + #[doc = "See [`arch::i16x8_gt`]."] + #[inline(always)] + pub fn i16x8_gt(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_gt(a, b) } + } + #[doc = "See [`arch::u16x8_gt`]."] + #[inline(always)] + pub fn u16x8_gt(self, a: v128, b: v128) -> v128 { + unsafe { u16x8_gt(a, b) } + } + #[doc = "See [`arch::i16x8_le`]."] + #[inline(always)] + pub fn i16x8_le(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_le(a, b) } + } + 
#[doc = "See [`arch::u16x8_le`]."] + #[inline(always)] + pub fn u16x8_le(self, a: v128, b: v128) -> v128 { + unsafe { u16x8_le(a, b) } + } + #[doc = "See [`arch::i16x8_ge`]."] + #[inline(always)] + pub fn i16x8_ge(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_ge(a, b) } + } + #[doc = "See [`arch::u16x8_ge`]."] + #[inline(always)] + pub fn u16x8_ge(self, a: v128, b: v128) -> v128 { + unsafe { u16x8_ge(a, b) } + } + #[doc = "See [`arch::i32x4_eq`]."] + #[inline(always)] + pub fn i32x4_eq(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_eq(a, b) } + } + #[doc = "See [`arch::i32x4_ne`]."] + #[inline(always)] + pub fn i32x4_ne(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_ne(a, b) } + } + #[doc = "See [`arch::i32x4_lt`]."] + #[inline(always)] + pub fn i32x4_lt(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_lt(a, b) } + } + #[doc = "See [`arch::u32x4_lt`]."] + #[inline(always)] + pub fn u32x4_lt(self, a: v128, b: v128) -> v128 { + unsafe { u32x4_lt(a, b) } + } + #[doc = "See [`arch::i32x4_gt`]."] + #[inline(always)] + pub fn i32x4_gt(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_gt(a, b) } + } + #[doc = "See [`arch::u32x4_gt`]."] + #[inline(always)] + pub fn u32x4_gt(self, a: v128, b: v128) -> v128 { + unsafe { u32x4_gt(a, b) } + } + #[doc = "See [`arch::i32x4_le`]."] + #[inline(always)] + pub fn i32x4_le(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_le(a, b) } + } + #[doc = "See [`arch::u32x4_le`]."] + #[inline(always)] + pub fn u32x4_le(self, a: v128, b: v128) -> v128 { + unsafe { u32x4_le(a, b) } + } + #[doc = "See [`arch::i32x4_ge`]."] + #[inline(always)] + pub fn i32x4_ge(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_ge(a, b) } + } + #[doc = "See [`arch::u32x4_ge`]."] + #[inline(always)] + pub fn u32x4_ge(self, a: v128, b: v128) -> v128 { + unsafe { u32x4_ge(a, b) } + } + #[doc = "See [`arch::i64x2_eq`]."] + #[inline(always)] + pub fn i64x2_eq(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_eq(a, b) } + } + #[doc = "See 
[`arch::i64x2_ne`]."] + #[inline(always)] + pub fn i64x2_ne(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_ne(a, b) } + } + #[doc = "See [`arch::i64x2_lt`]."] + #[inline(always)] + pub fn i64x2_lt(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_lt(a, b) } + } + #[doc = "See [`arch::i64x2_gt`]."] + #[inline(always)] + pub fn i64x2_gt(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_gt(a, b) } + } + #[doc = "See [`arch::i64x2_le`]."] + #[inline(always)] + pub fn i64x2_le(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_le(a, b) } + } + #[doc = "See [`arch::i64x2_ge`]."] + #[inline(always)] + pub fn i64x2_ge(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_ge(a, b) } + } + #[doc = "See [`arch::f32x4_eq`]."] + #[inline(always)] + pub fn f32x4_eq(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_eq(a, b) } + } + #[doc = "See [`arch::f32x4_ne`]."] + #[inline(always)] + pub fn f32x4_ne(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_ne(a, b) } + } + #[doc = "See [`arch::f32x4_lt`]."] + #[inline(always)] + pub fn f32x4_lt(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_lt(a, b) } + } + #[doc = "See [`arch::f32x4_gt`]."] + #[inline(always)] + pub fn f32x4_gt(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_gt(a, b) } + } + #[doc = "See [`arch::f32x4_le`]."] + #[inline(always)] + pub fn f32x4_le(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_le(a, b) } + } + #[doc = "See [`arch::f32x4_ge`]."] + #[inline(always)] + pub fn f32x4_ge(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_ge(a, b) } + } + #[doc = "See [`arch::f64x2_eq`]."] + #[inline(always)] + pub fn f64x2_eq(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_eq(a, b) } + } + #[doc = "See [`arch::f64x2_ne`]."] + #[inline(always)] + pub fn f64x2_ne(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_ne(a, b) } + } + #[doc = "See [`arch::f64x2_lt`]."] + #[inline(always)] + pub fn f64x2_lt(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_lt(a, b) } + } + #[doc = "See [`arch::f64x2_gt`]."] + 
#[inline(always)] + pub fn f64x2_gt(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_gt(a, b) } + } + #[doc = "See [`arch::f64x2_le`]."] + #[inline(always)] + pub fn f64x2_le(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_le(a, b) } + } + #[doc = "See [`arch::f64x2_ge`]."] + #[inline(always)] + pub fn f64x2_ge(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_ge(a, b) } + } + #[doc = "See [`arch::v128_not`]."] + #[inline(always)] + pub fn v128_not(self, a: v128) -> v128 { + unsafe { v128_not(a) } + } + #[doc = "See [`arch::v128_and`]."] + #[inline(always)] + pub fn v128_and(self, a: v128, b: v128) -> v128 { + unsafe { v128_and(a, b) } + } + #[doc = "See [`arch::v128_andnot`]."] + #[inline(always)] + pub fn v128_andnot(self, a: v128, b: v128) -> v128 { + unsafe { v128_andnot(a, b) } + } + #[doc = "See [`arch::v128_or`]."] + #[inline(always)] + pub fn v128_or(self, a: v128, b: v128) -> v128 { + unsafe { v128_or(a, b) } + } + #[doc = "See [`arch::v128_xor`]."] + #[inline(always)] + pub fn v128_xor(self, a: v128, b: v128) -> v128 { + unsafe { v128_xor(a, b) } + } + #[doc = "See [`arch::v128_bitselect`]."] + #[inline(always)] + pub fn v128_bitselect(self, v1: v128, v2: v128, c: v128) -> v128 { + unsafe { v128_bitselect(v1, v2, c) } + } + #[doc = "See [`arch::v128_any_true`]."] + #[inline(always)] + pub fn v128_any_true(self, a: v128) -> bool { + unsafe { v128_any_true(a) } + } + #[doc = "See [`arch::i8x16_abs`]."] + #[inline(always)] + pub fn i8x16_abs(self, a: v128) -> v128 { + unsafe { i8x16_abs(a) } + } + #[doc = "See [`arch::i8x16_neg`]."] + #[inline(always)] + pub fn i8x16_neg(self, a: v128) -> v128 { + unsafe { i8x16_neg(a) } + } + #[doc = "See [`arch::i8x16_popcnt`]."] + #[inline(always)] + pub fn i8x16_popcnt(self, v: v128) -> v128 { + unsafe { i8x16_popcnt(v) } + } + #[doc = "See [`arch::i8x16_all_true`]."] + #[inline(always)] + pub fn i8x16_all_true(self, a: v128) -> bool { + unsafe { i8x16_all_true(a) } + } + #[doc = "See [`arch::i8x16_bitmask`]."] + 
#[inline(always)] + pub fn i8x16_bitmask(self, a: v128) -> u16 { + unsafe { i8x16_bitmask(a) } + } + #[doc = "See [`arch::i8x16_narrow_i16x8`]."] + #[inline(always)] + pub fn i8x16_narrow_i16x8(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_narrow_i16x8(a, b) } + } + #[doc = "See [`arch::u8x16_narrow_i16x8`]."] + #[inline(always)] + pub fn u8x16_narrow_i16x8(self, a: v128, b: v128) -> v128 { + unsafe { u8x16_narrow_i16x8(a, b) } + } + #[doc = "See [`arch::i8x16_shl`]."] + #[inline(always)] + pub fn i8x16_shl(self, a: v128, amt: u32) -> v128 { + unsafe { i8x16_shl(a, amt) } + } + #[doc = "See [`arch::i8x16_shr`]."] + #[inline(always)] + pub fn i8x16_shr(self, a: v128, amt: u32) -> v128 { + unsafe { i8x16_shr(a, amt) } + } + #[doc = "See [`arch::u8x16_shr`]."] + #[inline(always)] + pub fn u8x16_shr(self, a: v128, amt: u32) -> v128 { + unsafe { u8x16_shr(a, amt) } + } + #[doc = "See [`arch::i8x16_add`]."] + #[inline(always)] + pub fn i8x16_add(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_add(a, b) } + } + #[doc = "See [`arch::i8x16_add_sat`]."] + #[inline(always)] + pub fn i8x16_add_sat(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_add_sat(a, b) } + } + #[doc = "See [`arch::u8x16_add_sat`]."] + #[inline(always)] + pub fn u8x16_add_sat(self, a: v128, b: v128) -> v128 { + unsafe { u8x16_add_sat(a, b) } + } + #[doc = "See [`arch::i8x16_sub`]."] + #[inline(always)] + pub fn i8x16_sub(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_sub(a, b) } + } + #[doc = "See [`arch::i8x16_sub_sat`]."] + #[inline(always)] + pub fn i8x16_sub_sat(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_sub_sat(a, b) } + } + #[doc = "See [`arch::u8x16_sub_sat`]."] + #[inline(always)] + pub fn u8x16_sub_sat(self, a: v128, b: v128) -> v128 { + unsafe { u8x16_sub_sat(a, b) } + } + #[doc = "See [`arch::i8x16_min`]."] + #[inline(always)] + pub fn i8x16_min(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_min(a, b) } + } + #[doc = "See [`arch::u8x16_min`]."] + #[inline(always)] + 
pub fn u8x16_min(self, a: v128, b: v128) -> v128 { + unsafe { u8x16_min(a, b) } + } + #[doc = "See [`arch::i8x16_max`]."] + #[inline(always)] + pub fn i8x16_max(self, a: v128, b: v128) -> v128 { + unsafe { i8x16_max(a, b) } + } + #[doc = "See [`arch::u8x16_max`]."] + #[inline(always)] + pub fn u8x16_max(self, a: v128, b: v128) -> v128 { + unsafe { u8x16_max(a, b) } + } + #[doc = "See [`arch::u8x16_avgr`]."] + #[inline(always)] + pub fn u8x16_avgr(self, a: v128, b: v128) -> v128 { + unsafe { u8x16_avgr(a, b) } + } + #[doc = "See [`arch::i16x8_extadd_pairwise_i8x16`]."] + #[inline(always)] + pub fn i16x8_extadd_pairwise_i8x16(self, a: v128) -> v128 { + unsafe { i16x8_extadd_pairwise_i8x16(a) } + } + #[doc = "See [`arch::i16x8_extadd_pairwise_u8x16`]."] + #[inline(always)] + pub fn i16x8_extadd_pairwise_u8x16(self, a: v128) -> v128 { + unsafe { i16x8_extadd_pairwise_u8x16(a) } + } + #[doc = "See [`arch::i16x8_abs`]."] + #[inline(always)] + pub fn i16x8_abs(self, a: v128) -> v128 { + unsafe { i16x8_abs(a) } + } + #[doc = "See [`arch::i16x8_neg`]."] + #[inline(always)] + pub fn i16x8_neg(self, a: v128) -> v128 { + unsafe { i16x8_neg(a) } + } + #[doc = "See [`arch::i16x8_q15mulr_sat`]."] + #[inline(always)] + pub fn i16x8_q15mulr_sat(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_q15mulr_sat(a, b) } + } + #[doc = "See [`arch::i16x8_all_true`]."] + #[inline(always)] + pub fn i16x8_all_true(self, a: v128) -> bool { + unsafe { i16x8_all_true(a) } + } + #[doc = "See [`arch::i16x8_bitmask`]."] + #[inline(always)] + pub fn i16x8_bitmask(self, a: v128) -> u8 { + unsafe { i16x8_bitmask(a) } + } + #[doc = "See [`arch::i16x8_narrow_i32x4`]."] + #[inline(always)] + pub fn i16x8_narrow_i32x4(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_narrow_i32x4(a, b) } + } + #[doc = "See [`arch::u16x8_narrow_i32x4`]."] + #[inline(always)] + pub fn u16x8_narrow_i32x4(self, a: v128, b: v128) -> v128 { + unsafe { u16x8_narrow_i32x4(a, b) } + } + #[doc = "See 
[`arch::i16x8_extend_low_i8x16`]."] + #[inline(always)] + pub fn i16x8_extend_low_i8x16(self, a: v128) -> v128 { + unsafe { i16x8_extend_low_i8x16(a) } + } + #[doc = "See [`arch::i16x8_extend_high_i8x16`]."] + #[inline(always)] + pub fn i16x8_extend_high_i8x16(self, a: v128) -> v128 { + unsafe { i16x8_extend_high_i8x16(a) } + } + #[doc = "See [`arch::i16x8_extend_low_u8x16`]."] + #[inline(always)] + pub fn i16x8_extend_low_u8x16(self, a: v128) -> v128 { + unsafe { i16x8_extend_low_u8x16(a) } + } + #[doc = "See [`arch::i16x8_extend_high_u8x16`]."] + #[inline(always)] + pub fn i16x8_extend_high_u8x16(self, a: v128) -> v128 { + unsafe { i16x8_extend_high_u8x16(a) } + } + #[doc = "See [`arch::i16x8_shl`]."] + #[inline(always)] + pub fn i16x8_shl(self, a: v128, amt: u32) -> v128 { + unsafe { i16x8_shl(a, amt) } + } + #[doc = "See [`arch::i16x8_shr`]."] + #[inline(always)] + pub fn i16x8_shr(self, a: v128, amt: u32) -> v128 { + unsafe { i16x8_shr(a, amt) } + } + #[doc = "See [`arch::u16x8_shr`]."] + #[inline(always)] + pub fn u16x8_shr(self, a: v128, amt: u32) -> v128 { + unsafe { u16x8_shr(a, amt) } + } + #[doc = "See [`arch::i16x8_add`]."] + #[inline(always)] + pub fn i16x8_add(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_add(a, b) } + } + #[doc = "See [`arch::i16x8_add_sat`]."] + #[inline(always)] + pub fn i16x8_add_sat(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_add_sat(a, b) } + } + #[doc = "See [`arch::u16x8_add_sat`]."] + #[inline(always)] + pub fn u16x8_add_sat(self, a: v128, b: v128) -> v128 { + unsafe { u16x8_add_sat(a, b) } + } + #[doc = "See [`arch::i16x8_sub`]."] + #[inline(always)] + pub fn i16x8_sub(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_sub(a, b) } + } + #[doc = "See [`arch::i16x8_sub_sat`]."] + #[inline(always)] + pub fn i16x8_sub_sat(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_sub_sat(a, b) } + } + #[doc = "See [`arch::u16x8_sub_sat`]."] + #[inline(always)] + pub fn u16x8_sub_sat(self, a: v128, b: v128) -> v128 { + unsafe 
{ u16x8_sub_sat(a, b) } + } + #[doc = "See [`arch::i16x8_mul`]."] + #[inline(always)] + pub fn i16x8_mul(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_mul(a, b) } + } + #[doc = "See [`arch::i16x8_min`]."] + #[inline(always)] + pub fn i16x8_min(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_min(a, b) } + } + #[doc = "See [`arch::u16x8_min`]."] + #[inline(always)] + pub fn u16x8_min(self, a: v128, b: v128) -> v128 { + unsafe { u16x8_min(a, b) } + } + #[doc = "See [`arch::i16x8_max`]."] + #[inline(always)] + pub fn i16x8_max(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_max(a, b) } + } + #[doc = "See [`arch::u16x8_max`]."] + #[inline(always)] + pub fn u16x8_max(self, a: v128, b: v128) -> v128 { + unsafe { u16x8_max(a, b) } + } + #[doc = "See [`arch::u16x8_avgr`]."] + #[inline(always)] + pub fn u16x8_avgr(self, a: v128, b: v128) -> v128 { + unsafe { u16x8_avgr(a, b) } + } + #[doc = "See [`arch::i16x8_extmul_low_i8x16`]."] + #[inline(always)] + pub fn i16x8_extmul_low_i8x16(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_extmul_low_i8x16(a, b) } + } + #[doc = "See [`arch::i16x8_extmul_high_i8x16`]."] + #[inline(always)] + pub fn i16x8_extmul_high_i8x16(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_extmul_high_i8x16(a, b) } + } + #[doc = "See [`arch::i16x8_extmul_low_u8x16`]."] + #[inline(always)] + pub fn i16x8_extmul_low_u8x16(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_extmul_low_u8x16(a, b) } + } + #[doc = "See [`arch::i16x8_extmul_high_u8x16`]."] + #[inline(always)] + pub fn i16x8_extmul_high_u8x16(self, a: v128, b: v128) -> v128 { + unsafe { i16x8_extmul_high_u8x16(a, b) } + } + #[doc = "See [`arch::i32x4_extadd_pairwise_i16x8`]."] + #[inline(always)] + pub fn i32x4_extadd_pairwise_i16x8(self, a: v128) -> v128 { + unsafe { i32x4_extadd_pairwise_i16x8(a) } + } + #[doc = "See [`arch::i32x4_extadd_pairwise_u16x8`]."] + #[inline(always)] + pub fn i32x4_extadd_pairwise_u16x8(self, a: v128) -> v128 { + unsafe { i32x4_extadd_pairwise_u16x8(a) 
} + } + #[doc = "See [`arch::i32x4_abs`]."] + #[inline(always)] + pub fn i32x4_abs(self, a: v128) -> v128 { + unsafe { i32x4_abs(a) } + } + #[doc = "See [`arch::i32x4_neg`]."] + #[inline(always)] + pub fn i32x4_neg(self, a: v128) -> v128 { + unsafe { i32x4_neg(a) } + } + #[doc = "See [`arch::i32x4_all_true`]."] + #[inline(always)] + pub fn i32x4_all_true(self, a: v128) -> bool { + unsafe { i32x4_all_true(a) } + } + #[doc = "See [`arch::i32x4_bitmask`]."] + #[inline(always)] + pub fn i32x4_bitmask(self, a: v128) -> u8 { + unsafe { i32x4_bitmask(a) } + } + #[doc = "See [`arch::i32x4_extend_low_i16x8`]."] + #[inline(always)] + pub fn i32x4_extend_low_i16x8(self, a: v128) -> v128 { + unsafe { i32x4_extend_low_i16x8(a) } + } + #[doc = "See [`arch::i32x4_extend_high_i16x8`]."] + #[inline(always)] + pub fn i32x4_extend_high_i16x8(self, a: v128) -> v128 { + unsafe { i32x4_extend_high_i16x8(a) } + } + #[doc = "See [`arch::i32x4_extend_low_u16x8`]."] + #[inline(always)] + pub fn i32x4_extend_low_u16x8(self, a: v128) -> v128 { + unsafe { i32x4_extend_low_u16x8(a) } + } + #[doc = "See [`arch::i32x4_extend_high_u16x8`]."] + #[inline(always)] + pub fn i32x4_extend_high_u16x8(self, a: v128) -> v128 { + unsafe { i32x4_extend_high_u16x8(a) } + } + #[doc = "See [`arch::i32x4_shl`]."] + #[inline(always)] + pub fn i32x4_shl(self, a: v128, amt: u32) -> v128 { + unsafe { i32x4_shl(a, amt) } + } + #[doc = "See [`arch::i32x4_shr`]."] + #[inline(always)] + pub fn i32x4_shr(self, a: v128, amt: u32) -> v128 { + unsafe { i32x4_shr(a, amt) } + } + #[doc = "See [`arch::u32x4_shr`]."] + #[inline(always)] + pub fn u32x4_shr(self, a: v128, amt: u32) -> v128 { + unsafe { u32x4_shr(a, amt) } + } + #[doc = "See [`arch::i32x4_add`]."] + #[inline(always)] + pub fn i32x4_add(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_add(a, b) } + } + #[doc = "See [`arch::i32x4_sub`]."] + #[inline(always)] + pub fn i32x4_sub(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_sub(a, b) } + } + #[doc = "See 
[`arch::i32x4_mul`]."] + #[inline(always)] + pub fn i32x4_mul(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_mul(a, b) } + } + #[doc = "See [`arch::i32x4_min`]."] + #[inline(always)] + pub fn i32x4_min(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_min(a, b) } + } + #[doc = "See [`arch::u32x4_min`]."] + #[inline(always)] + pub fn u32x4_min(self, a: v128, b: v128) -> v128 { + unsafe { u32x4_min(a, b) } + } + #[doc = "See [`arch::i32x4_max`]."] + #[inline(always)] + pub fn i32x4_max(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_max(a, b) } + } + #[doc = "See [`arch::u32x4_max`]."] + #[inline(always)] + pub fn u32x4_max(self, a: v128, b: v128) -> v128 { + unsafe { u32x4_max(a, b) } + } + #[doc = "See [`arch::i32x4_dot_i16x8`]."] + #[inline(always)] + pub fn i32x4_dot_i16x8(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_dot_i16x8(a, b) } + } + #[doc = "See [`arch::i32x4_extmul_low_i16x8`]."] + #[inline(always)] + pub fn i32x4_extmul_low_i16x8(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_extmul_low_i16x8(a, b) } + } + #[doc = "See [`arch::i32x4_extmul_high_i16x8`]."] + #[inline(always)] + pub fn i32x4_extmul_high_i16x8(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_extmul_high_i16x8(a, b) } + } + #[doc = "See [`arch::i32x4_extmul_low_u16x8`]."] + #[inline(always)] + pub fn i32x4_extmul_low_u16x8(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_extmul_low_u16x8(a, b) } + } + #[doc = "See [`arch::i32x4_extmul_high_u16x8`]."] + #[inline(always)] + pub fn i32x4_extmul_high_u16x8(self, a: v128, b: v128) -> v128 { + unsafe { i32x4_extmul_high_u16x8(a, b) } + } + #[doc = "See [`arch::i64x2_abs`]."] + #[inline(always)] + pub fn i64x2_abs(self, a: v128) -> v128 { + unsafe { i64x2_abs(a) } + } + #[doc = "See [`arch::i64x2_neg`]."] + #[inline(always)] + pub fn i64x2_neg(self, a: v128) -> v128 { + unsafe { i64x2_neg(a) } + } + #[doc = "See [`arch::i64x2_all_true`]."] + #[inline(always)] + pub fn i64x2_all_true(self, a: v128) -> bool { + unsafe { 
i64x2_all_true(a) } + } + #[doc = "See [`arch::i64x2_bitmask`]."] + #[inline(always)] + pub fn i64x2_bitmask(self, a: v128) -> u8 { + unsafe { i64x2_bitmask(a) } + } + #[doc = "See [`arch::i64x2_extend_low_i32x4`]."] + #[inline(always)] + pub fn i64x2_extend_low_i32x4(self, a: v128) -> v128 { + unsafe { i64x2_extend_low_i32x4(a) } + } + #[doc = "See [`arch::i64x2_extend_high_i32x4`]."] + #[inline(always)] + pub fn i64x2_extend_high_i32x4(self, a: v128) -> v128 { + unsafe { i64x2_extend_high_i32x4(a) } + } + #[doc = "See [`arch::i64x2_extend_low_u32x4`]."] + #[inline(always)] + pub fn i64x2_extend_low_u32x4(self, a: v128) -> v128 { + unsafe { i64x2_extend_low_u32x4(a) } + } + #[doc = "See [`arch::i64x2_extend_high_u32x4`]."] + #[inline(always)] + pub fn i64x2_extend_high_u32x4(self, a: v128) -> v128 { + unsafe { i64x2_extend_high_u32x4(a) } + } + #[doc = "See [`arch::i64x2_shl`]."] + #[inline(always)] + pub fn i64x2_shl(self, a: v128, amt: u32) -> v128 { + unsafe { i64x2_shl(a, amt) } + } + #[doc = "See [`arch::i64x2_shr`]."] + #[inline(always)] + pub fn i64x2_shr(self, a: v128, amt: u32) -> v128 { + unsafe { i64x2_shr(a, amt) } + } + #[doc = "See [`arch::u64x2_shr`]."] + #[inline(always)] + pub fn u64x2_shr(self, a: v128, amt: u32) -> v128 { + unsafe { u64x2_shr(a, amt) } + } + #[doc = "See [`arch::i64x2_add`]."] + #[inline(always)] + pub fn i64x2_add(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_add(a, b) } + } + #[doc = "See [`arch::i64x2_sub`]."] + #[inline(always)] + pub fn i64x2_sub(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_sub(a, b) } + } + #[doc = "See [`arch::i64x2_mul`]."] + #[inline(always)] + pub fn i64x2_mul(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_mul(a, b) } + } + #[doc = "See [`arch::i64x2_extmul_low_i32x4`]."] + #[inline(always)] + pub fn i64x2_extmul_low_i32x4(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_extmul_low_i32x4(a, b) } + } + #[doc = "See [`arch::i64x2_extmul_high_i32x4`]."] + #[inline(always)] + pub fn 
i64x2_extmul_high_i32x4(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_extmul_high_i32x4(a, b) } + } + #[doc = "See [`arch::i64x2_extmul_low_u32x4`]."] + #[inline(always)] + pub fn i64x2_extmul_low_u32x4(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_extmul_low_u32x4(a, b) } + } + #[doc = "See [`arch::i64x2_extmul_high_u32x4`]."] + #[inline(always)] + pub fn i64x2_extmul_high_u32x4(self, a: v128, b: v128) -> v128 { + unsafe { i64x2_extmul_high_u32x4(a, b) } + } + #[doc = "See [`arch::f32x4_ceil`]."] + #[inline(always)] + pub fn f32x4_ceil(self, a: v128) -> v128 { + unsafe { f32x4_ceil(a) } + } + #[doc = "See [`arch::f32x4_floor`]."] + #[inline(always)] + pub fn f32x4_floor(self, a: v128) -> v128 { + unsafe { f32x4_floor(a) } + } + #[doc = "See [`arch::f32x4_trunc`]."] + #[inline(always)] + pub fn f32x4_trunc(self, a: v128) -> v128 { + unsafe { f32x4_trunc(a) } + } + #[doc = "See [`arch::f32x4_nearest`]."] + #[inline(always)] + pub fn f32x4_nearest(self, a: v128) -> v128 { + unsafe { f32x4_nearest(a) } + } + #[doc = "See [`arch::f32x4_abs`]."] + #[inline(always)] + pub fn f32x4_abs(self, a: v128) -> v128 { + unsafe { f32x4_abs(a) } + } + #[doc = "See [`arch::f32x4_neg`]."] + #[inline(always)] + pub fn f32x4_neg(self, a: v128) -> v128 { + unsafe { f32x4_neg(a) } + } + #[doc = "See [`arch::f32x4_sqrt`]."] + #[inline(always)] + pub fn f32x4_sqrt(self, a: v128) -> v128 { + unsafe { f32x4_sqrt(a) } + } + #[doc = "See [`arch::f32x4_add`]."] + #[inline(always)] + pub fn f32x4_add(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_add(a, b) } + } + #[doc = "See [`arch::f32x4_sub`]."] + #[inline(always)] + pub fn f32x4_sub(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_sub(a, b) } + } + #[doc = "See [`arch::f32x4_mul`]."] + #[inline(always)] + pub fn f32x4_mul(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_mul(a, b) } + } + #[doc = "See [`arch::f32x4_div`]."] + #[inline(always)] + pub fn f32x4_div(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_div(a, b) } 
+ } + #[doc = "See [`arch::f32x4_min`]."] + #[inline(always)] + pub fn f32x4_min(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_min(a, b) } + } + #[doc = "See [`arch::f32x4_max`]."] + #[inline(always)] + pub fn f32x4_max(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_max(a, b) } + } + #[doc = "See [`arch::f32x4_pmin`]."] + #[inline(always)] + pub fn f32x4_pmin(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_pmin(a, b) } + } + #[doc = "See [`arch::f32x4_pmax`]."] + #[inline(always)] + pub fn f32x4_pmax(self, a: v128, b: v128) -> v128 { + unsafe { f32x4_pmax(a, b) } + } + #[doc = "See [`arch::f64x2_ceil`]."] + #[inline(always)] + pub fn f64x2_ceil(self, a: v128) -> v128 { + unsafe { f64x2_ceil(a) } + } + #[doc = "See [`arch::f64x2_floor`]."] + #[inline(always)] + pub fn f64x2_floor(self, a: v128) -> v128 { + unsafe { f64x2_floor(a) } + } + #[doc = "See [`arch::f64x2_trunc`]."] + #[inline(always)] + pub fn f64x2_trunc(self, a: v128) -> v128 { + unsafe { f64x2_trunc(a) } + } + #[doc = "See [`arch::f64x2_nearest`]."] + #[inline(always)] + pub fn f64x2_nearest(self, a: v128) -> v128 { + unsafe { f64x2_nearest(a) } + } + #[doc = "See [`arch::f64x2_abs`]."] + #[inline(always)] + pub fn f64x2_abs(self, a: v128) -> v128 { + unsafe { f64x2_abs(a) } + } + #[doc = "See [`arch::f64x2_neg`]."] + #[inline(always)] + pub fn f64x2_neg(self, a: v128) -> v128 { + unsafe { f64x2_neg(a) } + } + #[doc = "See [`arch::f64x2_sqrt`]."] + #[inline(always)] + pub fn f64x2_sqrt(self, a: v128) -> v128 { + unsafe { f64x2_sqrt(a) } + } + #[doc = "See [`arch::f64x2_add`]."] + #[inline(always)] + pub fn f64x2_add(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_add(a, b) } + } + #[doc = "See [`arch::f64x2_sub`]."] + #[inline(always)] + pub fn f64x2_sub(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_sub(a, b) } + } + #[doc = "See [`arch::f64x2_mul`]."] + #[inline(always)] + pub fn f64x2_mul(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_mul(a, b) } + } + #[doc = "See 
[`arch::f64x2_div`]."] + #[inline(always)] + pub fn f64x2_div(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_div(a, b) } + } + #[doc = "See [`arch::f64x2_min`]."] + #[inline(always)] + pub fn f64x2_min(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_min(a, b) } + } + #[doc = "See [`arch::f64x2_max`]."] + #[inline(always)] + pub fn f64x2_max(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_max(a, b) } + } + #[doc = "See [`arch::f64x2_pmin`]."] + #[inline(always)] + pub fn f64x2_pmin(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_pmin(a, b) } + } + #[doc = "See [`arch::f64x2_pmax`]."] + #[inline(always)] + pub fn f64x2_pmax(self, a: v128, b: v128) -> v128 { + unsafe { f64x2_pmax(a, b) } + } + #[doc = "See [`arch::i32x4_trunc_sat_f32x4`]."] + #[inline(always)] + pub fn i32x4_trunc_sat_f32x4(self, a: v128) -> v128 { + unsafe { i32x4_trunc_sat_f32x4(a) } + } + #[doc = "See [`arch::u32x4_trunc_sat_f32x4`]."] + #[inline(always)] + pub fn u32x4_trunc_sat_f32x4(self, a: v128) -> v128 { + unsafe { u32x4_trunc_sat_f32x4(a) } + } + #[doc = "See [`arch::f32x4_convert_i32x4`]."] + #[inline(always)] + pub fn f32x4_convert_i32x4(self, a: v128) -> v128 { + unsafe { f32x4_convert_i32x4(a) } + } + #[doc = "See [`arch::f32x4_convert_u32x4`]."] + #[inline(always)] + pub fn f32x4_convert_u32x4(self, a: v128) -> v128 { + unsafe { f32x4_convert_u32x4(a) } + } + #[doc = "See [`arch::i32x4_trunc_sat_f64x2_zero`]."] + #[inline(always)] + pub fn i32x4_trunc_sat_f64x2_zero(self, a: v128) -> v128 { + unsafe { i32x4_trunc_sat_f64x2_zero(a) } + } + #[doc = "See [`arch::u32x4_trunc_sat_f64x2_zero`]."] + #[inline(always)] + pub fn u32x4_trunc_sat_f64x2_zero(self, a: v128) -> v128 { + unsafe { u32x4_trunc_sat_f64x2_zero(a) } + } + #[doc = "See [`arch::f64x2_convert_low_i32x4`]."] + #[inline(always)] + pub fn f64x2_convert_low_i32x4(self, a: v128) -> v128 { + unsafe { f64x2_convert_low_i32x4(a) } + } + #[doc = "See [`arch::f64x2_convert_low_u32x4`]."] + #[inline(always)] + pub fn 
f64x2_convert_low_u32x4(self, a: v128) -> v128 { + unsafe { f64x2_convert_low_u32x4(a) } + } + #[doc = "See [`arch::f32x4_demote_f64x2_zero`]."] + #[inline(always)] + pub fn f32x4_demote_f64x2_zero(self, a: v128) -> v128 { + unsafe { f32x4_demote_f64x2_zero(a) } + } + #[doc = "See [`arch::f64x2_promote_low_f32x4`]."] + #[inline(always)] + pub fn f64x2_promote_low_f32x4(self, a: v128) -> v128 { + unsafe { f64x2_promote_low_f32x4(a) } + } +} diff --git a/fearless_simd/src/core_arch/x86/avx.rs b/fearless_simd/src/core_arch/x86/avx.rs index 99c953f78..64b781f19 100644 --- a/fearless_simd/src/core_arch/x86/avx.rs +++ b/fearless_simd/src/core_arch/x86/avx.rs @@ -1,340 +1,1145 @@ -// Copyright 2024 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -//! Access to AVX intrinsics. +// This file is autogenerated by fearless_simd_gen -use crate::impl_macros::delegate; +use arch::*; #[cfg(target_arch = "x86")] use core::arch::x86 as arch; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as arch; - -use arch::*; - -/// A token for AVX intrinsics on `x86` and `x86_64`. +#[doc = "A token for `Avx` intrinsics on `x86` and `x86_64`."] #[derive(Clone, Copy, Debug)] pub struct Avx { _private: (), } - -#[expect( +#[allow( clippy::missing_safety_doc, - reason = "TODO: https://github.com/linebender/fearless_simd/issues/40" + reason = "The underlying functions have their own safety docs" )] impl Avx { - /// Create a SIMD token. - /// - /// # Safety - /// - /// The required CPU features must be available. + #[doc = r" Create a SIMD token."] + #[doc = r""] + #[doc = r" # Safety"] + #[doc = r""] + #[doc = r" The required CPU features must be available."] #[inline] pub const unsafe fn new_unchecked() -> Self { Self { _private: () } } - - delegate! 
{ arch: - fn _mm256_add_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_add_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_and_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_and_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_or_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_or_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_shuffle_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_shuffle_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_andnot_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_andnot_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_max_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_max_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_min_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_min_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_mul_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_mul_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_addsub_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_addsub_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_sub_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_sub_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_div_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_div_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_round_pd(a: __m256d) -> __m256d; - fn _mm256_ceil_pd(a: __m256d) -> __m256d; - fn _mm256_floor_pd(a: __m256d) -> __m256d; - fn _mm256_round_ps(a: __m256) -> __m256; - fn _mm256_ceil_ps(a: __m256) -> __m256; - fn _mm256_floor_ps(a: __m256) -> __m256; - fn _mm256_sqrt_ps(a: __m256) -> __m256; - fn _mm256_sqrt_pd(a: __m256d) -> __m256d; - fn _mm256_blend_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_blend_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_blendv_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d; - fn _mm256_blendv_ps(a: __m256, b: __m256, c: __m256) -> __m256; - fn _mm256_dp_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_hadd_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_hadd_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_hsub_pd(a: __m256d, b: 
__m256d) -> __m256d; - fn _mm256_hsub_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_xor_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_xor_ps(a: __m256, b: __m256) -> __m256; - fn _mm_cmp_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm256_cmp_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm_cmp_ps(a: __m128, b: __m128) -> __m128; - fn _mm256_cmp_ps(a: __m256, b: __m256) -> __m256; - fn _mm_cmp_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmp_ss(a: __m128, b: __m128) -> __m128; - fn _mm256_cvtepi32_pd(a: __m128i) -> __m256d; - fn _mm256_cvtepi32_ps(a: __m256i) -> __m256; - fn _mm256_cvtpd_ps(a: __m256d) -> __m128; - fn _mm256_cvtps_epi32(a: __m256) -> __m256i; - fn _mm256_cvtps_pd(a: __m128) -> __m256d; - fn _mm256_cvttpd_epi32(a: __m256d) -> __m128i; - fn _mm256_cvtpd_epi32(a: __m256d) -> __m128i; - fn _mm256_cvttps_epi32(a: __m256) -> __m256i; - fn _mm256_extractf128_ps(a: __m256) -> __m128; - fn _mm256_extractf128_pd(a: __m256d) -> __m128d; - fn _mm256_extractf128_si256(a: __m256i) -> __m128i; - fn _mm256_zeroall(); - fn _mm256_zeroupper(); - fn _mm256_permutevar_ps(a: __m256, b: __m256i) -> __m256; - fn _mm_permutevar_ps(a: __m128, b: __m128i) -> __m128; - fn _mm256_permute_ps(a: __m256) -> __m256; - fn _mm_permute_ps(a: __m128) -> __m128; - fn _mm256_permutevar_pd(a: __m256d, b: __m256i) -> __m256d; - fn _mm_permutevar_pd(a: __m128d, b: __m128i) -> __m128d; - fn _mm256_permute_pd(a: __m256d) -> __m256d; - fn _mm_permute_pd(a: __m128d) -> __m128d; - fn _mm256_permute2f128_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_permute2f128_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_permute2f128_si256(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_broadcast_ss(f: &f32) -> __m256; - fn _mm_broadcast_ss(f: &f32) -> __m128; - fn _mm256_broadcast_sd(f: &f64) -> __m256d; - fn _mm256_broadcast_ps(a: &__m128) -> __m256; - fn _mm256_broadcast_pd(a: &__m128d) -> __m256d; - fn _mm256_insertf128_ps(a: __m256, b: __m128) -> __m256; - fn _mm256_insertf128_pd(a: 
__m256d, b: __m128d) -> __m256d; - fn _mm256_insertf128_si256(a: __m256i, b: __m128i) -> __m256i; - fn _mm256_insert_epi8(a: __m256i, i: i8) -> __m256i; - fn _mm256_insert_epi16(a: __m256i, i: i16) -> __m256i; - fn _mm256_insert_epi32(a: __m256i, i: i32) -> __m256i; - unsafe fn _mm256_load_pd(mem_addr: *const f64) -> __m256d; - unsafe fn _mm256_store_pd(mem_addr: *mut f64, a: __m256d); - unsafe fn _mm256_load_ps(mem_addr: *const f32) -> __m256; - unsafe fn _mm256_store_ps(mem_addr: *mut f32, a: __m256); - unsafe fn _mm256_loadu_pd(mem_addr: *const f64) -> __m256d; - unsafe fn _mm256_storeu_pd(mem_addr: *mut f64, a: __m256d); - unsafe fn _mm256_loadu_ps(mem_addr: *const f32) -> __m256; - unsafe fn _mm256_storeu_ps(mem_addr: *mut f32, a: __m256); - unsafe fn _mm256_load_si256(mem_addr: *const __m256i) -> __m256i; - unsafe fn _mm256_store_si256(mem_addr: *mut __m256i, a: __m256i); - unsafe fn _mm256_loadu_si256(mem_addr: *const __m256i) -> __m256i; - unsafe fn _mm256_storeu_si256(mem_addr: *mut __m256i, a: __m256i); - unsafe fn _mm256_maskload_pd(mem_addr: *const f64, mask: __m256i) -> __m256d; - unsafe fn _mm256_maskstore_pd(mem_addr: *mut f64, mask: __m256i, a: __m256d); - unsafe fn _mm_maskload_pd(mem_addr: *const f64, mask: __m128i) -> __m128d; - unsafe fn _mm_maskstore_pd(mem_addr: *mut f64, mask: __m128i, a: __m128d); - unsafe fn _mm256_maskload_ps(mem_addr: *const f32, mask: __m256i) -> __m256; - unsafe fn _mm256_maskstore_ps(mem_addr: *mut f32, mask: __m256i, a: __m256); - unsafe fn _mm_maskload_ps(mem_addr: *const f32, mask: __m128i) -> __m128; - unsafe fn _mm_maskstore_ps(mem_addr: *mut f32, mask: __m128i, a: __m128); - fn _mm256_movehdup_ps(a: __m256) -> __m256; - fn _mm256_moveldup_ps(a: __m256) -> __m256; - fn _mm256_movedup_pd(a: __m256d) -> __m256d; - unsafe fn _mm256_lddqu_si256(mem_addr: *const __m256i) -> __m256i; - unsafe fn _mm256_stream_si256(mem_addr: *mut __m256i, a: __m256i); - unsafe fn _mm256_stream_pd(mem_addr: *mut f64, a: __m256d); - 
unsafe fn _mm256_stream_ps(mem_addr: *mut f32, a: __m256); - fn _mm256_rcp_ps(a: __m256) -> __m256; - fn _mm256_rsqrt_ps(a: __m256) -> __m256; - fn _mm256_unpackhi_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_unpackhi_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_unpacklo_pd(a: __m256d, b: __m256d) -> __m256d; - fn _mm256_unpacklo_ps(a: __m256, b: __m256) -> __m256; - fn _mm256_testz_si256(a: __m256i, b: __m256i) -> i32; - fn _mm256_testc_si256(a: __m256i, b: __m256i) -> i32; - fn _mm256_testnzc_si256(a: __m256i, b: __m256i) -> i32; - fn _mm256_testz_pd(a: __m256d, b: __m256d) -> i32; - fn _mm256_testc_pd(a: __m256d, b: __m256d) -> i32; - fn _mm256_testnzc_pd(a: __m256d, b: __m256d) -> i32; - fn _mm_testz_pd(a: __m128d, b: __m128d) -> i32; - fn _mm_testc_pd(a: __m128d, b: __m128d) -> i32; - fn _mm_testnzc_pd(a: __m128d, b: __m128d) -> i32; - fn _mm256_testz_ps(a: __m256, b: __m256) -> i32; - fn _mm256_testc_ps(a: __m256, b: __m256) -> i32; - fn _mm256_testnzc_ps(a: __m256, b: __m256) -> i32; - fn _mm_testz_ps(a: __m128, b: __m128) -> i32; - fn _mm_testc_ps(a: __m128, b: __m128) -> i32; - fn _mm_testnzc_ps(a: __m128, b: __m128) -> i32; - fn _mm256_movemask_pd(a: __m256d) -> i32; - fn _mm256_movemask_ps(a: __m256) -> i32; - fn _mm256_setzero_pd() -> __m256d; - fn _mm256_setzero_ps() -> __m256; - fn _mm256_setzero_si256() -> __m256i; - fn _mm256_set_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d; - fn _mm256_set_ps(a: f32, b: f32, c: f32, d: f32, e: f32, f: f32, g: f32, h: f32) -> __m256; - fn _mm256_set_epi8( - e00: i8, - e01: i8, - e02: i8, - e03: i8, - e04: i8, - e05: i8, - e06: i8, - e07: i8, - e08: i8, - e09: i8, - e10: i8, - e11: i8, - e12: i8, - e13: i8, - e14: i8, - e15: i8, - e16: i8, - e17: i8, - e18: i8, - e19: i8, - e20: i8, - e21: i8, - e22: i8, - e23: i8, - e24: i8, - e25: i8, - e26: i8, - e27: i8, - e28: i8, - e29: i8, - e30: i8, - e31: i8, - ) -> __m256i; - fn _mm256_set_epi16( - e00: i16, - e01: i16, - e02: i16, - e03: i16, - e04: i16, - e05: 
i16, - e06: i16, - e07: i16, - e08: i16, - e09: i16, - e10: i16, - e11: i16, - e12: i16, - e13: i16, - e14: i16, - e15: i16, - ) -> __m256i; - fn _mm256_set_epi32( - e0: i32, - e1: i32, - e2: i32, - e3: i32, - e4: i32, - e5: i32, - e6: i32, - e7: i32, - ) -> __m256i; - fn _mm256_set_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i; - fn _mm256_setr_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d; - fn _mm256_setr_ps(a: f32, b: f32, c: f32, d: f32, e: f32, f: f32, g: f32, h: f32) - -> __m256; - fn _mm256_setr_epi8( - e00: i8, - e01: i8, - e02: i8, - e03: i8, - e04: i8, - e05: i8, - e06: i8, - e07: i8, - e08: i8, - e09: i8, - e10: i8, - e11: i8, - e12: i8, - e13: i8, - e14: i8, - e15: i8, - e16: i8, - e17: i8, - e18: i8, - e19: i8, - e20: i8, - e21: i8, - e22: i8, - e23: i8, - e24: i8, - e25: i8, - e26: i8, - e27: i8, - e28: i8, - e29: i8, - e30: i8, - e31: i8, - ) -> __m256i; - fn _mm256_setr_epi16( - e00: i16, - e01: i16, - e02: i16, - e03: i16, - e04: i16, - e05: i16, - e06: i16, - e07: i16, - e08: i16, - e09: i16, - e10: i16, - e11: i16, - e12: i16, - e13: i16, - e14: i16, - e15: i16, - ) -> __m256i; - fn _mm256_setr_epi32( - e0: i32, - e1: i32, - e2: i32, - e3: i32, - e4: i32, - e5: i32, - e6: i32, - e7: i32, - ) -> __m256i; - fn _mm256_setr_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i; - fn _mm256_set1_pd(a: f64) -> __m256d; - fn _mm256_set1_ps(a: f32) -> __m256; - fn _mm256_set1_epi8(a: i8) -> __m256i; - fn _mm256_set1_epi16(a: i16) -> __m256i; - fn _mm256_set1_epi32(a: i32) -> __m256i; - fn _mm256_set1_epi64x(a: i64) -> __m256i; - fn _mm256_castpd_ps(a: __m256d) -> __m256; - fn _mm256_castps_pd(a: __m256) -> __m256d; - fn _mm256_castps_si256(a: __m256) -> __m256i; - fn _mm256_castsi256_ps(a: __m256i) -> __m256; - fn _mm256_castpd_si256(a: __m256d) -> __m256i; - fn _mm256_castsi256_pd(a: __m256i) -> __m256d; - fn _mm256_castps256_ps128(a: __m256) -> __m128; - fn _mm256_castpd256_pd128(a: __m256d) -> __m128d; - fn _mm256_castsi256_si128(a: __m256i) -> 
__m128i; - fn _mm256_castps128_ps256(a: __m128) -> __m256; - fn _mm256_castpd128_pd256(a: __m128d) -> __m256d; - fn _mm256_castsi128_si256(a: __m128i) -> __m256i; - fn _mm256_zextps128_ps256(a: __m128) -> __m256; - fn _mm256_zextsi128_si256(a: __m128i) -> __m256i; - fn _mm256_zextpd128_pd256(a: __m128d) -> __m256d; - fn _mm256_undefined_ps() -> __m256; - fn _mm256_undefined_pd() -> __m256d; - fn _mm256_undefined_si256() -> __m256i; - fn _mm256_set_m128(hi: __m128, lo: __m128) -> __m256; - fn _mm256_set_m128d(hi: __m128d, lo: __m128d) -> __m256d; - fn _mm256_set_m128i(hi: __m128i, lo: __m128i) -> __m256i; - fn _mm256_setr_m128(lo: __m128, hi: __m128) -> __m256; - fn _mm256_setr_m128d(lo: __m128d, hi: __m128d) -> __m256d; - fn _mm256_setr_m128i(lo: __m128i, hi: __m128i) -> __m256i; - unsafe fn _mm256_loadu2_m128(hiaddr: *const f32, loaddr: *const f32) -> __m256; - unsafe fn _mm256_loadu2_m128d(hiaddr: *const f64, loaddr: *const f64) -> __m256d; - unsafe fn _mm256_loadu2_m128i(hiaddr: *const __m128i, loaddr: *const __m128i) -> __m256i; - unsafe fn _mm256_storeu2_m128(hiaddr: *mut f32, loaddr: *mut f32, a: __m256); - unsafe fn _mm256_storeu2_m128d(hiaddr: *mut f64, loaddr: *mut f64, a: __m256d); - unsafe fn _mm256_storeu2_m128i(hiaddr: *mut __m128i, loaddr: *mut __m128i, a: __m256i); - fn _mm256_cvtss_f32(a: __m256) -> f32; + #[doc = "See [`arch::_mm256_add_pd`]."] + #[inline(always)] + pub fn _mm256_add_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_add_pd(a, b) } + } + #[doc = "See [`arch::_mm256_add_ps`]."] + #[inline(always)] + pub fn _mm256_add_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_add_ps(a, b) } + } + #[doc = "See [`arch::_mm256_and_pd`]."] + #[inline(always)] + pub fn _mm256_and_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_and_pd(a, b) } + } + #[doc = "See [`arch::_mm256_and_ps`]."] + #[inline(always)] + pub fn _mm256_and_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_and_ps(a, b) } + } + 
#[doc = "See [`arch::_mm256_or_pd`]."] + #[inline(always)] + pub fn _mm256_or_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_or_pd(a, b) } + } + #[doc = "See [`arch::_mm256_or_ps`]."] + #[inline(always)] + pub fn _mm256_or_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_or_ps(a, b) } + } + #[doc = "See [`arch::_mm256_shuffle_pd`]."] + #[inline(always)] + pub fn _mm256_shuffle_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_shuffle_pd::(a, b) } + } + #[doc = "See [`arch::_mm256_shuffle_ps`]."] + #[inline(always)] + pub fn _mm256_shuffle_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_shuffle_ps::(a, b) } + } + #[doc = "See [`arch::_mm256_andnot_pd`]."] + #[inline(always)] + pub fn _mm256_andnot_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_andnot_pd(a, b) } + } + #[doc = "See [`arch::_mm256_andnot_ps`]."] + #[inline(always)] + pub fn _mm256_andnot_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_andnot_ps(a, b) } + } + #[doc = "See [`arch::_mm256_max_pd`]."] + #[inline(always)] + pub fn _mm256_max_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_max_pd(a, b) } + } + #[doc = "See [`arch::_mm256_max_ps`]."] + #[inline(always)] + pub fn _mm256_max_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_max_ps(a, b) } + } + #[doc = "See [`arch::_mm256_min_pd`]."] + #[inline(always)] + pub fn _mm256_min_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_min_pd(a, b) } + } + #[doc = "See [`arch::_mm256_min_ps`]."] + #[inline(always)] + pub fn _mm256_min_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_min_ps(a, b) } + } + #[doc = "See [`arch::_mm256_mul_pd`]."] + #[inline(always)] + pub fn _mm256_mul_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_mul_pd(a, b) } + } + #[doc = "See [`arch::_mm256_mul_ps`]."] + #[inline(always)] + pub fn _mm256_mul_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_mul_ps(a, b) } + } 
+ #[doc = "See [`arch::_mm256_addsub_pd`]."] + #[inline(always)] + pub fn _mm256_addsub_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_addsub_pd(a, b) } + } + #[doc = "See [`arch::_mm256_addsub_ps`]."] + #[inline(always)] + pub fn _mm256_addsub_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_addsub_ps(a, b) } + } + #[doc = "See [`arch::_mm256_sub_pd`]."] + #[inline(always)] + pub fn _mm256_sub_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_sub_pd(a, b) } + } + #[doc = "See [`arch::_mm256_sub_ps`]."] + #[inline(always)] + pub fn _mm256_sub_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_sub_ps(a, b) } + } + #[doc = "See [`arch::_mm256_div_ps`]."] + #[inline(always)] + pub fn _mm256_div_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_div_ps(a, b) } + } + #[doc = "See [`arch::_mm256_div_pd`]."] + #[inline(always)] + pub fn _mm256_div_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_div_pd(a, b) } + } + #[doc = "See [`arch::_mm256_round_pd`]."] + #[inline(always)] + pub fn _mm256_round_pd(self, a: __m256d) -> __m256d { + unsafe { _mm256_round_pd::(a) } + } + #[doc = "See [`arch::_mm256_ceil_pd`]."] + #[inline(always)] + pub fn _mm256_ceil_pd(self, a: __m256d) -> __m256d { + unsafe { _mm256_ceil_pd(a) } + } + #[doc = "See [`arch::_mm256_floor_pd`]."] + #[inline(always)] + pub fn _mm256_floor_pd(self, a: __m256d) -> __m256d { + unsafe { _mm256_floor_pd(a) } + } + #[doc = "See [`arch::_mm256_round_ps`]."] + #[inline(always)] + pub fn _mm256_round_ps(self, a: __m256) -> __m256 { + unsafe { _mm256_round_ps::(a) } + } + #[doc = "See [`arch::_mm256_ceil_ps`]."] + #[inline(always)] + pub fn _mm256_ceil_ps(self, a: __m256) -> __m256 { + unsafe { _mm256_ceil_ps(a) } + } + #[doc = "See [`arch::_mm256_floor_ps`]."] + #[inline(always)] + pub fn _mm256_floor_ps(self, a: __m256) -> __m256 { + unsafe { _mm256_floor_ps(a) } + } + #[doc = "See [`arch::_mm256_sqrt_ps`]."] + #[inline(always)] + pub fn 
_mm256_sqrt_ps(self, a: __m256) -> __m256 { + unsafe { _mm256_sqrt_ps(a) } + } + #[doc = "See [`arch::_mm256_sqrt_pd`]."] + #[inline(always)] + pub fn _mm256_sqrt_pd(self, a: __m256d) -> __m256d { + unsafe { _mm256_sqrt_pd(a) } + } + #[doc = "See [`arch::_mm256_blend_pd`]."] + #[inline(always)] + pub fn _mm256_blend_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_blend_pd::(a, b) } + } + #[doc = "See [`arch::_mm256_blend_ps`]."] + #[inline(always)] + pub fn _mm256_blend_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_blend_ps::(a, b) } + } + #[doc = "See [`arch::_mm256_blendv_pd`]."] + #[inline(always)] + pub fn _mm256_blendv_pd(self, a: __m256d, b: __m256d, c: __m256d) -> __m256d { + unsafe { _mm256_blendv_pd(a, b, c) } + } + #[doc = "See [`arch::_mm256_blendv_ps`]."] + #[inline(always)] + pub fn _mm256_blendv_ps(self, a: __m256, b: __m256, c: __m256) -> __m256 { + unsafe { _mm256_blendv_ps(a, b, c) } + } + #[doc = "See [`arch::_mm256_dp_ps`]."] + #[inline(always)] + pub fn _mm256_dp_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_dp_ps::(a, b) } + } + #[doc = "See [`arch::_mm256_hadd_pd`]."] + #[inline(always)] + pub fn _mm256_hadd_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_hadd_pd(a, b) } + } + #[doc = "See [`arch::_mm256_hadd_ps`]."] + #[inline(always)] + pub fn _mm256_hadd_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_hadd_ps(a, b) } + } + #[doc = "See [`arch::_mm256_hsub_pd`]."] + #[inline(always)] + pub fn _mm256_hsub_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_hsub_pd(a, b) } + } + #[doc = "See [`arch::_mm256_hsub_ps`]."] + #[inline(always)] + pub fn _mm256_hsub_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_hsub_ps(a, b) } + } + #[doc = "See [`arch::_mm256_xor_pd`]."] + #[inline(always)] + pub fn _mm256_xor_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_xor_pd(a, b) } + } + #[doc = "See [`arch::_mm256_xor_ps`]."] + 
#[inline(always)] + pub fn _mm256_xor_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_xor_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmp_pd`]."] + #[inline(always)] + pub fn _mm_cmp_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmp_pd::(a, b) } + } + #[doc = "See [`arch::_mm256_cmp_pd`]."] + #[inline(always)] + pub fn _mm256_cmp_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_cmp_pd::(a, b) } + } + #[doc = "See [`arch::_mm_cmp_ps`]."] + #[inline(always)] + pub fn _mm_cmp_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmp_ps::(a, b) } + } + #[doc = "See [`arch::_mm256_cmp_ps`]."] + #[inline(always)] + pub fn _mm256_cmp_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_cmp_ps::(a, b) } + } + #[doc = "See [`arch::_mm_cmp_sd`]."] + #[inline(always)] + pub fn _mm_cmp_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmp_sd::(a, b) } + } + #[doc = "See [`arch::_mm_cmp_ss`]."] + #[inline(always)] + pub fn _mm_cmp_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmp_ss::(a, b) } + } + #[doc = "See [`arch::_mm256_cvtepi32_pd`]."] + #[inline(always)] + pub fn _mm256_cvtepi32_pd(self, a: __m128i) -> __m256d { + unsafe { _mm256_cvtepi32_pd(a) } + } + #[doc = "See [`arch::_mm256_cvtepi32_ps`]."] + #[inline(always)] + pub fn _mm256_cvtepi32_ps(self, a: __m256i) -> __m256 { + unsafe { _mm256_cvtepi32_ps(a) } + } + #[doc = "See [`arch::_mm256_cvtpd_ps`]."] + #[inline(always)] + pub fn _mm256_cvtpd_ps(self, a: __m256d) -> __m128 { + unsafe { _mm256_cvtpd_ps(a) } + } + #[doc = "See [`arch::_mm256_cvtps_epi32`]."] + #[inline(always)] + pub fn _mm256_cvtps_epi32(self, a: __m256) -> __m256i { + unsafe { _mm256_cvtps_epi32(a) } + } + #[doc = "See [`arch::_mm256_cvtps_pd`]."] + #[inline(always)] + pub fn _mm256_cvtps_pd(self, a: __m128) -> __m256d { + unsafe { _mm256_cvtps_pd(a) } + } + #[doc = "See [`arch::_mm256_cvtsd_f64`]."] + #[inline(always)] + pub fn _mm256_cvtsd_f64(self, a: __m256d) -> f64 { + 
unsafe { _mm256_cvtsd_f64(a) } + } + #[doc = "See [`arch::_mm256_cvttpd_epi32`]."] + #[inline(always)] + pub fn _mm256_cvttpd_epi32(self, a: __m256d) -> __m128i { + unsafe { _mm256_cvttpd_epi32(a) } + } + #[doc = "See [`arch::_mm256_cvtpd_epi32`]."] + #[inline(always)] + pub fn _mm256_cvtpd_epi32(self, a: __m256d) -> __m128i { + unsafe { _mm256_cvtpd_epi32(a) } + } + #[doc = "See [`arch::_mm256_cvttps_epi32`]."] + #[inline(always)] + pub fn _mm256_cvttps_epi32(self, a: __m256) -> __m256i { + unsafe { _mm256_cvttps_epi32(a) } + } + #[doc = "See [`arch::_mm256_extractf128_ps`]."] + #[inline(always)] + pub fn _mm256_extractf128_ps(self, a: __m256) -> __m128 { + unsafe { _mm256_extractf128_ps::(a) } + } + #[doc = "See [`arch::_mm256_extractf128_pd`]."] + #[inline(always)] + pub fn _mm256_extractf128_pd(self, a: __m256d) -> __m128d { + unsafe { _mm256_extractf128_pd::(a) } + } + #[doc = "See [`arch::_mm256_extractf128_si256`]."] + #[inline(always)] + pub fn _mm256_extractf128_si256(self, a: __m256i) -> __m128i { + unsafe { _mm256_extractf128_si256::(a) } + } + #[doc = "See [`arch::_mm256_extract_epi32`]."] + #[inline(always)] + pub fn _mm256_extract_epi32(self, a: __m256i) -> i32 { + unsafe { _mm256_extract_epi32::(a) } + } + #[doc = "See [`arch::_mm256_cvtsi256_si32`]."] + #[inline(always)] + pub fn _mm256_cvtsi256_si32(self, a: __m256i) -> i32 { + unsafe { _mm256_cvtsi256_si32(a) } + } + #[doc = "See [`arch::_mm256_zeroall`]."] + #[inline(always)] + pub fn _mm256_zeroall(self) { + unsafe { _mm256_zeroall() } + } + #[doc = "See [`arch::_mm256_zeroupper`]."] + #[inline(always)] + pub fn _mm256_zeroupper(self) { + unsafe { _mm256_zeroupper() } + } + #[doc = "See [`arch::_mm256_permutevar_ps`]."] + #[inline(always)] + pub fn _mm256_permutevar_ps(self, a: __m256, b: __m256i) -> __m256 { + unsafe { _mm256_permutevar_ps(a, b) } + } + #[doc = "See [`arch::_mm_permutevar_ps`]."] + #[inline(always)] + pub fn _mm_permutevar_ps(self, a: __m128, b: __m128i) -> __m128 { + unsafe { 
_mm_permutevar_ps(a, b) } + } + #[doc = "See [`arch::_mm256_permute_ps`]."] + #[inline(always)] + pub fn _mm256_permute_ps(self, a: __m256) -> __m256 { + unsafe { _mm256_permute_ps::(a) } + } + #[doc = "See [`arch::_mm_permute_ps`]."] + #[inline(always)] + pub fn _mm_permute_ps(self, a: __m128) -> __m128 { + unsafe { _mm_permute_ps::(a) } + } + #[doc = "See [`arch::_mm256_permutevar_pd`]."] + #[inline(always)] + pub fn _mm256_permutevar_pd(self, a: __m256d, b: __m256i) -> __m256d { + unsafe { _mm256_permutevar_pd(a, b) } + } + #[doc = "See [`arch::_mm_permutevar_pd`]."] + #[inline(always)] + pub fn _mm_permutevar_pd(self, a: __m128d, b: __m128i) -> __m128d { + unsafe { _mm_permutevar_pd(a, b) } + } + #[doc = "See [`arch::_mm256_permute_pd`]."] + #[inline(always)] + pub fn _mm256_permute_pd(self, a: __m256d) -> __m256d { + unsafe { _mm256_permute_pd::(a) } + } + #[doc = "See [`arch::_mm_permute_pd`]."] + #[inline(always)] + pub fn _mm_permute_pd(self, a: __m128d) -> __m128d { + unsafe { _mm_permute_pd::(a) } + } + #[doc = "See [`arch::_mm256_permute2f128_ps`]."] + #[inline(always)] + pub fn _mm256_permute2f128_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_permute2f128_ps::(a, b) } + } + #[doc = "See [`arch::_mm256_permute2f128_pd`]."] + #[inline(always)] + pub fn _mm256_permute2f128_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_permute2f128_pd::(a, b) } + } + #[doc = "See [`arch::_mm256_permute2f128_si256`]."] + #[inline(always)] + pub fn _mm256_permute2f128_si256(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_permute2f128_si256::(a, b) } + } + #[doc = "See [`arch::_mm256_broadcast_ss`]."] + #[allow(clippy::trivially_copy_pass_by_ref)] + #[inline(always)] + pub fn _mm256_broadcast_ss(self, f: &f32) -> __m256 { + unsafe { _mm256_broadcast_ss(f) } + } + #[doc = "See [`arch::_mm_broadcast_ss`]."] + #[allow(clippy::trivially_copy_pass_by_ref)] + #[inline(always)] + pub fn _mm_broadcast_ss(self, f: &f32) -> __m128 { + unsafe 
{ _mm_broadcast_ss(f) } + } + #[doc = "See [`arch::_mm256_broadcast_sd`]."] + #[allow(clippy::trivially_copy_pass_by_ref)] + #[inline(always)] + pub fn _mm256_broadcast_sd(self, f: &f64) -> __m256d { + unsafe { _mm256_broadcast_sd(f) } + } + #[doc = "See [`arch::_mm256_broadcast_ps`]."] + #[inline(always)] + pub fn _mm256_broadcast_ps(self, a: &__m128) -> __m256 { + unsafe { _mm256_broadcast_ps(a) } + } + #[doc = "See [`arch::_mm256_broadcast_pd`]."] + #[inline(always)] + pub fn _mm256_broadcast_pd(self, a: &__m128d) -> __m256d { + unsafe { _mm256_broadcast_pd(a) } + } + #[doc = "See [`arch::_mm256_insertf128_ps`]."] + #[inline(always)] + pub fn _mm256_insertf128_ps(self, a: __m256, b: __m128) -> __m256 { + unsafe { _mm256_insertf128_ps::(a, b) } + } + #[doc = "See [`arch::_mm256_insertf128_pd`]."] + #[inline(always)] + pub fn _mm256_insertf128_pd(self, a: __m256d, b: __m128d) -> __m256d { + unsafe { _mm256_insertf128_pd::(a, b) } + } + #[doc = "See [`arch::_mm256_insertf128_si256`]."] + #[inline(always)] + pub fn _mm256_insertf128_si256(self, a: __m256i, b: __m128i) -> __m256i { + unsafe { _mm256_insertf128_si256::(a, b) } + } + #[doc = "See [`arch::_mm256_insert_epi8`]."] + #[inline(always)] + pub fn _mm256_insert_epi8(self, a: __m256i, i: i8) -> __m256i { + unsafe { _mm256_insert_epi8::(a, i) } + } + #[doc = "See [`arch::_mm256_insert_epi16`]."] + #[inline(always)] + pub fn _mm256_insert_epi16(self, a: __m256i, i: i16) -> __m256i { + unsafe { _mm256_insert_epi16::(a, i) } + } + #[doc = "See [`arch::_mm256_insert_epi32`]."] + #[inline(always)] + pub fn _mm256_insert_epi32(self, a: __m256i, i: i32) -> __m256i { + unsafe { _mm256_insert_epi32::(a, i) } + } + #[doc = "See [`arch::_mm256_load_pd`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm256_load_pd(self, mem_addr: *const f64) -> __m256d { + unsafe { _mm256_load_pd(mem_addr) } + } + #[doc = "See [`arch::_mm256_store_pd`]."] + #[allow(clippy::cast_ptr_alignment)] + 
#[inline(always)] + pub unsafe fn _mm256_store_pd(self, mem_addr: *mut f64, a: __m256d) { + unsafe { _mm256_store_pd(mem_addr, a) } + } + #[doc = "See [`arch::_mm256_load_ps`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm256_load_ps(self, mem_addr: *const f32) -> __m256 { + unsafe { _mm256_load_ps(mem_addr) } + } + #[doc = "See [`arch::_mm256_store_ps`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm256_store_ps(self, mem_addr: *mut f32, a: __m256) { + unsafe { _mm256_store_ps(mem_addr, a) } + } + #[doc = "See [`arch::_mm256_loadu_pd`]."] + #[inline(always)] + pub unsafe fn _mm256_loadu_pd(self, mem_addr: *const f64) -> __m256d { + unsafe { _mm256_loadu_pd(mem_addr) } + } + #[doc = "See [`arch::_mm256_storeu_pd`]."] + #[inline(always)] + pub unsafe fn _mm256_storeu_pd(self, mem_addr: *mut f64, a: __m256d) { + unsafe { _mm256_storeu_pd(mem_addr, a) } + } + #[doc = "See [`arch::_mm256_loadu_ps`]."] + #[inline(always)] + pub unsafe fn _mm256_loadu_ps(self, mem_addr: *const f32) -> __m256 { + unsafe { _mm256_loadu_ps(mem_addr) } + } + #[doc = "See [`arch::_mm256_storeu_ps`]."] + #[inline(always)] + pub unsafe fn _mm256_storeu_ps(self, mem_addr: *mut f32, a: __m256) { + unsafe { _mm256_storeu_ps(mem_addr, a) } + } + #[doc = "See [`arch::_mm256_load_si256`]."] + #[inline(always)] + pub unsafe fn _mm256_load_si256(self, mem_addr: *const __m256i) -> __m256i { + unsafe { _mm256_load_si256(mem_addr) } + } + #[doc = "See [`arch::_mm256_store_si256`]."] + #[inline(always)] + pub unsafe fn _mm256_store_si256(self, mem_addr: *mut __m256i, a: __m256i) { + unsafe { _mm256_store_si256(mem_addr, a) } + } + #[doc = "See [`arch::_mm256_loadu_si256`]."] + #[inline(always)] + pub unsafe fn _mm256_loadu_si256(self, mem_addr: *const __m256i) -> __m256i { + unsafe { _mm256_loadu_si256(mem_addr) } + } + #[doc = "See [`arch::_mm256_storeu_si256`]."] + #[inline(always)] + pub unsafe fn _mm256_storeu_si256(self, mem_addr: 
*mut __m256i, a: __m256i) { + unsafe { _mm256_storeu_si256(mem_addr, a) } + } + #[doc = "See [`arch::_mm256_maskload_pd`]."] + #[inline(always)] + pub unsafe fn _mm256_maskload_pd(self, mem_addr: *const f64, mask: __m256i) -> __m256d { + unsafe { _mm256_maskload_pd(mem_addr, mask) } + } + #[doc = "See [`arch::_mm256_maskstore_pd`]."] + #[inline(always)] + pub unsafe fn _mm256_maskstore_pd(self, mem_addr: *mut f64, mask: __m256i, a: __m256d) { + unsafe { _mm256_maskstore_pd(mem_addr, mask, a) } + } + #[doc = "See [`arch::_mm_maskload_pd`]."] + #[inline(always)] + pub unsafe fn _mm_maskload_pd(self, mem_addr: *const f64, mask: __m128i) -> __m128d { + unsafe { _mm_maskload_pd(mem_addr, mask) } + } + #[doc = "See [`arch::_mm_maskstore_pd`]."] + #[inline(always)] + pub unsafe fn _mm_maskstore_pd(self, mem_addr: *mut f64, mask: __m128i, a: __m128d) { + unsafe { _mm_maskstore_pd(mem_addr, mask, a) } + } + #[doc = "See [`arch::_mm256_maskload_ps`]."] + #[inline(always)] + pub unsafe fn _mm256_maskload_ps(self, mem_addr: *const f32, mask: __m256i) -> __m256 { + unsafe { _mm256_maskload_ps(mem_addr, mask) } + } + #[doc = "See [`arch::_mm256_maskstore_ps`]."] + #[inline(always)] + pub unsafe fn _mm256_maskstore_ps(self, mem_addr: *mut f32, mask: __m256i, a: __m256) { + unsafe { _mm256_maskstore_ps(mem_addr, mask, a) } + } + #[doc = "See [`arch::_mm_maskload_ps`]."] + #[inline(always)] + pub unsafe fn _mm_maskload_ps(self, mem_addr: *const f32, mask: __m128i) -> __m128 { + unsafe { _mm_maskload_ps(mem_addr, mask) } + } + #[doc = "See [`arch::_mm_maskstore_ps`]."] + #[inline(always)] + pub unsafe fn _mm_maskstore_ps(self, mem_addr: *mut f32, mask: __m128i, a: __m128) { + unsafe { _mm_maskstore_ps(mem_addr, mask, a) } + } + #[doc = "See [`arch::_mm256_movehdup_ps`]."] + #[inline(always)] + pub fn _mm256_movehdup_ps(self, a: __m256) -> __m256 { + unsafe { _mm256_movehdup_ps(a) } + } + #[doc = "See [`arch::_mm256_moveldup_ps`]."] + #[inline(always)] + pub fn 
_mm256_moveldup_ps(self, a: __m256) -> __m256 { + unsafe { _mm256_moveldup_ps(a) } + } + #[doc = "See [`arch::_mm256_movedup_pd`]."] + #[inline(always)] + pub fn _mm256_movedup_pd(self, a: __m256d) -> __m256d { + unsafe { _mm256_movedup_pd(a) } + } + #[doc = "See [`arch::_mm256_lddqu_si256`]."] + #[inline(always)] + pub unsafe fn _mm256_lddqu_si256(self, mem_addr: *const __m256i) -> __m256i { + unsafe { _mm256_lddqu_si256(mem_addr) } + } + #[doc = "See [`arch::_mm256_stream_si256`]."] + #[inline(always)] + pub unsafe fn _mm256_stream_si256(self, mem_addr: *mut __m256i, a: __m256i) { + unsafe { _mm256_stream_si256(mem_addr, a) } + } + #[doc = "See [`arch::_mm256_stream_pd`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm256_stream_pd(self, mem_addr: *mut f64, a: __m256d) { + unsafe { _mm256_stream_pd(mem_addr, a) } + } + #[doc = "See [`arch::_mm256_stream_ps`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm256_stream_ps(self, mem_addr: *mut f32, a: __m256) { + unsafe { _mm256_stream_ps(mem_addr, a) } + } + #[doc = "See [`arch::_mm256_rcp_ps`]."] + #[inline(always)] + pub fn _mm256_rcp_ps(self, a: __m256) -> __m256 { + unsafe { _mm256_rcp_ps(a) } + } + #[doc = "See [`arch::_mm256_rsqrt_ps`]."] + #[inline(always)] + pub fn _mm256_rsqrt_ps(self, a: __m256) -> __m256 { + unsafe { _mm256_rsqrt_ps(a) } + } + #[doc = "See [`arch::_mm256_unpackhi_pd`]."] + #[inline(always)] + pub fn _mm256_unpackhi_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_unpackhi_pd(a, b) } + } + #[doc = "See [`arch::_mm256_unpackhi_ps`]."] + #[inline(always)] + pub fn _mm256_unpackhi_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_unpackhi_ps(a, b) } + } + #[doc = "See [`arch::_mm256_unpacklo_pd`]."] + #[inline(always)] + pub fn _mm256_unpacklo_pd(self, a: __m256d, b: __m256d) -> __m256d { + unsafe { _mm256_unpacklo_pd(a, b) } + } + #[doc = "See [`arch::_mm256_unpacklo_ps`]."] + #[inline(always)] + pub 
fn _mm256_unpacklo_ps(self, a: __m256, b: __m256) -> __m256 { + unsafe { _mm256_unpacklo_ps(a, b) } + } + #[doc = "See [`arch::_mm256_testz_si256`]."] + #[inline(always)] + pub fn _mm256_testz_si256(self, a: __m256i, b: __m256i) -> i32 { + unsafe { _mm256_testz_si256(a, b) } + } + #[doc = "See [`arch::_mm256_testc_si256`]."] + #[inline(always)] + pub fn _mm256_testc_si256(self, a: __m256i, b: __m256i) -> i32 { + unsafe { _mm256_testc_si256(a, b) } + } + #[doc = "See [`arch::_mm256_testnzc_si256`]."] + #[inline(always)] + pub fn _mm256_testnzc_si256(self, a: __m256i, b: __m256i) -> i32 { + unsafe { _mm256_testnzc_si256(a, b) } + } + #[doc = "See [`arch::_mm256_testz_pd`]."] + #[inline(always)] + pub fn _mm256_testz_pd(self, a: __m256d, b: __m256d) -> i32 { + unsafe { _mm256_testz_pd(a, b) } + } + #[doc = "See [`arch::_mm256_testc_pd`]."] + #[inline(always)] + pub fn _mm256_testc_pd(self, a: __m256d, b: __m256d) -> i32 { + unsafe { _mm256_testc_pd(a, b) } + } + #[doc = "See [`arch::_mm256_testnzc_pd`]."] + #[inline(always)] + pub fn _mm256_testnzc_pd(self, a: __m256d, b: __m256d) -> i32 { + unsafe { _mm256_testnzc_pd(a, b) } + } + #[doc = "See [`arch::_mm_testz_pd`]."] + #[inline(always)] + pub fn _mm_testz_pd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_testz_pd(a, b) } + } + #[doc = "See [`arch::_mm_testc_pd`]."] + #[inline(always)] + pub fn _mm_testc_pd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_testc_pd(a, b) } + } + #[doc = "See [`arch::_mm_testnzc_pd`]."] + #[inline(always)] + pub fn _mm_testnzc_pd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_testnzc_pd(a, b) } + } + #[doc = "See [`arch::_mm256_testz_ps`]."] + #[inline(always)] + pub fn _mm256_testz_ps(self, a: __m256, b: __m256) -> i32 { + unsafe { _mm256_testz_ps(a, b) } + } + #[doc = "See [`arch::_mm256_testc_ps`]."] + #[inline(always)] + pub fn _mm256_testc_ps(self, a: __m256, b: __m256) -> i32 { + unsafe { _mm256_testc_ps(a, b) } + } + #[doc = "See 
[`arch::_mm256_testnzc_ps`]."] + #[inline(always)] + pub fn _mm256_testnzc_ps(self, a: __m256, b: __m256) -> i32 { + unsafe { _mm256_testnzc_ps(a, b) } + } + #[doc = "See [`arch::_mm_testz_ps`]."] + #[inline(always)] + pub fn _mm_testz_ps(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_testz_ps(a, b) } + } + #[doc = "See [`arch::_mm_testc_ps`]."] + #[inline(always)] + pub fn _mm_testc_ps(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_testc_ps(a, b) } + } + #[doc = "See [`arch::_mm_testnzc_ps`]."] + #[inline(always)] + pub fn _mm_testnzc_ps(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_testnzc_ps(a, b) } + } + #[doc = "See [`arch::_mm256_movemask_pd`]."] + #[inline(always)] + pub fn _mm256_movemask_pd(self, a: __m256d) -> i32 { + unsafe { _mm256_movemask_pd(a) } + } + #[doc = "See [`arch::_mm256_movemask_ps`]."] + #[inline(always)] + pub fn _mm256_movemask_ps(self, a: __m256) -> i32 { + unsafe { _mm256_movemask_ps(a) } + } + #[doc = "See [`arch::_mm256_setzero_pd`]."] + #[inline(always)] + pub fn _mm256_setzero_pd(self) -> __m256d { + unsafe { _mm256_setzero_pd() } + } + #[doc = "See [`arch::_mm256_setzero_ps`]."] + #[inline(always)] + pub fn _mm256_setzero_ps(self) -> __m256 { + unsafe { _mm256_setzero_ps() } + } + #[doc = "See [`arch::_mm256_setzero_si256`]."] + #[inline(always)] + pub fn _mm256_setzero_si256(self) -> __m256i { + unsafe { _mm256_setzero_si256() } + } + #[doc = "See [`arch::_mm256_set_pd`]."] + #[inline(always)] + pub fn _mm256_set_pd(self, a: f64, b: f64, c: f64, d: f64) -> __m256d { + unsafe { _mm256_set_pd(a, b, c, d) } + } + #[doc = "See [`arch::_mm256_set_ps`]."] + #[inline(always)] + pub fn _mm256_set_ps( + self, + a: f32, + b: f32, + c: f32, + d: f32, + e: f32, + f: f32, + g: f32, + h: f32, + ) -> __m256 { + unsafe { _mm256_set_ps(a, b, c, d, e, f, g, h) } + } + #[doc = "See [`arch::_mm256_set_epi8`]."] + #[inline(always)] + pub fn _mm256_set_epi8( + self, + e00: i8, + e01: i8, + e02: i8, + e03: i8, + e04: i8, + e05: i8, + 
e06: i8, + e07: i8, + e08: i8, + e09: i8, + e10: i8, + e11: i8, + e12: i8, + e13: i8, + e14: i8, + e15: i8, + e16: i8, + e17: i8, + e18: i8, + e19: i8, + e20: i8, + e21: i8, + e22: i8, + e23: i8, + e24: i8, + e25: i8, + e26: i8, + e27: i8, + e28: i8, + e29: i8, + e30: i8, + e31: i8, + ) -> __m256i { + unsafe { + _mm256_set_epi8( + e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, + e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, + ) + } + } + #[doc = "See [`arch::_mm256_set_epi16`]."] + #[inline(always)] + pub fn _mm256_set_epi16( + self, + e00: i16, + e01: i16, + e02: i16, + e03: i16, + e04: i16, + e05: i16, + e06: i16, + e07: i16, + e08: i16, + e09: i16, + e10: i16, + e11: i16, + e12: i16, + e13: i16, + e14: i16, + e15: i16, + ) -> __m256i { + unsafe { + _mm256_set_epi16( + e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, + ) + } + } + #[doc = "See [`arch::_mm256_set_epi32`]."] + #[inline(always)] + pub fn _mm256_set_epi32( + self, + e0: i32, + e1: i32, + e2: i32, + e3: i32, + e4: i32, + e5: i32, + e6: i32, + e7: i32, + ) -> __m256i { + unsafe { _mm256_set_epi32(e0, e1, e2, e3, e4, e5, e6, e7) } + } + #[doc = "See [`arch::_mm256_set_epi64x`]."] + #[inline(always)] + pub fn _mm256_set_epi64x(self, a: i64, b: i64, c: i64, d: i64) -> __m256i { + unsafe { _mm256_set_epi64x(a, b, c, d) } + } + #[doc = "See [`arch::_mm256_setr_pd`]."] + #[inline(always)] + pub fn _mm256_setr_pd(self, a: f64, b: f64, c: f64, d: f64) -> __m256d { + unsafe { _mm256_setr_pd(a, b, c, d) } + } + #[doc = "See [`arch::_mm256_setr_ps`]."] + #[inline(always)] + pub fn _mm256_setr_ps( + self, + a: f32, + b: f32, + c: f32, + d: f32, + e: f32, + f: f32, + g: f32, + h: f32, + ) -> __m256 { + unsafe { _mm256_setr_ps(a, b, c, d, e, f, g, h) } + } + #[doc = "See [`arch::_mm256_setr_epi8`]."] + #[inline(always)] + pub fn _mm256_setr_epi8( + self, + e00: i8, + e01: i8, + e02: i8, + e03: i8, + e04: i8, + e05: i8, 
+ e06: i8, + e07: i8, + e08: i8, + e09: i8, + e10: i8, + e11: i8, + e12: i8, + e13: i8, + e14: i8, + e15: i8, + e16: i8, + e17: i8, + e18: i8, + e19: i8, + e20: i8, + e21: i8, + e22: i8, + e23: i8, + e24: i8, + e25: i8, + e26: i8, + e27: i8, + e28: i8, + e29: i8, + e30: i8, + e31: i8, + ) -> __m256i { + unsafe { + _mm256_setr_epi8( + e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, + e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, + ) + } + } + #[doc = "See [`arch::_mm256_setr_epi16`]."] + #[inline(always)] + pub fn _mm256_setr_epi16( + self, + e00: i16, + e01: i16, + e02: i16, + e03: i16, + e04: i16, + e05: i16, + e06: i16, + e07: i16, + e08: i16, + e09: i16, + e10: i16, + e11: i16, + e12: i16, + e13: i16, + e14: i16, + e15: i16, + ) -> __m256i { + unsafe { + _mm256_setr_epi16( + e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, + ) + } + } + #[doc = "See [`arch::_mm256_setr_epi32`]."] + #[inline(always)] + pub fn _mm256_setr_epi32( + self, + e0: i32, + e1: i32, + e2: i32, + e3: i32, + e4: i32, + e5: i32, + e6: i32, + e7: i32, + ) -> __m256i { + unsafe { _mm256_setr_epi32(e0, e1, e2, e3, e4, e5, e6, e7) } + } + #[doc = "See [`arch::_mm256_setr_epi64x`]."] + #[inline(always)] + pub fn _mm256_setr_epi64x(self, a: i64, b: i64, c: i64, d: i64) -> __m256i { + unsafe { _mm256_setr_epi64x(a, b, c, d) } + } + #[doc = "See [`arch::_mm256_set1_pd`]."] + #[inline(always)] + pub fn _mm256_set1_pd(self, a: f64) -> __m256d { + unsafe { _mm256_set1_pd(a) } + } + #[doc = "See [`arch::_mm256_set1_ps`]."] + #[inline(always)] + pub fn _mm256_set1_ps(self, a: f32) -> __m256 { + unsafe { _mm256_set1_ps(a) } + } + #[doc = "See [`arch::_mm256_set1_epi8`]."] + #[inline(always)] + pub fn _mm256_set1_epi8(self, a: i8) -> __m256i { + unsafe { _mm256_set1_epi8(a) } + } + #[doc = "See [`arch::_mm256_set1_epi16`]."] + #[inline(always)] + pub fn _mm256_set1_epi16(self, a: i16) -> __m256i { + unsafe { 
_mm256_set1_epi16(a) } + } + #[doc = "See [`arch::_mm256_set1_epi32`]."] + #[inline(always)] + pub fn _mm256_set1_epi32(self, a: i32) -> __m256i { + unsafe { _mm256_set1_epi32(a) } + } + #[doc = "See [`arch::_mm256_set1_epi64x`]."] + #[inline(always)] + pub fn _mm256_set1_epi64x(self, a: i64) -> __m256i { + unsafe { _mm256_set1_epi64x(a) } + } + #[doc = "See [`arch::_mm256_castpd_ps`]."] + #[inline(always)] + pub fn _mm256_castpd_ps(self, a: __m256d) -> __m256 { + unsafe { _mm256_castpd_ps(a) } + } + #[doc = "See [`arch::_mm256_castps_pd`]."] + #[inline(always)] + pub fn _mm256_castps_pd(self, a: __m256) -> __m256d { + unsafe { _mm256_castps_pd(a) } + } + #[doc = "See [`arch::_mm256_castps_si256`]."] + #[inline(always)] + pub fn _mm256_castps_si256(self, a: __m256) -> __m256i { + unsafe { _mm256_castps_si256(a) } + } + #[doc = "See [`arch::_mm256_castsi256_ps`]."] + #[inline(always)] + pub fn _mm256_castsi256_ps(self, a: __m256i) -> __m256 { + unsafe { _mm256_castsi256_ps(a) } + } + #[doc = "See [`arch::_mm256_castpd_si256`]."] + #[inline(always)] + pub fn _mm256_castpd_si256(self, a: __m256d) -> __m256i { + unsafe { _mm256_castpd_si256(a) } + } + #[doc = "See [`arch::_mm256_castsi256_pd`]."] + #[inline(always)] + pub fn _mm256_castsi256_pd(self, a: __m256i) -> __m256d { + unsafe { _mm256_castsi256_pd(a) } + } + #[doc = "See [`arch::_mm256_castps256_ps128`]."] + #[inline(always)] + pub fn _mm256_castps256_ps128(self, a: __m256) -> __m128 { + unsafe { _mm256_castps256_ps128(a) } + } + #[doc = "See [`arch::_mm256_castpd256_pd128`]."] + #[inline(always)] + pub fn _mm256_castpd256_pd128(self, a: __m256d) -> __m128d { + unsafe { _mm256_castpd256_pd128(a) } + } + #[doc = "See [`arch::_mm256_castsi256_si128`]."] + #[inline(always)] + pub fn _mm256_castsi256_si128(self, a: __m256i) -> __m128i { + unsafe { _mm256_castsi256_si128(a) } + } + #[doc = "See [`arch::_mm256_castps128_ps256`]."] + #[inline(always)] + pub fn _mm256_castps128_ps256(self, a: __m128) -> __m256 { + 
unsafe { _mm256_castps128_ps256(a) } + } + #[doc = "See [`arch::_mm256_castpd128_pd256`]."] + #[inline(always)] + pub fn _mm256_castpd128_pd256(self, a: __m128d) -> __m256d { + unsafe { _mm256_castpd128_pd256(a) } + } + #[doc = "See [`arch::_mm256_castsi128_si256`]."] + #[inline(always)] + pub fn _mm256_castsi128_si256(self, a: __m128i) -> __m256i { + unsafe { _mm256_castsi128_si256(a) } + } + #[doc = "See [`arch::_mm256_zextps128_ps256`]."] + #[inline(always)] + pub fn _mm256_zextps128_ps256(self, a: __m128) -> __m256 { + unsafe { _mm256_zextps128_ps256(a) } + } + #[doc = "See [`arch::_mm256_zextsi128_si256`]."] + #[inline(always)] + pub fn _mm256_zextsi128_si256(self, a: __m128i) -> __m256i { + unsafe { _mm256_zextsi128_si256(a) } + } + #[doc = "See [`arch::_mm256_zextpd128_pd256`]."] + #[inline(always)] + pub fn _mm256_zextpd128_pd256(self, a: __m128d) -> __m256d { + unsafe { _mm256_zextpd128_pd256(a) } + } + #[doc = "See [`arch::_mm256_undefined_ps`]."] + #[inline(always)] + pub fn _mm256_undefined_ps(self) -> __m256 { + unsafe { _mm256_undefined_ps() } + } + #[doc = "See [`arch::_mm256_undefined_pd`]."] + #[inline(always)] + pub fn _mm256_undefined_pd(self) -> __m256d { + unsafe { _mm256_undefined_pd() } + } + #[doc = "See [`arch::_mm256_undefined_si256`]."] + #[inline(always)] + pub fn _mm256_undefined_si256(self) -> __m256i { + unsafe { _mm256_undefined_si256() } + } + #[doc = "See [`arch::_mm256_set_m128`]."] + #[inline(always)] + pub fn _mm256_set_m128(self, hi: __m128, lo: __m128) -> __m256 { + unsafe { _mm256_set_m128(hi, lo) } + } + #[doc = "See [`arch::_mm256_set_m128d`]."] + #[inline(always)] + pub fn _mm256_set_m128d(self, hi: __m128d, lo: __m128d) -> __m256d { + unsafe { _mm256_set_m128d(hi, lo) } + } + #[doc = "See [`arch::_mm256_set_m128i`]."] + #[inline(always)] + pub fn _mm256_set_m128i(self, hi: __m128i, lo: __m128i) -> __m256i { + unsafe { _mm256_set_m128i(hi, lo) } + } + #[doc = "See [`arch::_mm256_setr_m128`]."] + #[inline(always)] + pub fn 
_mm256_setr_m128(self, lo: __m128, hi: __m128) -> __m256 { + unsafe { _mm256_setr_m128(lo, hi) } + } + #[doc = "See [`arch::_mm256_setr_m128d`]."] + #[inline(always)] + pub fn _mm256_setr_m128d(self, lo: __m128d, hi: __m128d) -> __m256d { + unsafe { _mm256_setr_m128d(lo, hi) } + } + #[doc = "See [`arch::_mm256_setr_m128i`]."] + #[inline(always)] + pub fn _mm256_setr_m128i(self, lo: __m128i, hi: __m128i) -> __m256i { + unsafe { _mm256_setr_m128i(lo, hi) } + } + #[doc = "See [`arch::_mm256_loadu2_m128`]."] + #[inline(always)] + pub unsafe fn _mm256_loadu2_m128(self, hiaddr: *const f32, loaddr: *const f32) -> __m256 { + unsafe { _mm256_loadu2_m128(hiaddr, loaddr) } + } + #[doc = "See [`arch::_mm256_loadu2_m128d`]."] + #[inline(always)] + pub unsafe fn _mm256_loadu2_m128d(self, hiaddr: *const f64, loaddr: *const f64) -> __m256d { + unsafe { _mm256_loadu2_m128d(hiaddr, loaddr) } + } + #[doc = "See [`arch::_mm256_loadu2_m128i`]."] + #[inline(always)] + pub unsafe fn _mm256_loadu2_m128i( + self, + hiaddr: *const __m128i, + loaddr: *const __m128i, + ) -> __m256i { + unsafe { _mm256_loadu2_m128i(hiaddr, loaddr) } + } + #[doc = "See [`arch::_mm256_storeu2_m128`]."] + #[inline(always)] + pub unsafe fn _mm256_storeu2_m128(self, hiaddr: *mut f32, loaddr: *mut f32, a: __m256) { + unsafe { _mm256_storeu2_m128(hiaddr, loaddr, a) } + } + #[doc = "See [`arch::_mm256_storeu2_m128d`]."] + #[inline(always)] + pub unsafe fn _mm256_storeu2_m128d(self, hiaddr: *mut f64, loaddr: *mut f64, a: __m256d) { + unsafe { _mm256_storeu2_m128d(hiaddr, loaddr, a) } + } + #[doc = "See [`arch::_mm256_storeu2_m128i`]."] + #[inline(always)] + pub unsafe fn _mm256_storeu2_m128i( + self, + hiaddr: *mut __m128i, + loaddr: *mut __m128i, + a: __m256i, + ) { + unsafe { _mm256_storeu2_m128i(hiaddr, loaddr, a) } + } + #[doc = "See [`arch::_mm256_cvtss_f32`]."] + #[inline(always)] + pub fn _mm256_cvtss_f32(self, a: __m256) -> f32 { + unsafe { _mm256_cvtss_f32(a) } } } diff --git 
a/fearless_simd/src/core_arch/x86/avx2.rs b/fearless_simd/src/core_arch/x86/avx2.rs index 01f8e76cd..af6792e50 100644 --- a/fearless_simd/src/core_arch/x86/avx2.rs +++ b/fearless_simd/src/core_arch/x86/avx2.rs @@ -1,355 +1,1155 @@ -// Copyright 2024 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -#![expect( - clippy::missing_safety_doc, - reason = "TODO: https://github.com/linebender/fearless_simd/issues/40" -)] +// This file is autogenerated by fearless_simd_gen -//! Access to AVX2 intrinsics. - -use crate::impl_macros::delegate; +use arch::*; #[cfg(target_arch = "x86")] use core::arch::x86 as arch; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as arch; - -use arch::*; - -/// A token for AVX2 intrinsics on `x86` and `x86_64`. +#[doc = "A token for `Avx2` intrinsics on `x86` and `x86_64`."] #[derive(Clone, Copy, Debug)] pub struct Avx2 { _private: (), } - +#[allow( + clippy::missing_safety_doc, + reason = "The underlying functions have their own safety docs" +)] impl Avx2 { - /// Create a SIMD token. - /// - /// # Safety - /// - /// The required CPU features must be available. + #[doc = r" Create a SIMD token."] + #[doc = r""] + #[doc = r" # Safety"] + #[doc = r""] + #[doc = r" The required CPU features must be available."] + #[inline] pub const unsafe fn new_unchecked() -> Self { Self { _private: () } } - - delegate! 
{ arch: - fn _mm256_abs_epi32(a: __m256i) -> __m256i; - fn _mm256_abs_epi16(a: __m256i) -> __m256i; - fn _mm256_abs_epi8(a: __m256i) -> __m256i; - fn _mm256_add_epi64(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_add_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_add_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_add_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_adds_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_adds_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_adds_epu8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_adds_epu16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_alignr_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_and_si256(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_andnot_si256(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_avg_epu16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_avg_epu8(a: __m256i, b: __m256i) -> __m256i; - fn _mm_blend_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm256_blend_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_blend_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_blendv_epi8(a: __m256i, b: __m256i, mask: __m256i) -> __m256i; - fn _mm_broadcastb_epi8(a: __m128i) -> __m128i; - fn _mm256_broadcastb_epi8(a: __m128i) -> __m256i; - fn _mm_broadcastd_epi32(a: __m128i) -> __m128i; - fn _mm256_broadcastd_epi32(a: __m128i) -> __m256i; - fn _mm_broadcastq_epi64(a: __m128i) -> __m128i; - fn _mm256_broadcastq_epi64(a: __m128i) -> __m256i; - fn _mm_broadcastsd_pd(a: __m128d) -> __m128d; - fn _mm256_broadcastsd_pd(a: __m128d) -> __m256d; - fn _mm256_broadcastsi128_si256(a: __m128i) -> __m256i; - fn _mm_broadcastss_ps(a: __m128) -> __m128; - fn _mm256_broadcastss_ps(a: __m128) -> __m256; - fn _mm_broadcastw_epi16(a: __m128i) -> __m128i; - fn _mm256_broadcastw_epi16(a: __m128i) -> __m256i; - fn _mm256_cmpeq_epi64(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_cmpeq_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_cmpeq_epi16(a: __m256i, b: __m256i) -> __m256i; - 
fn _mm256_cmpeq_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_cmpgt_epi64(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_cmpgt_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_cmpgt_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_cmpgt_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_cvtepi16_epi32(a: __m128i) -> __m256i; - fn _mm256_cvtepi16_epi64(a: __m128i) -> __m256i; - fn _mm256_cvtepi32_epi64(a: __m128i) -> __m256i; - fn _mm256_cvtepi8_epi16(a: __m128i) -> __m256i; - fn _mm256_cvtepi8_epi32(a: __m128i) -> __m256i; - fn _mm256_cvtepi8_epi64(a: __m128i) -> __m256i; - fn _mm256_cvtepu16_epi32(a: __m128i) -> __m256i; - fn _mm256_cvtepu16_epi64(a: __m128i) -> __m256i; - fn _mm256_cvtepu32_epi64(a: __m128i) -> __m256i; - fn _mm256_cvtepu8_epi16(a: __m128i) -> __m256i; - fn _mm256_cvtepu8_epi32(a: __m128i) -> __m256i; - fn _mm256_cvtepu8_epi64(a: __m128i) -> __m256i; - fn _mm256_extracti128_si256(a: __m256i) -> __m128i; - fn _mm256_hadd_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_hadd_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_hadds_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_hsub_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_hsub_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_hsubs_epi16(a: __m256i, b: __m256i) -> __m256i; - unsafe fn _mm_i32gather_epi32( - slice: *const i32, - offsets: __m128i, - ) -> __m128i; - unsafe fn _mm_mask_i32gather_epi32( - src: __m128i, - slice: *const i32, - offsets: __m128i, - mask: __m128i, - ) -> __m128i; - unsafe fn _mm256_i32gather_epi32( - slice: *const i32, - offsets: __m256i, - ) -> __m256i; - unsafe fn _mm256_mask_i32gather_epi32( - src: __m256i, - slice: *const i32, - offsets: __m256i, - mask: __m256i, - ) -> __m256i; - unsafe fn _mm_i32gather_ps(slice: *const f32, offsets: __m128i) - -> __m128; - unsafe fn _mm_mask_i32gather_ps( - src: __m128, - slice: *const f32, - offsets: __m128i, - mask: __m128, - ) -> __m128; - unsafe fn _mm256_i32gather_ps( - slice: 
*const f32, - offsets: __m256i, - ) -> __m256; - unsafe fn _mm256_mask_i32gather_ps( - src: __m256, - slice: *const f32, - offsets: __m256i, - mask: __m256, - ) -> __m256; - unsafe fn _mm_i32gather_epi64( - slice: *const i64, - offsets: __m128i, - ) -> __m128i; - unsafe fn _mm_mask_i32gather_epi64( - src: __m128i, - slice: *const i64, - offsets: __m128i, - mask: __m128i, - ) -> __m128i; - unsafe fn _mm256_i32gather_epi64( - slice: *const i64, - offsets: __m128i, - ) -> __m256i; - unsafe fn _mm256_mask_i32gather_epi64( - src: __m256i, - slice: *const i64, - offsets: __m128i, - mask: __m256i, - ) -> __m256i; - unsafe fn _mm_i32gather_pd( - slice: *const f64, - offsets: __m128i, - ) -> __m128d; - unsafe fn _mm_mask_i32gather_pd( - src: __m128d, - slice: *const f64, - offsets: __m128i, - mask: __m128d, - ) -> __m128d; - unsafe fn _mm256_i32gather_pd( - slice: *const f64, - offsets: __m128i, - ) -> __m256d; - unsafe fn _mm256_mask_i32gather_pd( - src: __m256d, - slice: *const f64, - offsets: __m128i, - mask: __m256d, - ) -> __m256d; - unsafe fn _mm_i64gather_epi32( - slice: *const i32, - offsets: __m128i, - ) -> __m128i; - unsafe fn _mm_mask_i64gather_epi32( - src: __m128i, - slice: *const i32, - offsets: __m128i, - mask: __m128i, - ) -> __m128i; - unsafe fn _mm256_i64gather_epi32( - slice: *const i32, - offsets: __m256i, - ) -> __m128i; - unsafe fn _mm256_mask_i64gather_epi32( - src: __m128i, - slice: *const i32, - offsets: __m256i, - mask: __m128i, - ) -> __m128i; - unsafe fn _mm_i64gather_ps(slice: *const f32, offsets: __m128i) - -> __m128; - unsafe fn _mm_mask_i64gather_ps( - src: __m128, - slice: *const f32, - offsets: __m128i, - mask: __m128, - ) -> __m128; - unsafe fn _mm256_i64gather_ps( - slice: *const f32, - offsets: __m256i, - ) -> __m128; - unsafe fn _mm256_mask_i64gather_ps( - src: __m128, - slice: *const f32, - offsets: __m256i, - mask: __m128, - ) -> __m128; - unsafe fn _mm_i64gather_epi64( - slice: *const i64, - offsets: __m128i, - ) -> __m128i; - unsafe 
fn _mm_mask_i64gather_epi64( - src: __m128i, - slice: *const i64, - offsets: __m128i, - mask: __m128i, - ) -> __m128i; - unsafe fn _mm256_i64gather_epi64( - slice: *const i64, - offsets: __m256i, - ) -> __m256i; - unsafe fn _mm256_mask_i64gather_epi64( - src: __m256i, - slice: *const i64, - offsets: __m256i, - mask: __m256i, - ) -> __m256i; - unsafe fn _mm_i64gather_pd( - slice: *const f64, - offsets: __m128i, - ) -> __m128d; - unsafe fn _mm_mask_i64gather_pd( - src: __m128d, - slice: *const f64, - offsets: __m128i, - mask: __m128d, - ) -> __m128d; - unsafe fn _mm256_i64gather_pd( - slice: *const f64, - offsets: __m256i, - ) -> __m256d; - unsafe fn _mm256_mask_i64gather_pd( - src: __m256d, - slice: *const f64, - offsets: __m256i, - mask: __m256d, - ) -> __m256d; - fn _mm256_inserti128_si256(a: __m256i, b: __m128i) -> __m256i; - fn _mm256_madd_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_maddubs_epi16(a: __m256i, b: __m256i) -> __m256i; - unsafe fn _mm_maskload_epi32(mem_addr: *const i32, mask: __m128i) -> __m128i; - unsafe fn _mm256_maskload_epi32(mem_addr: *const i32, mask: __m256i) -> __m256i; - unsafe fn _mm_maskload_epi64(mem_addr: *const i64, mask: __m128i) -> __m128i; - unsafe fn _mm256_maskload_epi64(mem_addr: *const i64, mask: __m256i) -> __m256i; - unsafe fn _mm_maskstore_epi32(mem_addr: *mut i32, mask: __m128i, a: __m128i); - unsafe fn _mm256_maskstore_epi32(mem_addr: *mut i32, mask: __m256i, a: __m256i); - unsafe fn _mm_maskstore_epi64(mem_addr: *mut i64, mask: __m128i, a: __m128i); - unsafe fn _mm256_maskstore_epi64(mem_addr: *mut i64, mask: __m256i, a: __m256i); - fn _mm256_max_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_max_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_max_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_max_epu16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_max_epu32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_max_epu8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_min_epi16(a: __m256i, b: 
__m256i) -> __m256i; - fn _mm256_min_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_min_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_min_epu16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_min_epu32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_min_epu8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_movemask_epi8(a: __m256i) -> i32; - fn _mm256_mpsadbw_epu8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_mul_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_mul_epu32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_mulhi_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_mulhi_epu16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_mullo_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_mullo_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_mulhrs_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_or_si256(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_packs_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_packs_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_packus_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_packus_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_permutevar8x32_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_permute4x64_epi64(a: __m256i) -> __m256i; - fn _mm256_permute2x128_si256(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_permute4x64_pd(a: __m256d) -> __m256d; - fn _mm256_permutevar8x32_ps(a: __m256, idx: __m256i) -> __m256; - fn _mm256_sad_epu8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_shuffle_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_shuffle_epi32(a: __m256i) -> __m256i; - fn _mm256_shufflehi_epi16(a: __m256i) -> __m256i; - fn _mm256_shufflelo_epi16(a: __m256i) -> __m256i; - fn _mm256_sign_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_sign_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_sign_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_sll_epi16(a: __m256i, count: __m128i) -> __m256i; - fn _mm256_sll_epi32(a: __m256i, count: __m128i) 
-> __m256i; - fn _mm256_sll_epi64(a: __m256i, count: __m128i) -> __m256i; - fn _mm256_slli_epi16(a: __m256i) -> __m256i; - fn _mm256_slli_epi32(a: __m256i) -> __m256i; - fn _mm256_slli_epi64(a: __m256i) -> __m256i; - fn _mm256_slli_si256(a: __m256i) -> __m256i; - fn _mm256_bslli_epi128(a: __m256i) -> __m256i; - fn _mm_sllv_epi32(a: __m128i, count: __m128i) -> __m128i; - fn _mm256_sllv_epi32(a: __m256i, count: __m256i) -> __m256i; - fn _mm_sllv_epi64(a: __m128i, count: __m128i) -> __m128i; - fn _mm256_sllv_epi64(a: __m256i, count: __m256i) -> __m256i; - fn _mm256_sra_epi16(a: __m256i, count: __m128i) -> __m256i; - fn _mm256_sra_epi32(a: __m256i, count: __m128i) -> __m256i; - fn _mm256_srai_epi16(a: __m256i) -> __m256i; - fn _mm256_srai_epi32(a: __m256i) -> __m256i; - fn _mm_srav_epi32(a: __m128i, count: __m128i) -> __m128i; - fn _mm256_srav_epi32(a: __m256i, count: __m256i) -> __m256i; - fn _mm256_srli_si256(a: __m256i) -> __m256i; - fn _mm256_bsrli_epi128(a: __m256i) -> __m256i; - fn _mm256_srl_epi16(a: __m256i, count: __m128i) -> __m256i; - fn _mm256_srl_epi32(a: __m256i, count: __m128i) -> __m256i; - fn _mm256_srl_epi64(a: __m256i, count: __m128i) -> __m256i; - fn _mm256_srli_epi16(a: __m256i) -> __m256i; - fn _mm256_srli_epi32(a: __m256i) -> __m256i; - fn _mm256_srli_epi64(a: __m256i) -> __m256i; - fn _mm_srlv_epi32(a: __m128i, count: __m128i) -> __m128i; - fn _mm256_srlv_epi32(a: __m256i, count: __m256i) -> __m256i; - fn _mm_srlv_epi64(a: __m128i, count: __m128i) -> __m128i; - fn _mm256_srlv_epi64(a: __m256i, count: __m256i) -> __m256i; - fn _mm256_sub_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_sub_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_sub_epi64(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_sub_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_subs_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_subs_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_subs_epu16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_subs_epu8(a: 
__m256i, b: __m256i) -> __m256i; - fn _mm256_unpackhi_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_unpacklo_epi8(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_unpackhi_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_unpacklo_epi16(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_unpackhi_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_unpacklo_epi32(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_unpackhi_epi64(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_unpacklo_epi64(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_xor_si256(a: __m256i, b: __m256i) -> __m256i; - fn _mm256_extract_epi8(a: __m256i) -> i32; - fn _mm256_extract_epi16(a: __m256i) -> i32; - fn _mm256_extract_epi32(a: __m256i) -> i32; - fn _mm256_cvtsd_f64(a: __m256d) -> f64; - fn _mm256_cvtsi256_si32(a: __m256i) -> i32; + #[doc = "See [`arch::_mm256_abs_epi32`]."] + #[inline(always)] + pub fn _mm256_abs_epi32(self, a: __m256i) -> __m256i { + unsafe { _mm256_abs_epi32(a) } + } + #[doc = "See [`arch::_mm256_abs_epi16`]."] + #[inline(always)] + pub fn _mm256_abs_epi16(self, a: __m256i) -> __m256i { + unsafe { _mm256_abs_epi16(a) } + } + #[doc = "See [`arch::_mm256_abs_epi8`]."] + #[inline(always)] + pub fn _mm256_abs_epi8(self, a: __m256i) -> __m256i { + unsafe { _mm256_abs_epi8(a) } + } + #[doc = "See [`arch::_mm256_add_epi64`]."] + #[inline(always)] + pub fn _mm256_add_epi64(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_add_epi64(a, b) } + } + #[doc = "See [`arch::_mm256_add_epi32`]."] + #[inline(always)] + pub fn _mm256_add_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_add_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_add_epi16`]."] + #[inline(always)] + pub fn _mm256_add_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_add_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_add_epi8`]."] + #[inline(always)] + pub fn _mm256_add_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_add_epi8(a, b) } + } + #[doc = "See 
[`arch::_mm256_adds_epi8`]."] + #[inline(always)] + pub fn _mm256_adds_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_adds_epi8(a, b) } + } + #[doc = "See [`arch::_mm256_adds_epi16`]."] + #[inline(always)] + pub fn _mm256_adds_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_adds_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_adds_epu8`]."] + #[inline(always)] + pub fn _mm256_adds_epu8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_adds_epu8(a, b) } + } + #[doc = "See [`arch::_mm256_adds_epu16`]."] + #[inline(always)] + pub fn _mm256_adds_epu16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_adds_epu16(a, b) } + } + #[doc = "See [`arch::_mm256_alignr_epi8`]."] + #[inline(always)] + pub fn _mm256_alignr_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_alignr_epi8::(a, b) } + } + #[doc = "See [`arch::_mm256_and_si256`]."] + #[inline(always)] + pub fn _mm256_and_si256(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_and_si256(a, b) } + } + #[doc = "See [`arch::_mm256_andnot_si256`]."] + #[inline(always)] + pub fn _mm256_andnot_si256(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_andnot_si256(a, b) } + } + #[doc = "See [`arch::_mm256_avg_epu16`]."] + #[inline(always)] + pub fn _mm256_avg_epu16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_avg_epu16(a, b) } + } + #[doc = "See [`arch::_mm256_avg_epu8`]."] + #[inline(always)] + pub fn _mm256_avg_epu8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_avg_epu8(a, b) } + } + #[doc = "See [`arch::_mm_blend_epi32`]."] + #[inline(always)] + pub fn _mm_blend_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_blend_epi32::(a, b) } + } + #[doc = "See [`arch::_mm256_blend_epi32`]."] + #[inline(always)] + pub fn _mm256_blend_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_blend_epi32::(a, b) } + } + #[doc = "See [`arch::_mm256_blend_epi16`]."] + #[inline(always)] + pub 
fn _mm256_blend_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_blend_epi16::(a, b) } + } + #[doc = "See [`arch::_mm256_blendv_epi8`]."] + #[inline(always)] + pub fn _mm256_blendv_epi8(self, a: __m256i, b: __m256i, mask: __m256i) -> __m256i { + unsafe { _mm256_blendv_epi8(a, b, mask) } + } + #[doc = "See [`arch::_mm_broadcastb_epi8`]."] + #[inline(always)] + pub fn _mm_broadcastb_epi8(self, a: __m128i) -> __m128i { + unsafe { _mm_broadcastb_epi8(a) } + } + #[doc = "See [`arch::_mm256_broadcastb_epi8`]."] + #[inline(always)] + pub fn _mm256_broadcastb_epi8(self, a: __m128i) -> __m256i { + unsafe { _mm256_broadcastb_epi8(a) } + } + #[doc = "See [`arch::_mm_broadcastd_epi32`]."] + #[inline(always)] + pub fn _mm_broadcastd_epi32(self, a: __m128i) -> __m128i { + unsafe { _mm_broadcastd_epi32(a) } + } + #[doc = "See [`arch::_mm256_broadcastd_epi32`]."] + #[inline(always)] + pub fn _mm256_broadcastd_epi32(self, a: __m128i) -> __m256i { + unsafe { _mm256_broadcastd_epi32(a) } + } + #[doc = "See [`arch::_mm_broadcastq_epi64`]."] + #[inline(always)] + pub fn _mm_broadcastq_epi64(self, a: __m128i) -> __m128i { + unsafe { _mm_broadcastq_epi64(a) } + } + #[doc = "See [`arch::_mm256_broadcastq_epi64`]."] + #[inline(always)] + pub fn _mm256_broadcastq_epi64(self, a: __m128i) -> __m256i { + unsafe { _mm256_broadcastq_epi64(a) } + } + #[doc = "See [`arch::_mm_broadcastsd_pd`]."] + #[inline(always)] + pub fn _mm_broadcastsd_pd(self, a: __m128d) -> __m128d { + unsafe { _mm_broadcastsd_pd(a) } + } + #[doc = "See [`arch::_mm256_broadcastsd_pd`]."] + #[inline(always)] + pub fn _mm256_broadcastsd_pd(self, a: __m128d) -> __m256d { + unsafe { _mm256_broadcastsd_pd(a) } + } + #[doc = "See [`arch::_mm_broadcastsi128_si256`]."] + #[inline(always)] + pub fn _mm_broadcastsi128_si256(self, a: __m128i) -> __m256i { + unsafe { _mm_broadcastsi128_si256(a) } + } + #[doc = "See [`arch::_mm256_broadcastsi128_si256`]."] + #[inline(always)] + pub fn _mm256_broadcastsi128_si256(self, 
a: __m128i) -> __m256i { + unsafe { _mm256_broadcastsi128_si256(a) } + } + #[doc = "See [`arch::_mm_broadcastss_ps`]."] + #[inline(always)] + pub fn _mm_broadcastss_ps(self, a: __m128) -> __m128 { + unsafe { _mm_broadcastss_ps(a) } + } + #[doc = "See [`arch::_mm256_broadcastss_ps`]."] + #[inline(always)] + pub fn _mm256_broadcastss_ps(self, a: __m128) -> __m256 { + unsafe { _mm256_broadcastss_ps(a) } + } + #[doc = "See [`arch::_mm_broadcastw_epi16`]."] + #[inline(always)] + pub fn _mm_broadcastw_epi16(self, a: __m128i) -> __m128i { + unsafe { _mm_broadcastw_epi16(a) } + } + #[doc = "See [`arch::_mm256_broadcastw_epi16`]."] + #[inline(always)] + pub fn _mm256_broadcastw_epi16(self, a: __m128i) -> __m256i { + unsafe { _mm256_broadcastw_epi16(a) } + } + #[doc = "See [`arch::_mm256_cmpeq_epi64`]."] + #[inline(always)] + pub fn _mm256_cmpeq_epi64(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_cmpeq_epi64(a, b) } + } + #[doc = "See [`arch::_mm256_cmpeq_epi32`]."] + #[inline(always)] + pub fn _mm256_cmpeq_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_cmpeq_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_cmpeq_epi16`]."] + #[inline(always)] + pub fn _mm256_cmpeq_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_cmpeq_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_cmpeq_epi8`]."] + #[inline(always)] + pub fn _mm256_cmpeq_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_cmpeq_epi8(a, b) } + } + #[doc = "See [`arch::_mm256_cmpgt_epi64`]."] + #[inline(always)] + pub fn _mm256_cmpgt_epi64(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_cmpgt_epi64(a, b) } + } + #[doc = "See [`arch::_mm256_cmpgt_epi32`]."] + #[inline(always)] + pub fn _mm256_cmpgt_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_cmpgt_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_cmpgt_epi16`]."] + #[inline(always)] + pub fn _mm256_cmpgt_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { 
_mm256_cmpgt_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_cmpgt_epi8`]."] + #[inline(always)] + pub fn _mm256_cmpgt_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_cmpgt_epi8(a, b) } + } + #[doc = "See [`arch::_mm256_cvtepi16_epi32`]."] + #[inline(always)] + pub fn _mm256_cvtepi16_epi32(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepi16_epi32(a) } + } + #[doc = "See [`arch::_mm256_cvtepi16_epi64`]."] + #[inline(always)] + pub fn _mm256_cvtepi16_epi64(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepi16_epi64(a) } + } + #[doc = "See [`arch::_mm256_cvtepi32_epi64`]."] + #[inline(always)] + pub fn _mm256_cvtepi32_epi64(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepi32_epi64(a) } + } + #[doc = "See [`arch::_mm256_cvtepi8_epi16`]."] + #[inline(always)] + pub fn _mm256_cvtepi8_epi16(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepi8_epi16(a) } + } + #[doc = "See [`arch::_mm256_cvtepi8_epi32`]."] + #[inline(always)] + pub fn _mm256_cvtepi8_epi32(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepi8_epi32(a) } + } + #[doc = "See [`arch::_mm256_cvtepi8_epi64`]."] + #[inline(always)] + pub fn _mm256_cvtepi8_epi64(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepi8_epi64(a) } + } + #[doc = "See [`arch::_mm256_cvtepu16_epi32`]."] + #[inline(always)] + pub fn _mm256_cvtepu16_epi32(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepu16_epi32(a) } + } + #[doc = "See [`arch::_mm256_cvtepu16_epi64`]."] + #[inline(always)] + pub fn _mm256_cvtepu16_epi64(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepu16_epi64(a) } + } + #[doc = "See [`arch::_mm256_cvtepu32_epi64`]."] + #[inline(always)] + pub fn _mm256_cvtepu32_epi64(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepu32_epi64(a) } + } + #[doc = "See [`arch::_mm256_cvtepu8_epi16`]."] + #[inline(always)] + pub fn _mm256_cvtepu8_epi16(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepu8_epi16(a) } + } + #[doc = "See [`arch::_mm256_cvtepu8_epi32`]."] + 
#[inline(always)] + pub fn _mm256_cvtepu8_epi32(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepu8_epi32(a) } + } + #[doc = "See [`arch::_mm256_cvtepu8_epi64`]."] + #[inline(always)] + pub fn _mm256_cvtepu8_epi64(self, a: __m128i) -> __m256i { + unsafe { _mm256_cvtepu8_epi64(a) } + } + #[doc = "See [`arch::_mm256_extracti128_si256`]."] + #[inline(always)] + pub fn _mm256_extracti128_si256(self, a: __m256i) -> __m128i { + unsafe { _mm256_extracti128_si256::(a) } + } + #[doc = "See [`arch::_mm256_hadd_epi16`]."] + #[inline(always)] + pub fn _mm256_hadd_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_hadd_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_hadd_epi32`]."] + #[inline(always)] + pub fn _mm256_hadd_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_hadd_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_hadds_epi16`]."] + #[inline(always)] + pub fn _mm256_hadds_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_hadds_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_hsub_epi16`]."] + #[inline(always)] + pub fn _mm256_hsub_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_hsub_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_hsub_epi32`]."] + #[inline(always)] + pub fn _mm256_hsub_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_hsub_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_hsubs_epi16`]."] + #[inline(always)] + pub fn _mm256_hsubs_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_hsubs_epi16(a, b) } + } + #[doc = "See [`arch::_mm_i32gather_epi32`]."] + #[inline(always)] + pub unsafe fn _mm_i32gather_epi32( + self, + slice: *const i32, + offsets: __m128i, + ) -> __m128i { + unsafe { _mm_i32gather_epi32::(slice, offsets) } + } + #[doc = "See [`arch::_mm_mask_i32gather_epi32`]."] + #[inline(always)] + pub unsafe fn _mm_mask_i32gather_epi32( + self, + src: __m128i, + slice: *const i32, + offsets: __m128i, + mask: __m128i, + ) -> __m128i { + unsafe { 
_mm_mask_i32gather_epi32::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm256_i32gather_epi32`]."] + #[inline(always)] + pub unsafe fn _mm256_i32gather_epi32( + self, + slice: *const i32, + offsets: __m256i, + ) -> __m256i { + unsafe { _mm256_i32gather_epi32::(slice, offsets) } + } + #[doc = "See [`arch::_mm256_mask_i32gather_epi32`]."] + #[inline(always)] + pub unsafe fn _mm256_mask_i32gather_epi32( + self, + src: __m256i, + slice: *const i32, + offsets: __m256i, + mask: __m256i, + ) -> __m256i { + unsafe { _mm256_mask_i32gather_epi32::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm_i32gather_ps`]."] + #[inline(always)] + pub unsafe fn _mm_i32gather_ps( + self, + slice: *const f32, + offsets: __m128i, + ) -> __m128 { + unsafe { _mm_i32gather_ps::(slice, offsets) } + } + #[doc = "See [`arch::_mm_mask_i32gather_ps`]."] + #[inline(always)] + pub unsafe fn _mm_mask_i32gather_ps( + self, + src: __m128, + slice: *const f32, + offsets: __m128i, + mask: __m128, + ) -> __m128 { + unsafe { _mm_mask_i32gather_ps::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm256_i32gather_ps`]."] + #[inline(always)] + pub unsafe fn _mm256_i32gather_ps( + self, + slice: *const f32, + offsets: __m256i, + ) -> __m256 { + unsafe { _mm256_i32gather_ps::(slice, offsets) } + } + #[doc = "See [`arch::_mm256_mask_i32gather_ps`]."] + #[inline(always)] + pub unsafe fn _mm256_mask_i32gather_ps( + self, + src: __m256, + slice: *const f32, + offsets: __m256i, + mask: __m256, + ) -> __m256 { + unsafe { _mm256_mask_i32gather_ps::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm_i32gather_epi64`]."] + #[inline(always)] + pub unsafe fn _mm_i32gather_epi64( + self, + slice: *const i64, + offsets: __m128i, + ) -> __m128i { + unsafe { _mm_i32gather_epi64::(slice, offsets) } + } + #[doc = "See [`arch::_mm_mask_i32gather_epi64`]."] + #[inline(always)] + pub unsafe fn _mm_mask_i32gather_epi64( + self, + src: __m128i, + slice: *const i64, + offsets: __m128i, + mask: 
__m128i, + ) -> __m128i { + unsafe { _mm_mask_i32gather_epi64::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm256_i32gather_epi64`]."] + #[inline(always)] + pub unsafe fn _mm256_i32gather_epi64( + self, + slice: *const i64, + offsets: __m128i, + ) -> __m256i { + unsafe { _mm256_i32gather_epi64::(slice, offsets) } + } + #[doc = "See [`arch::_mm256_mask_i32gather_epi64`]."] + #[inline(always)] + pub unsafe fn _mm256_mask_i32gather_epi64( + self, + src: __m256i, + slice: *const i64, + offsets: __m128i, + mask: __m256i, + ) -> __m256i { + unsafe { _mm256_mask_i32gather_epi64::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm_i32gather_pd`]."] + #[inline(always)] + pub unsafe fn _mm_i32gather_pd( + self, + slice: *const f64, + offsets: __m128i, + ) -> __m128d { + unsafe { _mm_i32gather_pd::(slice, offsets) } + } + #[doc = "See [`arch::_mm_mask_i32gather_pd`]."] + #[inline(always)] + pub unsafe fn _mm_mask_i32gather_pd( + self, + src: __m128d, + slice: *const f64, + offsets: __m128i, + mask: __m128d, + ) -> __m128d { + unsafe { _mm_mask_i32gather_pd::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm256_i32gather_pd`]."] + #[inline(always)] + pub unsafe fn _mm256_i32gather_pd( + self, + slice: *const f64, + offsets: __m128i, + ) -> __m256d { + unsafe { _mm256_i32gather_pd::(slice, offsets) } + } + #[doc = "See [`arch::_mm256_mask_i32gather_pd`]."] + #[inline(always)] + pub unsafe fn _mm256_mask_i32gather_pd( + self, + src: __m256d, + slice: *const f64, + offsets: __m128i, + mask: __m256d, + ) -> __m256d { + unsafe { _mm256_mask_i32gather_pd::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm_i64gather_epi32`]."] + #[inline(always)] + pub unsafe fn _mm_i64gather_epi32( + self, + slice: *const i32, + offsets: __m128i, + ) -> __m128i { + unsafe { _mm_i64gather_epi32::(slice, offsets) } + } + #[doc = "See [`arch::_mm_mask_i64gather_epi32`]."] + #[inline(always)] + pub unsafe fn _mm_mask_i64gather_epi32( + self, + src: __m128i, + 
slice: *const i32, + offsets: __m128i, + mask: __m128i, + ) -> __m128i { + unsafe { _mm_mask_i64gather_epi32::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm256_i64gather_epi32`]."] + #[inline(always)] + pub unsafe fn _mm256_i64gather_epi32( + self, + slice: *const i32, + offsets: __m256i, + ) -> __m128i { + unsafe { _mm256_i64gather_epi32::(slice, offsets) } + } + #[doc = "See [`arch::_mm256_mask_i64gather_epi32`]."] + #[inline(always)] + pub unsafe fn _mm256_mask_i64gather_epi32( + self, + src: __m128i, + slice: *const i32, + offsets: __m256i, + mask: __m128i, + ) -> __m128i { + unsafe { _mm256_mask_i64gather_epi32::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm_i64gather_ps`]."] + #[inline(always)] + pub unsafe fn _mm_i64gather_ps( + self, + slice: *const f32, + offsets: __m128i, + ) -> __m128 { + unsafe { _mm_i64gather_ps::(slice, offsets) } + } + #[doc = "See [`arch::_mm_mask_i64gather_ps`]."] + #[inline(always)] + pub unsafe fn _mm_mask_i64gather_ps( + self, + src: __m128, + slice: *const f32, + offsets: __m128i, + mask: __m128, + ) -> __m128 { + unsafe { _mm_mask_i64gather_ps::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm256_i64gather_ps`]."] + #[inline(always)] + pub unsafe fn _mm256_i64gather_ps( + self, + slice: *const f32, + offsets: __m256i, + ) -> __m128 { + unsafe { _mm256_i64gather_ps::(slice, offsets) } + } + #[doc = "See [`arch::_mm256_mask_i64gather_ps`]."] + #[inline(always)] + pub unsafe fn _mm256_mask_i64gather_ps( + self, + src: __m128, + slice: *const f32, + offsets: __m256i, + mask: __m128, + ) -> __m128 { + unsafe { _mm256_mask_i64gather_ps::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm_i64gather_epi64`]."] + #[inline(always)] + pub unsafe fn _mm_i64gather_epi64( + self, + slice: *const i64, + offsets: __m128i, + ) -> __m128i { + unsafe { _mm_i64gather_epi64::(slice, offsets) } + } + #[doc = "See [`arch::_mm_mask_i64gather_epi64`]."] + #[inline(always)] + pub unsafe fn 
_mm_mask_i64gather_epi64( + self, + src: __m128i, + slice: *const i64, + offsets: __m128i, + mask: __m128i, + ) -> __m128i { + unsafe { _mm_mask_i64gather_epi64::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm256_i64gather_epi64`]."] + #[inline(always)] + pub unsafe fn _mm256_i64gather_epi64( + self, + slice: *const i64, + offsets: __m256i, + ) -> __m256i { + unsafe { _mm256_i64gather_epi64::(slice, offsets) } + } + #[doc = "See [`arch::_mm256_mask_i64gather_epi64`]."] + #[inline(always)] + pub unsafe fn _mm256_mask_i64gather_epi64( + self, + src: __m256i, + slice: *const i64, + offsets: __m256i, + mask: __m256i, + ) -> __m256i { + unsafe { _mm256_mask_i64gather_epi64::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm_i64gather_pd`]."] + #[inline(always)] + pub unsafe fn _mm_i64gather_pd( + self, + slice: *const f64, + offsets: __m128i, + ) -> __m128d { + unsafe { _mm_i64gather_pd::(slice, offsets) } + } + #[doc = "See [`arch::_mm_mask_i64gather_pd`]."] + #[inline(always)] + pub unsafe fn _mm_mask_i64gather_pd( + self, + src: __m128d, + slice: *const f64, + offsets: __m128i, + mask: __m128d, + ) -> __m128d { + unsafe { _mm_mask_i64gather_pd::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm256_i64gather_pd`]."] + #[inline(always)] + pub unsafe fn _mm256_i64gather_pd( + self, + slice: *const f64, + offsets: __m256i, + ) -> __m256d { + unsafe { _mm256_i64gather_pd::(slice, offsets) } + } + #[doc = "See [`arch::_mm256_mask_i64gather_pd`]."] + #[inline(always)] + pub unsafe fn _mm256_mask_i64gather_pd( + self, + src: __m256d, + slice: *const f64, + offsets: __m256i, + mask: __m256d, + ) -> __m256d { + unsafe { _mm256_mask_i64gather_pd::(src, slice, offsets, mask) } + } + #[doc = "See [`arch::_mm256_inserti128_si256`]."] + #[inline(always)] + pub fn _mm256_inserti128_si256(self, a: __m256i, b: __m128i) -> __m256i { + unsafe { _mm256_inserti128_si256::(a, b) } + } + #[doc = "See [`arch::_mm256_madd_epi16`]."] + #[inline(always)] + pub 
fn _mm256_madd_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_madd_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_maddubs_epi16`]."] + #[inline(always)] + pub fn _mm256_maddubs_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_maddubs_epi16(a, b) } + } + #[doc = "See [`arch::_mm_maskload_epi32`]."] + #[inline(always)] + pub unsafe fn _mm_maskload_epi32(self, mem_addr: *const i32, mask: __m128i) -> __m128i { + unsafe { _mm_maskload_epi32(mem_addr, mask) } + } + #[doc = "See [`arch::_mm256_maskload_epi32`]."] + #[inline(always)] + pub unsafe fn _mm256_maskload_epi32(self, mem_addr: *const i32, mask: __m256i) -> __m256i { + unsafe { _mm256_maskload_epi32(mem_addr, mask) } + } + #[doc = "See [`arch::_mm_maskload_epi64`]."] + #[inline(always)] + pub unsafe fn _mm_maskload_epi64(self, mem_addr: *const i64, mask: __m128i) -> __m128i { + unsafe { _mm_maskload_epi64(mem_addr, mask) } + } + #[doc = "See [`arch::_mm256_maskload_epi64`]."] + #[inline(always)] + pub unsafe fn _mm256_maskload_epi64(self, mem_addr: *const i64, mask: __m256i) -> __m256i { + unsafe { _mm256_maskload_epi64(mem_addr, mask) } + } + #[doc = "See [`arch::_mm_maskstore_epi32`]."] + #[inline(always)] + pub unsafe fn _mm_maskstore_epi32(self, mem_addr: *mut i32, mask: __m128i, a: __m128i) { + unsafe { _mm_maskstore_epi32(mem_addr, mask, a) } + } + #[doc = "See [`arch::_mm256_maskstore_epi32`]."] + #[inline(always)] + pub unsafe fn _mm256_maskstore_epi32(self, mem_addr: *mut i32, mask: __m256i, a: __m256i) { + unsafe { _mm256_maskstore_epi32(mem_addr, mask, a) } + } + #[doc = "See [`arch::_mm_maskstore_epi64`]."] + #[inline(always)] + pub unsafe fn _mm_maskstore_epi64(self, mem_addr: *mut i64, mask: __m128i, a: __m128i) { + unsafe { _mm_maskstore_epi64(mem_addr, mask, a) } + } + #[doc = "See [`arch::_mm256_maskstore_epi64`]."] + #[inline(always)] + pub unsafe fn _mm256_maskstore_epi64(self, mem_addr: *mut i64, mask: __m256i, a: __m256i) { + unsafe { 
_mm256_maskstore_epi64(mem_addr, mask, a) } + } + #[doc = "See [`arch::_mm256_max_epi16`]."] + #[inline(always)] + pub fn _mm256_max_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_max_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_max_epi32`]."] + #[inline(always)] + pub fn _mm256_max_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_max_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_max_epi8`]."] + #[inline(always)] + pub fn _mm256_max_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_max_epi8(a, b) } + } + #[doc = "See [`arch::_mm256_max_epu16`]."] + #[inline(always)] + pub fn _mm256_max_epu16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_max_epu16(a, b) } + } + #[doc = "See [`arch::_mm256_max_epu32`]."] + #[inline(always)] + pub fn _mm256_max_epu32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_max_epu32(a, b) } + } + #[doc = "See [`arch::_mm256_max_epu8`]."] + #[inline(always)] + pub fn _mm256_max_epu8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_max_epu8(a, b) } + } + #[doc = "See [`arch::_mm256_min_epi16`]."] + #[inline(always)] + pub fn _mm256_min_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_min_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_min_epi32`]."] + #[inline(always)] + pub fn _mm256_min_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_min_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_min_epi8`]."] + #[inline(always)] + pub fn _mm256_min_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_min_epi8(a, b) } + } + #[doc = "See [`arch::_mm256_min_epu16`]."] + #[inline(always)] + pub fn _mm256_min_epu16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_min_epu16(a, b) } + } + #[doc = "See [`arch::_mm256_min_epu32`]."] + #[inline(always)] + pub fn _mm256_min_epu32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_min_epu32(a, b) } + } + #[doc = "See [`arch::_mm256_min_epu8`]."] + 
#[inline(always)] + pub fn _mm256_min_epu8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_min_epu8(a, b) } + } + #[doc = "See [`arch::_mm256_movemask_epi8`]."] + #[inline(always)] + pub fn _mm256_movemask_epi8(self, a: __m256i) -> i32 { + unsafe { _mm256_movemask_epi8(a) } + } + #[doc = "See [`arch::_mm256_mpsadbw_epu8`]."] + #[inline(always)] + pub fn _mm256_mpsadbw_epu8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_mpsadbw_epu8::(a, b) } + } + #[doc = "See [`arch::_mm256_mul_epi32`]."] + #[inline(always)] + pub fn _mm256_mul_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_mul_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_mul_epu32`]."] + #[inline(always)] + pub fn _mm256_mul_epu32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_mul_epu32(a, b) } + } + #[doc = "See [`arch::_mm256_mulhi_epi16`]."] + #[inline(always)] + pub fn _mm256_mulhi_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_mulhi_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_mulhi_epu16`]."] + #[inline(always)] + pub fn _mm256_mulhi_epu16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_mulhi_epu16(a, b) } + } + #[doc = "See [`arch::_mm256_mullo_epi16`]."] + #[inline(always)] + pub fn _mm256_mullo_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_mullo_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_mullo_epi32`]."] + #[inline(always)] + pub fn _mm256_mullo_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_mullo_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_mulhrs_epi16`]."] + #[inline(always)] + pub fn _mm256_mulhrs_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_mulhrs_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_or_si256`]."] + #[inline(always)] + pub fn _mm256_or_si256(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_or_si256(a, b) } + } + #[doc = "See [`arch::_mm256_packs_epi16`]."] + #[inline(always)] + pub fn _mm256_packs_epi16(self, 
a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_packs_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_packs_epi32`]."] + #[inline(always)] + pub fn _mm256_packs_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_packs_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_packus_epi16`]."] + #[inline(always)] + pub fn _mm256_packus_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_packus_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_packus_epi32`]."] + #[inline(always)] + pub fn _mm256_packus_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_packus_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_permutevar8x32_epi32`]."] + #[inline(always)] + pub fn _mm256_permutevar8x32_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_permutevar8x32_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_permute4x64_epi64`]."] + #[inline(always)] + pub fn _mm256_permute4x64_epi64(self, a: __m256i) -> __m256i { + unsafe { _mm256_permute4x64_epi64::(a) } + } + #[doc = "See [`arch::_mm256_permute2x128_si256`]."] + #[inline(always)] + pub fn _mm256_permute2x128_si256(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_permute2x128_si256::(a, b) } + } + #[doc = "See [`arch::_mm256_permute4x64_pd`]."] + #[inline(always)] + pub fn _mm256_permute4x64_pd(self, a: __m256d) -> __m256d { + unsafe { _mm256_permute4x64_pd::(a) } + } + #[doc = "See [`arch::_mm256_permutevar8x32_ps`]."] + #[inline(always)] + pub fn _mm256_permutevar8x32_ps(self, a: __m256, idx: __m256i) -> __m256 { + unsafe { _mm256_permutevar8x32_ps(a, idx) } + } + #[doc = "See [`arch::_mm256_sad_epu8`]."] + #[inline(always)] + pub fn _mm256_sad_epu8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_sad_epu8(a, b) } + } + #[doc = "See [`arch::_mm256_shuffle_epi8`]."] + #[inline(always)] + pub fn _mm256_shuffle_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_shuffle_epi8(a, b) } + } + #[doc = "See [`arch::_mm256_shuffle_epi32`]."] 
+ #[inline(always)] + pub fn _mm256_shuffle_epi32(self, a: __m256i) -> __m256i { + unsafe { _mm256_shuffle_epi32::(a) } + } + #[doc = "See [`arch::_mm256_shufflehi_epi16`]."] + #[inline(always)] + pub fn _mm256_shufflehi_epi16(self, a: __m256i) -> __m256i { + unsafe { _mm256_shufflehi_epi16::(a) } + } + #[doc = "See [`arch::_mm256_shufflelo_epi16`]."] + #[inline(always)] + pub fn _mm256_shufflelo_epi16(self, a: __m256i) -> __m256i { + unsafe { _mm256_shufflelo_epi16::(a) } + } + #[doc = "See [`arch::_mm256_sign_epi16`]."] + #[inline(always)] + pub fn _mm256_sign_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_sign_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_sign_epi32`]."] + #[inline(always)] + pub fn _mm256_sign_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_sign_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_sign_epi8`]."] + #[inline(always)] + pub fn _mm256_sign_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_sign_epi8(a, b) } + } + #[doc = "See [`arch::_mm256_sll_epi16`]."] + #[inline(always)] + pub fn _mm256_sll_epi16(self, a: __m256i, count: __m128i) -> __m256i { + unsafe { _mm256_sll_epi16(a, count) } + } + #[doc = "See [`arch::_mm256_sll_epi32`]."] + #[inline(always)] + pub fn _mm256_sll_epi32(self, a: __m256i, count: __m128i) -> __m256i { + unsafe { _mm256_sll_epi32(a, count) } + } + #[doc = "See [`arch::_mm256_sll_epi64`]."] + #[inline(always)] + pub fn _mm256_sll_epi64(self, a: __m256i, count: __m128i) -> __m256i { + unsafe { _mm256_sll_epi64(a, count) } + } + #[doc = "See [`arch::_mm256_slli_epi16`]."] + #[inline(always)] + pub fn _mm256_slli_epi16(self, a: __m256i) -> __m256i { + unsafe { _mm256_slli_epi16::(a) } + } + #[doc = "See [`arch::_mm256_slli_epi32`]."] + #[inline(always)] + pub fn _mm256_slli_epi32(self, a: __m256i) -> __m256i { + unsafe { _mm256_slli_epi32::(a) } + } + #[doc = "See [`arch::_mm256_slli_epi64`]."] + #[inline(always)] + pub fn _mm256_slli_epi64(self, a: __m256i) -> 
__m256i { + unsafe { _mm256_slli_epi64::(a) } + } + #[doc = "See [`arch::_mm256_slli_si256`]."] + #[inline(always)] + pub fn _mm256_slli_si256(self, a: __m256i) -> __m256i { + unsafe { _mm256_slli_si256::(a) } + } + #[doc = "See [`arch::_mm256_bslli_epi128`]."] + #[inline(always)] + pub fn _mm256_bslli_epi128(self, a: __m256i) -> __m256i { + unsafe { _mm256_bslli_epi128::(a) } + } + #[doc = "See [`arch::_mm_sllv_epi32`]."] + #[inline(always)] + pub fn _mm_sllv_epi32(self, a: __m128i, count: __m128i) -> __m128i { + unsafe { _mm_sllv_epi32(a, count) } + } + #[doc = "See [`arch::_mm256_sllv_epi32`]."] + #[inline(always)] + pub fn _mm256_sllv_epi32(self, a: __m256i, count: __m256i) -> __m256i { + unsafe { _mm256_sllv_epi32(a, count) } + } + #[doc = "See [`arch::_mm_sllv_epi64`]."] + #[inline(always)] + pub fn _mm_sllv_epi64(self, a: __m128i, count: __m128i) -> __m128i { + unsafe { _mm_sllv_epi64(a, count) } + } + #[doc = "See [`arch::_mm256_sllv_epi64`]."] + #[inline(always)] + pub fn _mm256_sllv_epi64(self, a: __m256i, count: __m256i) -> __m256i { + unsafe { _mm256_sllv_epi64(a, count) } + } + #[doc = "See [`arch::_mm256_sra_epi16`]."] + #[inline(always)] + pub fn _mm256_sra_epi16(self, a: __m256i, count: __m128i) -> __m256i { + unsafe { _mm256_sra_epi16(a, count) } + } + #[doc = "See [`arch::_mm256_sra_epi32`]."] + #[inline(always)] + pub fn _mm256_sra_epi32(self, a: __m256i, count: __m128i) -> __m256i { + unsafe { _mm256_sra_epi32(a, count) } + } + #[doc = "See [`arch::_mm256_srai_epi16`]."] + #[inline(always)] + pub fn _mm256_srai_epi16(self, a: __m256i) -> __m256i { + unsafe { _mm256_srai_epi16::(a) } + } + #[doc = "See [`arch::_mm256_srai_epi32`]."] + #[inline(always)] + pub fn _mm256_srai_epi32(self, a: __m256i) -> __m256i { + unsafe { _mm256_srai_epi32::(a) } + } + #[doc = "See [`arch::_mm_srav_epi32`]."] + #[inline(always)] + pub fn _mm_srav_epi32(self, a: __m128i, count: __m128i) -> __m128i { + unsafe { _mm_srav_epi32(a, count) } + } + #[doc = "See 
[`arch::_mm256_srav_epi32`]."] + #[inline(always)] + pub fn _mm256_srav_epi32(self, a: __m256i, count: __m256i) -> __m256i { + unsafe { _mm256_srav_epi32(a, count) } + } + #[doc = "See [`arch::_mm256_srli_si256`]."] + #[inline(always)] + pub fn _mm256_srli_si256(self, a: __m256i) -> __m256i { + unsafe { _mm256_srli_si256::(a) } + } + #[doc = "See [`arch::_mm256_bsrli_epi128`]."] + #[inline(always)] + pub fn _mm256_bsrli_epi128(self, a: __m256i) -> __m256i { + unsafe { _mm256_bsrli_epi128::(a) } + } + #[doc = "See [`arch::_mm256_srl_epi16`]."] + #[inline(always)] + pub fn _mm256_srl_epi16(self, a: __m256i, count: __m128i) -> __m256i { + unsafe { _mm256_srl_epi16(a, count) } + } + #[doc = "See [`arch::_mm256_srl_epi32`]."] + #[inline(always)] + pub fn _mm256_srl_epi32(self, a: __m256i, count: __m128i) -> __m256i { + unsafe { _mm256_srl_epi32(a, count) } + } + #[doc = "See [`arch::_mm256_srl_epi64`]."] + #[inline(always)] + pub fn _mm256_srl_epi64(self, a: __m256i, count: __m128i) -> __m256i { + unsafe { _mm256_srl_epi64(a, count) } + } + #[doc = "See [`arch::_mm256_srli_epi16`]."] + #[inline(always)] + pub fn _mm256_srli_epi16(self, a: __m256i) -> __m256i { + unsafe { _mm256_srli_epi16::(a) } + } + #[doc = "See [`arch::_mm256_srli_epi32`]."] + #[inline(always)] + pub fn _mm256_srli_epi32(self, a: __m256i) -> __m256i { + unsafe { _mm256_srli_epi32::(a) } + } + #[doc = "See [`arch::_mm256_srli_epi64`]."] + #[inline(always)] + pub fn _mm256_srli_epi64(self, a: __m256i) -> __m256i { + unsafe { _mm256_srli_epi64::(a) } + } + #[doc = "See [`arch::_mm_srlv_epi32`]."] + #[inline(always)] + pub fn _mm_srlv_epi32(self, a: __m128i, count: __m128i) -> __m128i { + unsafe { _mm_srlv_epi32(a, count) } + } + #[doc = "See [`arch::_mm256_srlv_epi32`]."] + #[inline(always)] + pub fn _mm256_srlv_epi32(self, a: __m256i, count: __m256i) -> __m256i { + unsafe { _mm256_srlv_epi32(a, count) } + } + #[doc = "See [`arch::_mm_srlv_epi64`]."] + #[inline(always)] + pub fn _mm_srlv_epi64(self, a: 
__m128i, count: __m128i) -> __m128i { + unsafe { _mm_srlv_epi64(a, count) } + } + #[doc = "See [`arch::_mm256_srlv_epi64`]."] + #[inline(always)] + pub fn _mm256_srlv_epi64(self, a: __m256i, count: __m256i) -> __m256i { + unsafe { _mm256_srlv_epi64(a, count) } + } + #[doc = "See [`arch::_mm256_stream_load_si256`]."] + #[inline(always)] + pub unsafe fn _mm256_stream_load_si256(self, mem_addr: *const __m256i) -> __m256i { + unsafe { _mm256_stream_load_si256(mem_addr) } + } + #[doc = "See [`arch::_mm256_sub_epi16`]."] + #[inline(always)] + pub fn _mm256_sub_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_sub_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_sub_epi32`]."] + #[inline(always)] + pub fn _mm256_sub_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_sub_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_sub_epi64`]."] + #[inline(always)] + pub fn _mm256_sub_epi64(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_sub_epi64(a, b) } + } + #[doc = "See [`arch::_mm256_sub_epi8`]."] + #[inline(always)] + pub fn _mm256_sub_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_sub_epi8(a, b) } + } + #[doc = "See [`arch::_mm256_subs_epi16`]."] + #[inline(always)] + pub fn _mm256_subs_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_subs_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_subs_epi8`]."] + #[inline(always)] + pub fn _mm256_subs_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_subs_epi8(a, b) } + } + #[doc = "See [`arch::_mm256_subs_epu16`]."] + #[inline(always)] + pub fn _mm256_subs_epu16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_subs_epu16(a, b) } + } + #[doc = "See [`arch::_mm256_subs_epu8`]."] + #[inline(always)] + pub fn _mm256_subs_epu8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_subs_epu8(a, b) } + } + #[doc = "See [`arch::_mm256_unpackhi_epi8`]."] + #[inline(always)] + pub fn _mm256_unpackhi_epi8(self, a: __m256i, b: __m256i) 
-> __m256i { + unsafe { _mm256_unpackhi_epi8(a, b) } + } + #[doc = "See [`arch::_mm256_unpacklo_epi8`]."] + #[inline(always)] + pub fn _mm256_unpacklo_epi8(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_unpacklo_epi8(a, b) } + } + #[doc = "See [`arch::_mm256_unpackhi_epi16`]."] + #[inline(always)] + pub fn _mm256_unpackhi_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_unpackhi_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_unpacklo_epi16`]."] + #[inline(always)] + pub fn _mm256_unpacklo_epi16(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_unpacklo_epi16(a, b) } + } + #[doc = "See [`arch::_mm256_unpackhi_epi32`]."] + #[inline(always)] + pub fn _mm256_unpackhi_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_unpackhi_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_unpacklo_epi32`]."] + #[inline(always)] + pub fn _mm256_unpacklo_epi32(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_unpacklo_epi32(a, b) } + } + #[doc = "See [`arch::_mm256_unpackhi_epi64`]."] + #[inline(always)] + pub fn _mm256_unpackhi_epi64(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_unpackhi_epi64(a, b) } + } + #[doc = "See [`arch::_mm256_unpacklo_epi64`]."] + #[inline(always)] + pub fn _mm256_unpacklo_epi64(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_unpacklo_epi64(a, b) } + } + #[doc = "See [`arch::_mm256_xor_si256`]."] + #[inline(always)] + pub fn _mm256_xor_si256(self, a: __m256i, b: __m256i) -> __m256i { + unsafe { _mm256_xor_si256(a, b) } + } + #[doc = "See [`arch::_mm256_extract_epi8`]."] + #[inline(always)] + pub fn _mm256_extract_epi8(self, a: __m256i) -> i32 { + unsafe { _mm256_extract_epi8::(a) } + } + #[doc = "See [`arch::_mm256_extract_epi16`]."] + #[inline(always)] + pub fn _mm256_extract_epi16(self, a: __m256i) -> i32 { + unsafe { _mm256_extract_epi16::(a) } } } diff --git a/fearless_simd/src/core_arch/x86/fma.rs b/fearless_simd/src/core_arch/x86/fma.rs index 
2117b65b0..98b12bc82 100644 --- a/fearless_simd/src/core_arch/x86/fma.rs +++ b/fearless_simd/src/core_arch/x86/fma.rs @@ -1,65 +1,190 @@ -// Copyright 2024 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -//! Access to FMA intrinsics. +// This file is autogenerated by fearless_simd_gen -use crate::impl_macros::delegate; +use arch::*; #[cfg(target_arch = "x86")] use core::arch::x86 as arch; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as arch; - -use arch::*; - -/// A token for FMA intrinsics on `x86` and `x86_64`. +#[doc = "A token for `Fma` intrinsics on `x86` and `x86_64`."] #[derive(Clone, Copy, Debug)] pub struct Fma { _private: (), } - +#[allow( + clippy::missing_safety_doc, + reason = "The underlying functions have their own safety docs" +)] impl Fma { - /// Create a SIMD token. - /// - /// # Safety - /// - /// The required CPU features must be available. + #[doc = r" Create a SIMD token."] + #[doc = r""] + #[doc = r" # Safety"] + #[doc = r""] + #[doc = r" The required CPU features must be available."] #[inline] pub const unsafe fn new_unchecked() -> Self { Self { _private: () } } - - delegate! 
{ arch: - fn _mm_fmadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d; - fn _mm256_fmadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d; - fn _mm_fmadd_ps(a: __m128, b: __m128, c: __m128) -> __m128; - fn _mm256_fmadd_ps(a: __m256, b: __m256, c: __m256) -> __m256; - fn _mm_fmadd_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d; - fn _mm_fmadd_ss(a: __m128, b: __m128, c: __m128) -> __m128; - fn _mm_fmaddsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d; - fn _mm256_fmaddsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d; - fn _mm_fmaddsub_ps(a: __m128, b: __m128, c: __m128) -> __m128; - fn _mm256_fmaddsub_ps(a: __m256, b: __m256, c: __m256) -> __m256; - fn _mm_fmsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d; - fn _mm256_fmsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d; - fn _mm_fmsub_ps(a: __m128, b: __m128, c: __m128) -> __m128; - fn _mm256_fmsub_ps(a: __m256, b: __m256, c: __m256) -> __m256; - fn _mm_fmsub_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d; - fn _mm_fmsub_ss(a: __m128, b: __m128, c: __m128) -> __m128; - fn _mm_fmsubadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d; - fn _mm256_fmsubadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d; - fn _mm_fmsubadd_ps(a: __m128, b: __m128, c: __m128) -> __m128; - fn _mm256_fmsubadd_ps(a: __m256, b: __m256, c: __m256) -> __m256; - fn _mm_fnmadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d; - fn _mm256_fnmadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d; - fn _mm_fnmadd_ps(a: __m128, b: __m128, c: __m128) -> __m128; - fn _mm256_fnmadd_ps(a: __m256, b: __m256, c: __m256) -> __m256; - fn _mm_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d; - fn _mm_fnmadd_ss(a: __m128, b: __m128, c: __m128) -> __m128; - fn _mm_fnmsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d; - fn _mm256_fnmsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d; - fn _mm_fnmsub_ps(a: __m128, b: __m128, c: __m128) -> __m128; - fn _mm256_fnmsub_ps(a: __m256, b: __m256, c: __m256) -> __m256; - 
fn _mm_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d; - fn _mm_fnmsub_ss(a: __m128, b: __m128, c: __m128) -> __m128; + #[doc = "See [`arch::_mm_fmadd_pd`]."] + #[inline(always)] + pub fn _mm_fmadd_pd(self, a: __m128d, b: __m128d, c: __m128d) -> __m128d { + unsafe { _mm_fmadd_pd(a, b, c) } + } + #[doc = "See [`arch::_mm256_fmadd_pd`]."] + #[inline(always)] + pub fn _mm256_fmadd_pd(self, a: __m256d, b: __m256d, c: __m256d) -> __m256d { + unsafe { _mm256_fmadd_pd(a, b, c) } + } + #[doc = "See [`arch::_mm_fmadd_ps`]."] + #[inline(always)] + pub fn _mm_fmadd_ps(self, a: __m128, b: __m128, c: __m128) -> __m128 { + unsafe { _mm_fmadd_ps(a, b, c) } + } + #[doc = "See [`arch::_mm256_fmadd_ps`]."] + #[inline(always)] + pub fn _mm256_fmadd_ps(self, a: __m256, b: __m256, c: __m256) -> __m256 { + unsafe { _mm256_fmadd_ps(a, b, c) } + } + #[doc = "See [`arch::_mm_fmadd_sd`]."] + #[inline(always)] + pub fn _mm_fmadd_sd(self, a: __m128d, b: __m128d, c: __m128d) -> __m128d { + unsafe { _mm_fmadd_sd(a, b, c) } + } + #[doc = "See [`arch::_mm_fmadd_ss`]."] + #[inline(always)] + pub fn _mm_fmadd_ss(self, a: __m128, b: __m128, c: __m128) -> __m128 { + unsafe { _mm_fmadd_ss(a, b, c) } + } + #[doc = "See [`arch::_mm_fmaddsub_pd`]."] + #[inline(always)] + pub fn _mm_fmaddsub_pd(self, a: __m128d, b: __m128d, c: __m128d) -> __m128d { + unsafe { _mm_fmaddsub_pd(a, b, c) } + } + #[doc = "See [`arch::_mm256_fmaddsub_pd`]."] + #[inline(always)] + pub fn _mm256_fmaddsub_pd(self, a: __m256d, b: __m256d, c: __m256d) -> __m256d { + unsafe { _mm256_fmaddsub_pd(a, b, c) } + } + #[doc = "See [`arch::_mm_fmaddsub_ps`]."] + #[inline(always)] + pub fn _mm_fmaddsub_ps(self, a: __m128, b: __m128, c: __m128) -> __m128 { + unsafe { _mm_fmaddsub_ps(a, b, c) } + } + #[doc = "See [`arch::_mm256_fmaddsub_ps`]."] + #[inline(always)] + pub fn _mm256_fmaddsub_ps(self, a: __m256, b: __m256, c: __m256) -> __m256 { + unsafe { _mm256_fmaddsub_ps(a, b, c) } + } + #[doc = "See [`arch::_mm_fmsub_pd`]."] + 
#[inline(always)] + pub fn _mm_fmsub_pd(self, a: __m128d, b: __m128d, c: __m128d) -> __m128d { + unsafe { _mm_fmsub_pd(a, b, c) } + } + #[doc = "See [`arch::_mm256_fmsub_pd`]."] + #[inline(always)] + pub fn _mm256_fmsub_pd(self, a: __m256d, b: __m256d, c: __m256d) -> __m256d { + unsafe { _mm256_fmsub_pd(a, b, c) } + } + #[doc = "See [`arch::_mm_fmsub_ps`]."] + #[inline(always)] + pub fn _mm_fmsub_ps(self, a: __m128, b: __m128, c: __m128) -> __m128 { + unsafe { _mm_fmsub_ps(a, b, c) } + } + #[doc = "See [`arch::_mm256_fmsub_ps`]."] + #[inline(always)] + pub fn _mm256_fmsub_ps(self, a: __m256, b: __m256, c: __m256) -> __m256 { + unsafe { _mm256_fmsub_ps(a, b, c) } + } + #[doc = "See [`arch::_mm_fmsub_sd`]."] + #[inline(always)] + pub fn _mm_fmsub_sd(self, a: __m128d, b: __m128d, c: __m128d) -> __m128d { + unsafe { _mm_fmsub_sd(a, b, c) } + } + #[doc = "See [`arch::_mm_fmsub_ss`]."] + #[inline(always)] + pub fn _mm_fmsub_ss(self, a: __m128, b: __m128, c: __m128) -> __m128 { + unsafe { _mm_fmsub_ss(a, b, c) } + } + #[doc = "See [`arch::_mm_fmsubadd_pd`]."] + #[inline(always)] + pub fn _mm_fmsubadd_pd(self, a: __m128d, b: __m128d, c: __m128d) -> __m128d { + unsafe { _mm_fmsubadd_pd(a, b, c) } + } + #[doc = "See [`arch::_mm256_fmsubadd_pd`]."] + #[inline(always)] + pub fn _mm256_fmsubadd_pd(self, a: __m256d, b: __m256d, c: __m256d) -> __m256d { + unsafe { _mm256_fmsubadd_pd(a, b, c) } + } + #[doc = "See [`arch::_mm_fmsubadd_ps`]."] + #[inline(always)] + pub fn _mm_fmsubadd_ps(self, a: __m128, b: __m128, c: __m128) -> __m128 { + unsafe { _mm_fmsubadd_ps(a, b, c) } + } + #[doc = "See [`arch::_mm256_fmsubadd_ps`]."] + #[inline(always)] + pub fn _mm256_fmsubadd_ps(self, a: __m256, b: __m256, c: __m256) -> __m256 { + unsafe { _mm256_fmsubadd_ps(a, b, c) } + } + #[doc = "See [`arch::_mm_fnmadd_pd`]."] + #[inline(always)] + pub fn _mm_fnmadd_pd(self, a: __m128d, b: __m128d, c: __m128d) -> __m128d { + unsafe { _mm_fnmadd_pd(a, b, c) } + } + #[doc = "See 
[`arch::_mm256_fnmadd_pd`]."] + #[inline(always)] + pub fn _mm256_fnmadd_pd(self, a: __m256d, b: __m256d, c: __m256d) -> __m256d { + unsafe { _mm256_fnmadd_pd(a, b, c) } + } + #[doc = "See [`arch::_mm_fnmadd_ps`]."] + #[inline(always)] + pub fn _mm_fnmadd_ps(self, a: __m128, b: __m128, c: __m128) -> __m128 { + unsafe { _mm_fnmadd_ps(a, b, c) } + } + #[doc = "See [`arch::_mm256_fnmadd_ps`]."] + #[inline(always)] + pub fn _mm256_fnmadd_ps(self, a: __m256, b: __m256, c: __m256) -> __m256 { + unsafe { _mm256_fnmadd_ps(a, b, c) } + } + #[doc = "See [`arch::_mm_fnmadd_sd`]."] + #[inline(always)] + pub fn _mm_fnmadd_sd(self, a: __m128d, b: __m128d, c: __m128d) -> __m128d { + unsafe { _mm_fnmadd_sd(a, b, c) } + } + #[doc = "See [`arch::_mm_fnmadd_ss`]."] + #[inline(always)] + pub fn _mm_fnmadd_ss(self, a: __m128, b: __m128, c: __m128) -> __m128 { + unsafe { _mm_fnmadd_ss(a, b, c) } + } + #[doc = "See [`arch::_mm_fnmsub_pd`]."] + #[inline(always)] + pub fn _mm_fnmsub_pd(self, a: __m128d, b: __m128d, c: __m128d) -> __m128d { + unsafe { _mm_fnmsub_pd(a, b, c) } + } + #[doc = "See [`arch::_mm256_fnmsub_pd`]."] + #[inline(always)] + pub fn _mm256_fnmsub_pd(self, a: __m256d, b: __m256d, c: __m256d) -> __m256d { + unsafe { _mm256_fnmsub_pd(a, b, c) } + } + #[doc = "See [`arch::_mm_fnmsub_ps`]."] + #[inline(always)] + pub fn _mm_fnmsub_ps(self, a: __m128, b: __m128, c: __m128) -> __m128 { + unsafe { _mm_fnmsub_ps(a, b, c) } + } + #[doc = "See [`arch::_mm256_fnmsub_ps`]."] + #[inline(always)] + pub fn _mm256_fnmsub_ps(self, a: __m256, b: __m256, c: __m256) -> __m256 { + unsafe { _mm256_fnmsub_ps(a, b, c) } + } + #[doc = "See [`arch::_mm_fnmsub_sd`]."] + #[inline(always)] + pub fn _mm_fnmsub_sd(self, a: __m128d, b: __m128d, c: __m128d) -> __m128d { + unsafe { _mm_fnmsub_sd(a, b, c) } + } + #[doc = "See [`arch::_mm_fnmsub_ss`]."] + #[inline(always)] + pub fn _mm_fnmsub_ss(self, a: __m128, b: __m128, c: __m128) -> __m128 { + unsafe { _mm_fnmsub_ss(a, b, c) } } } diff --git 
a/fearless_simd/src/core_arch/x86/mod.rs b/fearless_simd/src/core_arch/x86/mod.rs index 14f9a2a16..e72751da9 100644 --- a/fearless_simd/src/core_arch/x86/mod.rs +++ b/fearless_simd/src/core_arch/x86/mod.rs @@ -1,8 +1,9 @@ -// Copyright 2024 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -//! Access to intrinsics on `x86` and `x86_64`. +// This file is autogenerated by fearless_simd_gen +#![doc = "Access to intrinsics on `x86` and `x86_64`."] mod avx; mod avx2; mod fma; @@ -12,7 +13,6 @@ mod sse3; mod sse4_1; mod sse4_2; mod ssse3; - pub use avx::Avx; pub use avx2::Avx2; pub use fma::Fma; diff --git a/fearless_simd/src/core_arch/x86/sse.rs b/fearless_simd/src/core_arch/x86/sse.rs index 09eeb953a..652b21bde 100644 --- a/fearless_simd/src/core_arch/x86/sse.rs +++ b/fearless_simd/src/core_arch/x86/sse.rs @@ -1,140 +1,623 @@ -// Copyright 2024 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -//! Access to SSE intrinsics. +// This file is autogenerated by fearless_simd_gen -use crate::impl_macros::delegate; +use arch::*; #[cfg(target_arch = "x86")] use core::arch::x86 as arch; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as arch; - -use arch::*; - -/// A token for SSE intrinsics on `x86` and `x86_64`. +#[doc = "A token for `Sse` intrinsics on `x86` and `x86_64`."] #[derive(Clone, Copy, Debug)] pub struct Sse { _private: (), } - -#[expect( +#[allow( clippy::missing_safety_doc, - reason = "TODO: https://github.com/linebender/fearless_simd/issues/40" + reason = "The underlying functions have their own safety docs" )] impl Sse { - /// Create a SIMD token. - /// - /// # Safety - /// - /// The required CPU features must be available. 
+ #[doc = r" Create a SIMD token."] + #[doc = r""] + #[doc = r" # Safety"] + #[doc = r""] + #[doc = r" The required CPU features must be available."] #[inline] pub const unsafe fn new_unchecked() -> Self { Self { _private: () } } - - delegate! { arch: - fn _mm_add_ss(a: __m128, b: __m128) -> __m128; - fn _mm_add_ps(a: __m128, b: __m128) -> __m128; - fn _mm_sub_ss(a: __m128, b: __m128) -> __m128; - fn _mm_sub_ps(a: __m128, b: __m128) -> __m128; - fn _mm_mul_ss(a: __m128, b: __m128) -> __m128; - fn _mm_mul_ps(a: __m128, b: __m128) -> __m128; - fn _mm_div_ss(a: __m128, b: __m128) -> __m128; - fn _mm_div_ps(a: __m128, b: __m128) -> __m128; - fn _mm_sqrt_ss(a: __m128) -> __m128; - fn _mm_sqrt_ps(a: __m128) -> __m128; - fn _mm_rcp_ss(a: __m128) -> __m128; - fn _mm_rcp_ps(a: __m128) -> __m128; - fn _mm_rsqrt_ss(a: __m128) -> __m128; - fn _mm_rsqrt_ps(a: __m128) -> __m128; - fn _mm_min_ss(a: __m128, b: __m128) -> __m128; - fn _mm_min_ps(a: __m128, b: __m128) -> __m128; - fn _mm_max_ss(a: __m128, b: __m128) -> __m128; - fn _mm_max_ps(a: __m128, b: __m128) -> __m128; - fn _mm_and_ps(a: __m128, b: __m128) -> __m128; - fn _mm_andnot_ps(a: __m128, b: __m128) -> __m128; - fn _mm_or_ps(a: __m128, b: __m128) -> __m128; - fn _mm_xor_ps(a: __m128, b: __m128) -> __m128; - fn _mm_cmpeq_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmplt_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmple_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmpgt_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmpge_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmpneq_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmpnlt_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmpnle_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmpngt_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmpnge_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmpord_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmpunord_ss(a: __m128, b: __m128) -> __m128; - fn _mm_cmpeq_ps(a: __m128, b: __m128) -> __m128; - fn _mm_cmplt_ps(a: __m128, b: __m128) -> __m128; - fn 
_mm_cmple_ps(a: __m128, b: __m128) -> __m128; - fn _mm_cmpgt_ps(a: __m128, b: __m128) -> __m128; - fn _mm_cmpge_ps(a: __m128, b: __m128) -> __m128; - fn _mm_cmpneq_ps(a: __m128, b: __m128) -> __m128; - fn _mm_cmpnlt_ps(a: __m128, b: __m128) -> __m128; - fn _mm_cmpnle_ps(a: __m128, b: __m128) -> __m128; - fn _mm_cmpngt_ps(a: __m128, b: __m128) -> __m128; - fn _mm_cmpnge_ps(a: __m128, b: __m128) -> __m128; - fn _mm_cmpord_ps(a: __m128, b: __m128) -> __m128; - fn _mm_cmpunord_ps(a: __m128, b: __m128) -> __m128; - fn _mm_comieq_ss(a: __m128, b: __m128) -> i32; - fn _mm_comilt_ss(a: __m128, b: __m128) -> i32; - fn _mm_comile_ss(a: __m128, b: __m128) -> i32; - fn _mm_comigt_ss(a: __m128, b: __m128) -> i32; - fn _mm_comige_ss(a: __m128, b: __m128) -> i32; - fn _mm_comineq_ss(a: __m128, b: __m128) -> i32; - fn _mm_ucomieq_ss(a: __m128, b: __m128) -> i32; - fn _mm_ucomilt_ss(a: __m128, b: __m128) -> i32; - fn _mm_ucomile_ss(a: __m128, b: __m128) -> i32; - fn _mm_ucomigt_ss(a: __m128, b: __m128) -> i32; - fn _mm_ucomige_ss(a: __m128, b: __m128) -> i32; - fn _mm_ucomineq_ss(a: __m128, b: __m128) -> i32; - fn _mm_cvtss_si32(a: __m128) -> i32; - fn _mm_cvt_ss2si(a: __m128) -> i32; - fn _mm_cvttss_si32(a: __m128) -> i32; - fn _mm_cvtt_ss2si(a: __m128) -> i32; - fn _mm_cvtss_f32(a: __m128) -> f32; - fn _mm_cvtsi32_ss(a: __m128, b: i32) -> __m128; - fn _mm_cvt_si2ss(a: __m128, b: i32) -> __m128; - fn _mm_set_ss(a: f32) -> __m128; - fn _mm_set1_ps(a: f32) -> __m128; - fn _mm_set_ps1(a: f32) -> __m128; - fn _mm_set_ps(a: f32, b: f32, c: f32, d: f32) -> __m128; - fn _mm_setr_ps(a: f32, b: f32, c: f32, d: f32) -> __m128; - fn _mm_setzero_ps() -> __m128; - fn _mm_shuffle_ps(a: __m128, b: __m128) -> __m128; - fn _mm_unpackhi_ps(a: __m128, b: __m128) -> __m128; - fn _mm_unpacklo_ps(a: __m128, b: __m128) -> __m128; - fn _mm_movehl_ps(a: __m128, b: __m128) -> __m128; - fn _mm_movelh_ps(a: __m128, b: __m128) -> __m128; - fn _mm_movemask_ps(a: __m128) -> i32; - unsafe fn _mm_load_ss(p: 
*const f32) -> __m128; - unsafe fn _mm_load1_ps(p: *const f32) -> __m128; - unsafe fn _mm_load_ps1(p: *const f32) -> __m128; - unsafe fn _mm_load_ps(p: *const f32) -> __m128; - unsafe fn _mm_loadu_ps(p: *const f32) -> __m128; - unsafe fn _mm_loadr_ps(p: *const f32) -> __m128; - unsafe fn _mm_loadu_si64(mem_addr: *const u8) -> __m128i; - unsafe fn _mm_store_ss(p: *mut f32, a: __m128); - unsafe fn _mm_store1_ps(p: *mut f32, a: __m128); - unsafe fn _mm_store_ps1(p: *mut f32, a: __m128); - unsafe fn _mm_store_ps(p: *mut f32, a: __m128); - unsafe fn _mm_storeu_ps(p: *mut f32, a: __m128); - unsafe fn _mm_storer_ps(p: *mut f32, a: __m128); - fn _mm_move_ss(a: __m128, b: __m128) -> __m128; - fn _mm_sfence(); - #[expect(clippy::not_unsafe_ptr_arg_deref, reason="Prefetch has no preconditions, so is valid to accept a pointer.")] - fn _mm_prefetch(p: *const i8); - fn _mm_undefined_ps() -> __m128; - #[allow(non_snake_case)] - fn _MM_TRANSPOSE4_PS( - row0: &mut __m128, - row1: &mut __m128, - row2: &mut __m128, - row3: &mut __m128, - ); - unsafe fn _mm_stream_ps(mem_addr: *mut f32, a: __m128); + #[doc = "See [`arch::_mm_add_ss`]."] + #[inline(always)] + pub fn _mm_add_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_add_ss(a, b) } + } + #[doc = "See [`arch::_mm_add_ps`]."] + #[inline(always)] + pub fn _mm_add_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_add_ps(a, b) } + } + #[doc = "See [`arch::_mm_sub_ss`]."] + #[inline(always)] + pub fn _mm_sub_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_sub_ss(a, b) } + } + #[doc = "See [`arch::_mm_sub_ps`]."] + #[inline(always)] + pub fn _mm_sub_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_sub_ps(a, b) } + } + #[doc = "See [`arch::_mm_mul_ss`]."] + #[inline(always)] + pub fn _mm_mul_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_mul_ss(a, b) } + } + #[doc = "See [`arch::_mm_mul_ps`]."] + #[inline(always)] + pub fn _mm_mul_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { 
_mm_mul_ps(a, b) } + } + #[doc = "See [`arch::_mm_div_ss`]."] + #[inline(always)] + pub fn _mm_div_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_div_ss(a, b) } + } + #[doc = "See [`arch::_mm_div_ps`]."] + #[inline(always)] + pub fn _mm_div_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_div_ps(a, b) } + } + #[doc = "See [`arch::_mm_sqrt_ss`]."] + #[inline(always)] + pub fn _mm_sqrt_ss(self, a: __m128) -> __m128 { + unsafe { _mm_sqrt_ss(a) } + } + #[doc = "See [`arch::_mm_sqrt_ps`]."] + #[inline(always)] + pub fn _mm_sqrt_ps(self, a: __m128) -> __m128 { + unsafe { _mm_sqrt_ps(a) } + } + #[doc = "See [`arch::_mm_rcp_ss`]."] + #[inline(always)] + pub fn _mm_rcp_ss(self, a: __m128) -> __m128 { + unsafe { _mm_rcp_ss(a) } + } + #[doc = "See [`arch::_mm_rcp_ps`]."] + #[inline(always)] + pub fn _mm_rcp_ps(self, a: __m128) -> __m128 { + unsafe { _mm_rcp_ps(a) } + } + #[doc = "See [`arch::_mm_rsqrt_ss`]."] + #[inline(always)] + pub fn _mm_rsqrt_ss(self, a: __m128) -> __m128 { + unsafe { _mm_rsqrt_ss(a) } + } + #[doc = "See [`arch::_mm_rsqrt_ps`]."] + #[inline(always)] + pub fn _mm_rsqrt_ps(self, a: __m128) -> __m128 { + unsafe { _mm_rsqrt_ps(a) } + } + #[doc = "See [`arch::_mm_min_ss`]."] + #[inline(always)] + pub fn _mm_min_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_min_ss(a, b) } + } + #[doc = "See [`arch::_mm_min_ps`]."] + #[inline(always)] + pub fn _mm_min_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_min_ps(a, b) } + } + #[doc = "See [`arch::_mm_max_ss`]."] + #[inline(always)] + pub fn _mm_max_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_max_ss(a, b) } + } + #[doc = "See [`arch::_mm_max_ps`]."] + #[inline(always)] + pub fn _mm_max_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_max_ps(a, b) } + } + #[doc = "See [`arch::_mm_and_ps`]."] + #[inline(always)] + pub fn _mm_and_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_and_ps(a, b) } + } + #[doc = "See [`arch::_mm_andnot_ps`]."] + 
#[inline(always)] + pub fn _mm_andnot_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_andnot_ps(a, b) } + } + #[doc = "See [`arch::_mm_or_ps`]."] + #[inline(always)] + pub fn _mm_or_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_or_ps(a, b) } + } + #[doc = "See [`arch::_mm_xor_ps`]."] + #[inline(always)] + pub fn _mm_xor_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_xor_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmpeq_ss`]."] + #[inline(always)] + pub fn _mm_cmpeq_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpeq_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmplt_ss`]."] + #[inline(always)] + pub fn _mm_cmplt_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmplt_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmple_ss`]."] + #[inline(always)] + pub fn _mm_cmple_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmple_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmpgt_ss`]."] + #[inline(always)] + pub fn _mm_cmpgt_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpgt_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmpge_ss`]."] + #[inline(always)] + pub fn _mm_cmpge_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpge_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmpneq_ss`]."] + #[inline(always)] + pub fn _mm_cmpneq_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpneq_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmpnlt_ss`]."] + #[inline(always)] + pub fn _mm_cmpnlt_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpnlt_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmpnle_ss`]."] + #[inline(always)] + pub fn _mm_cmpnle_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpnle_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmpngt_ss`]."] + #[inline(always)] + pub fn _mm_cmpngt_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpngt_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmpnge_ss`]."] + #[inline(always)] + pub fn _mm_cmpnge_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { 
_mm_cmpnge_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmpord_ss`]."] + #[inline(always)] + pub fn _mm_cmpord_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpord_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmpunord_ss`]."] + #[inline(always)] + pub fn _mm_cmpunord_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpunord_ss(a, b) } + } + #[doc = "See [`arch::_mm_cmpeq_ps`]."] + #[inline(always)] + pub fn _mm_cmpeq_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpeq_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmplt_ps`]."] + #[inline(always)] + pub fn _mm_cmplt_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmplt_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmple_ps`]."] + #[inline(always)] + pub fn _mm_cmple_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmple_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmpgt_ps`]."] + #[inline(always)] + pub fn _mm_cmpgt_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpgt_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmpge_ps`]."] + #[inline(always)] + pub fn _mm_cmpge_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpge_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmpneq_ps`]."] + #[inline(always)] + pub fn _mm_cmpneq_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpneq_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmpnlt_ps`]."] + #[inline(always)] + pub fn _mm_cmpnlt_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpnlt_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmpnle_ps`]."] + #[inline(always)] + pub fn _mm_cmpnle_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpnle_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmpngt_ps`]."] + #[inline(always)] + pub fn _mm_cmpngt_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpngt_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmpnge_ps`]."] + #[inline(always)] + pub fn _mm_cmpnge_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpnge_ps(a, b) } + } + #[doc = "See 
[`arch::_mm_cmpord_ps`]."] + #[inline(always)] + pub fn _mm_cmpord_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpord_ps(a, b) } + } + #[doc = "See [`arch::_mm_cmpunord_ps`]."] + #[inline(always)] + pub fn _mm_cmpunord_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_cmpunord_ps(a, b) } + } + #[doc = "See [`arch::_mm_comieq_ss`]."] + #[inline(always)] + pub fn _mm_comieq_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_comieq_ss(a, b) } + } + #[doc = "See [`arch::_mm_comilt_ss`]."] + #[inline(always)] + pub fn _mm_comilt_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_comilt_ss(a, b) } + } + #[doc = "See [`arch::_mm_comile_ss`]."] + #[inline(always)] + pub fn _mm_comile_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_comile_ss(a, b) } + } + #[doc = "See [`arch::_mm_comigt_ss`]."] + #[inline(always)] + pub fn _mm_comigt_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_comigt_ss(a, b) } + } + #[doc = "See [`arch::_mm_comige_ss`]."] + #[inline(always)] + pub fn _mm_comige_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_comige_ss(a, b) } + } + #[doc = "See [`arch::_mm_comineq_ss`]."] + #[inline(always)] + pub fn _mm_comineq_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_comineq_ss(a, b) } + } + #[doc = "See [`arch::_mm_ucomieq_ss`]."] + #[inline(always)] + pub fn _mm_ucomieq_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_ucomieq_ss(a, b) } + } + #[doc = "See [`arch::_mm_ucomilt_ss`]."] + #[inline(always)] + pub fn _mm_ucomilt_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_ucomilt_ss(a, b) } + } + #[doc = "See [`arch::_mm_ucomile_ss`]."] + #[inline(always)] + pub fn _mm_ucomile_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_ucomile_ss(a, b) } + } + #[doc = "See [`arch::_mm_ucomigt_ss`]."] + #[inline(always)] + pub fn _mm_ucomigt_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_ucomigt_ss(a, b) } + } + #[doc = "See [`arch::_mm_ucomige_ss`]."] + #[inline(always)] + pub fn 
_mm_ucomige_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_ucomige_ss(a, b) } + } + #[doc = "See [`arch::_mm_ucomineq_ss`]."] + #[inline(always)] + pub fn _mm_ucomineq_ss(self, a: __m128, b: __m128) -> i32 { + unsafe { _mm_ucomineq_ss(a, b) } + } + #[doc = "See [`arch::_mm_cvtss_si32`]."] + #[inline(always)] + pub fn _mm_cvtss_si32(self, a: __m128) -> i32 { + unsafe { _mm_cvtss_si32(a) } + } + #[doc = "See [`arch::_mm_cvt_ss2si`]."] + #[inline(always)] + pub fn _mm_cvt_ss2si(self, a: __m128) -> i32 { + unsafe { _mm_cvt_ss2si(a) } + } + #[doc = "See [`arch::_mm_cvttss_si32`]."] + #[inline(always)] + pub fn _mm_cvttss_si32(self, a: __m128) -> i32 { + unsafe { _mm_cvttss_si32(a) } + } + #[doc = "See [`arch::_mm_cvtt_ss2si`]."] + #[inline(always)] + pub fn _mm_cvtt_ss2si(self, a: __m128) -> i32 { + unsafe { _mm_cvtt_ss2si(a) } + } + #[doc = "See [`arch::_mm_cvtss_f32`]."] + #[inline(always)] + pub fn _mm_cvtss_f32(self, a: __m128) -> f32 { + unsafe { _mm_cvtss_f32(a) } + } + #[doc = "See [`arch::_mm_cvtsi32_ss`]."] + #[inline(always)] + pub fn _mm_cvtsi32_ss(self, a: __m128, b: i32) -> __m128 { + unsafe { _mm_cvtsi32_ss(a, b) } + } + #[doc = "See [`arch::_mm_cvt_si2ss`]."] + #[inline(always)] + pub fn _mm_cvt_si2ss(self, a: __m128, b: i32) -> __m128 { + unsafe { _mm_cvt_si2ss(a, b) } + } + #[doc = "See [`arch::_mm_set_ss`]."] + #[inline(always)] + pub fn _mm_set_ss(self, a: f32) -> __m128 { + unsafe { _mm_set_ss(a) } + } + #[doc = "See [`arch::_mm_set1_ps`]."] + #[inline(always)] + pub fn _mm_set1_ps(self, a: f32) -> __m128 { + unsafe { _mm_set1_ps(a) } + } + #[doc = "See [`arch::_mm_set_ps1`]."] + #[inline(always)] + pub fn _mm_set_ps1(self, a: f32) -> __m128 { + unsafe { _mm_set_ps1(a) } + } + #[doc = "See [`arch::_mm_set_ps`]."] + #[inline(always)] + pub fn _mm_set_ps(self, a: f32, b: f32, c: f32, d: f32) -> __m128 { + unsafe { _mm_set_ps(a, b, c, d) } + } + #[doc = "See [`arch::_mm_setr_ps`]."] + #[inline(always)] + pub fn _mm_setr_ps(self, a: f32, b: f32, 
c: f32, d: f32) -> __m128 { + unsafe { _mm_setr_ps(a, b, c, d) } + } + #[doc = "See [`arch::_mm_setzero_ps`]."] + #[inline(always)] + pub fn _mm_setzero_ps(self) -> __m128 { + unsafe { _mm_setzero_ps() } + } + #[doc = "See [`arch::_mm_shuffle_ps`]."] + #[inline(always)] + pub fn _mm_shuffle_ps<const MASK: i32>(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_shuffle_ps::<MASK>(a, b) } + } + #[doc = "See [`arch::_mm_unpackhi_ps`]."] + #[inline(always)] + pub fn _mm_unpackhi_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_unpackhi_ps(a, b) } + } + #[doc = "See [`arch::_mm_unpacklo_ps`]."] + #[inline(always)] + pub fn _mm_unpacklo_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_unpacklo_ps(a, b) } + } + #[doc = "See [`arch::_mm_movehl_ps`]."] + #[inline(always)] + pub fn _mm_movehl_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_movehl_ps(a, b) } + } + #[doc = "See [`arch::_mm_movelh_ps`]."] + #[inline(always)] + pub fn _mm_movelh_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_movelh_ps(a, b) } + } + #[doc = "See [`arch::_mm_movemask_ps`]."] + #[inline(always)] + pub fn _mm_movemask_ps(self, a: __m128) -> i32 { + unsafe { _mm_movemask_ps(a) } + } + #[doc = "See [`arch::_mm_load_ss`]."] + #[inline(always)] + pub unsafe fn _mm_load_ss(self, p: *const f32) -> __m128 { + unsafe { _mm_load_ss(p) } + } + #[doc = "See [`arch::_mm_load1_ps`]."] + #[inline(always)] + pub unsafe fn _mm_load1_ps(self, p: *const f32) -> __m128 { + unsafe { _mm_load1_ps(p) } + } + #[doc = "See [`arch::_mm_load_ps1`]."] + #[inline(always)] + pub unsafe fn _mm_load_ps1(self, p: *const f32) -> __m128 { + unsafe { _mm_load_ps1(p) } + } + #[doc = "See [`arch::_mm_load_ps`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm_load_ps(self, p: *const f32) -> __m128 { + unsafe { _mm_load_ps(p) } + } + #[doc = "See [`arch::_mm_loadu_ps`]."] + #[inline(always)] + pub unsafe fn _mm_loadu_ps(self, p: *const f32) -> __m128 { + unsafe { _mm_loadu_ps(p) } + }
+ #[doc = "See [`arch::_mm_loadr_ps`]."] + #[inline(always)] + pub unsafe fn _mm_loadr_ps(self, p: *const f32) -> __m128 { + unsafe { _mm_loadr_ps(p) } + } + #[doc = "See [`arch::_mm_store_ss`]."] + #[inline(always)] + pub unsafe fn _mm_store_ss(self, p: *mut f32, a: __m128) { + unsafe { _mm_store_ss(p, a) } + } + #[doc = "See [`arch::_mm_store1_ps`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm_store1_ps(self, p: *mut f32, a: __m128) { + unsafe { _mm_store1_ps(p, a) } + } + #[doc = "See [`arch::_mm_store_ps1`]."] + #[inline(always)] + pub unsafe fn _mm_store_ps1(self, p: *mut f32, a: __m128) { + unsafe { _mm_store_ps1(p, a) } + } + #[doc = "See [`arch::_mm_store_ps`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm_store_ps(self, p: *mut f32, a: __m128) { + unsafe { _mm_store_ps(p, a) } + } + #[doc = "See [`arch::_mm_storeu_ps`]."] + #[inline(always)] + pub unsafe fn _mm_storeu_ps(self, p: *mut f32, a: __m128) { + unsafe { _mm_storeu_ps(p, a) } + } + #[doc = "See [`arch::_mm_storer_ps`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm_storer_ps(self, p: *mut f32, a: __m128) { + unsafe { _mm_storer_ps(p, a) } + } + #[doc = "See [`arch::_mm_move_ss`]."] + #[inline(always)] + pub fn _mm_move_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_move_ss(a, b) } + } + #[doc = "See [`arch::_mm_sfence`]."] + #[inline(always)] + pub fn _mm_sfence(self) { + unsafe { _mm_sfence() } + } + #[doc = "See [`arch::_mm_getcsr`]."] + #[deprecated( + since = "1.75.0", + note = "see `_mm_getcsr` documentation - use inline assembly instead" + )] + #[inline(always)] + pub unsafe fn _mm_getcsr(self) -> u32 { + unsafe { _mm_getcsr() } + } + #[doc = "See [`arch::_mm_setcsr`]."] + #[deprecated( + since = "1.75.0", + note = "see `_mm_setcsr` documentation - use inline assembly instead" + )] + #[inline(always)] + pub unsafe fn _mm_setcsr(self, val: u32) { + unsafe { _mm_setcsr(val) } + } 
+ #[doc = "See [`arch::_MM_GET_EXCEPTION_MASK`]."] + #[allow(deprecated)] + #[allow(non_snake_case)] + #[deprecated( + since = "1.75.0", + note = "see `_mm_getcsr` documentation - use inline assembly instead" + )] + #[inline(always)] + pub unsafe fn _MM_GET_EXCEPTION_MASK(self) -> u32 { + unsafe { _MM_GET_EXCEPTION_MASK() } + } + #[doc = "See [`arch::_MM_GET_EXCEPTION_STATE`]."] + #[allow(deprecated)] + #[allow(non_snake_case)] + #[deprecated( + since = "1.75.0", + note = "see `_mm_getcsr` documentation - use inline assembly instead" + )] + #[inline(always)] + pub unsafe fn _MM_GET_EXCEPTION_STATE(self) -> u32 { + unsafe { _MM_GET_EXCEPTION_STATE() } + } + #[doc = "See [`arch::_MM_GET_FLUSH_ZERO_MODE`]."] + #[allow(deprecated)] + #[allow(non_snake_case)] + #[deprecated( + since = "1.75.0", + note = "see `_mm_getcsr` documentation - use inline assembly instead" + )] + #[inline(always)] + pub unsafe fn _MM_GET_FLUSH_ZERO_MODE(self) -> u32 { + unsafe { _MM_GET_FLUSH_ZERO_MODE() } + } + #[doc = "See [`arch::_MM_GET_ROUNDING_MODE`]."] + #[allow(deprecated)] + #[allow(non_snake_case)] + #[deprecated( + since = "1.75.0", + note = "see `_mm_getcsr` documentation - use inline assembly instead" + )] + #[inline(always)] + pub unsafe fn _MM_GET_ROUNDING_MODE(self) -> u32 { + unsafe { _MM_GET_ROUNDING_MODE() } + } + #[doc = "See [`arch::_MM_SET_EXCEPTION_MASK`]."] + #[allow(deprecated)] + #[allow(non_snake_case)] + #[deprecated( + since = "1.75.0", + note = "see `_mm_setcsr` documentation - use inline assembly instead" + )] + #[inline(always)] + pub unsafe fn _MM_SET_EXCEPTION_MASK(self, x: u32) { + unsafe { _MM_SET_EXCEPTION_MASK(x) } + } + #[doc = "See [`arch::_MM_SET_EXCEPTION_STATE`]."] + #[allow(deprecated)] + #[allow(non_snake_case)] + #[deprecated( + since = "1.75.0", + note = "see `_mm_setcsr` documentation - use inline assembly instead" + )] + #[inline(always)] + pub unsafe fn _MM_SET_EXCEPTION_STATE(self, x: u32) { + unsafe { _MM_SET_EXCEPTION_STATE(x) } + } + #[doc = 
"See [`arch::_MM_SET_FLUSH_ZERO_MODE`]."] + #[allow(deprecated)] + #[allow(non_snake_case)] + #[deprecated( + since = "1.75.0", + note = "see `_mm_setcsr` documentation - use inline assembly instead" + )] + #[inline(always)] + pub unsafe fn _MM_SET_FLUSH_ZERO_MODE(self, x: u32) { + unsafe { _MM_SET_FLUSH_ZERO_MODE(x) } + } + #[doc = "See [`arch::_MM_SET_ROUNDING_MODE`]."] + #[allow(deprecated)] + #[allow(non_snake_case)] + #[deprecated( + since = "1.75.0", + note = "see `_mm_setcsr` documentation - use inline assembly instead" + )] + #[inline(always)] + pub unsafe fn _MM_SET_ROUNDING_MODE(self, x: u32) { + unsafe { _MM_SET_ROUNDING_MODE(x) } + } + #[doc = "See [`arch::_mm_prefetch`]."] + #[inline(always)] + pub fn _mm_prefetch<const STRATEGY: i32>(self, p: *const i8) { + unsafe { _mm_prefetch::<STRATEGY>(p) } + } + #[doc = "See [`arch::_mm_undefined_ps`]."] + #[inline(always)] + pub fn _mm_undefined_ps(self) -> __m128 { + unsafe { _mm_undefined_ps() } + } + #[doc = "See [`arch::_MM_TRANSPOSE4_PS`]."] + #[allow(non_snake_case)] + #[inline(always)] + pub fn _MM_TRANSPOSE4_PS( + self, + row0: &mut __m128, + row1: &mut __m128, + row2: &mut __m128, + row3: &mut __m128, + ) { + unsafe { _MM_TRANSPOSE4_PS(row0, row1, row2, row3) } + } + #[doc = "See [`arch::_mm_stream_ps`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm_stream_ps(self, mem_addr: *mut f32, a: __m128) { + unsafe { _mm_stream_ps(mem_addr, a) } } } diff --git a/fearless_simd/src/core_arch/x86/sse2.rs b/fearless_simd/src/core_arch/x86/sse2.rs index cf320ef80..799e50fc4 100644 --- a/fearless_simd/src/core_arch/x86/sse2.rs +++ b/fearless_simd/src/core_arch/x86/sse2.rs @@ -1,299 +1,1175 @@ -// Copyright 2024 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -//! Access to SSE2 intrinsics.
+// This file is autogenerated by fearless_simd_gen -use crate::impl_macros::delegate; +use arch::*; #[cfg(target_arch = "x86")] use core::arch::x86 as arch; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as arch; - -use arch::*; - -/// A token for SSE2 intrinsics on `x86` and `x86_64`. +#[doc = "A token for `Sse2` intrinsics on `x86` and `x86_64`."] #[derive(Clone, Copy, Debug)] pub struct Sse2 { _private: (), } - -#[expect( +#[allow( clippy::missing_safety_doc, - reason = "TODO: https://github.com/linebender/fearless_simd/issues/40" + reason = "The underlying functions have their own safety docs" )] impl Sse2 { - /// Create a SIMD token. - /// - /// # Safety - /// - /// The required CPU features must be available. + #[doc = r" Create a SIMD token."] + #[doc = r""] + #[doc = r" # Safety"] + #[doc = r""] + #[doc = r" The required CPU features must be available."] #[inline] pub const unsafe fn new_unchecked() -> Self { Self { _private: () } } - - delegate! { arch: - fn _mm_pause(); - unsafe fn _mm_clflush(p: *const u8); - fn _mm_lfence(); - fn _mm_mfence(); - fn _mm_add_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_add_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_add_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_add_epi64(a: __m128i, b: __m128i) -> __m128i; - fn _mm_adds_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_adds_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_adds_epu8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_adds_epu16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_avg_epu8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_avg_epu16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_madd_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_max_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_max_epu8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_min_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_min_epu8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_mulhi_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_mulhi_epu16(a: __m128i, b: 
__m128i) -> __m128i; - fn _mm_mullo_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_mul_epu32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_sad_epu8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_sub_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_sub_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_sub_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_sub_epi64(a: __m128i, b: __m128i) -> __m128i; - fn _mm_subs_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_subs_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_subs_epu8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_subs_epu16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_slli_si128(a: __m128i) -> __m128i; - fn _mm_bslli_si128(a: __m128i) -> __m128i; - fn _mm_bsrli_si128(a: __m128i) -> __m128i; - fn _mm_slli_epi16(a: __m128i) -> __m128i; - fn _mm_sll_epi16(a: __m128i, count: __m128i) -> __m128i; - fn _mm_slli_epi32(a: __m128i) -> __m128i; - fn _mm_sll_epi32(a: __m128i, count: __m128i) -> __m128i; - fn _mm_slli_epi64(a: __m128i) -> __m128i; - fn _mm_sll_epi64(a: __m128i, count: __m128i) -> __m128i; - fn _mm_srai_epi16(a: __m128i) -> __m128i; - fn _mm_sra_epi16(a: __m128i, count: __m128i) -> __m128i; - fn _mm_srai_epi32(a: __m128i) -> __m128i; - fn _mm_sra_epi32(a: __m128i, count: __m128i) -> __m128i; - fn _mm_srli_si128(a: __m128i) -> __m128i; - fn _mm_srli_epi16(a: __m128i) -> __m128i; - fn _mm_srl_epi16(a: __m128i, count: __m128i) -> __m128i; - fn _mm_srli_epi32(a: __m128i) -> __m128i; - fn _mm_srl_epi32(a: __m128i, count: __m128i) -> __m128i; - fn _mm_srli_epi64(a: __m128i) -> __m128i; - fn _mm_srl_epi64(a: __m128i, count: __m128i) -> __m128i; - fn _mm_and_si128(a: __m128i, b: __m128i) -> __m128i; - fn _mm_andnot_si128(a: __m128i, b: __m128i) -> __m128i; - fn _mm_or_si128(a: __m128i, b: __m128i) -> __m128i; - fn _mm_xor_si128(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cmpeq_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cmpeq_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cmpeq_epi32(a: __m128i, 
b: __m128i) -> __m128i; - fn _mm_cmpgt_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cmpgt_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cmpgt_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cmplt_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cmplt_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cmplt_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cvtepi32_pd(a: __m128i) -> __m128d; - fn _mm_cvtsi32_sd(a: __m128d, b: i32) -> __m128d; - fn _mm_cvtepi32_ps(a: __m128i) -> __m128; - fn _mm_cvtps_epi32(a: __m128) -> __m128i; - fn _mm_cvtsi32_si128(a: i32) -> __m128i; - fn _mm_cvtsi128_si32(a: __m128i) -> i32; - fn _mm_set_epi64x(e1: i64, e0: i64) -> __m128i; - fn _mm_set_epi32(e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i; - fn _mm_set_epi16( - e7: i16, - e6: i16, - e5: i16, - e4: i16, - e3: i16, - e2: i16, - e1: i16, - e0: i16, - ) -> __m128i; - fn _mm_set_epi8( - e15: i8, - e14: i8, - e13: i8, - e12: i8, - e11: i8, - e10: i8, - e9: i8, - e8: i8, - e7: i8, - e6: i8, - e5: i8, - e4: i8, - e3: i8, - e2: i8, - e1: i8, - e0: i8, - ) -> __m128i; - fn _mm_set1_epi64x(a: i64) -> __m128i; - fn _mm_set1_epi32(a: i32) -> __m128i; - fn _mm_set1_epi16(a: i16) -> __m128i; - fn _mm_set1_epi8(a: i8) -> __m128i; - fn _mm_setr_epi32(e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i; - fn _mm_setr_epi16( - e7: i16, - e6: i16, - e5: i16, - e4: i16, - e3: i16, - e2: i16, - e1: i16, - e0: i16, - ) -> __m128i; - fn _mm_setr_epi8( - e15: i8, - e14: i8, - e13: i8, - e12: i8, - e11: i8, - e10: i8, - e9: i8, - e8: i8, - e7: i8, - e6: i8, - e5: i8, - e4: i8, - e3: i8, - e2: i8, - e1: i8, - e0: i8, - ) -> __m128i; - fn _mm_setzero_si128() -> __m128i; - unsafe fn _mm_loadl_epi64(mem_addr: *const __m128i) -> __m128i; - unsafe fn _mm_load_si128(mem_addr: *const __m128i) -> __m128i; - unsafe fn _mm_loadu_si128(mem_addr: *const __m128i) -> __m128i; - unsafe fn _mm_maskmoveu_si128(a: __m128i, mask: __m128i, mem_addr: *mut i8); - unsafe fn _mm_store_si128(mem_addr: *mut __m128i, a: 
__m128i); - unsafe fn _mm_storeu_si128(mem_addr: *mut __m128i, a: __m128i); - unsafe fn _mm_storel_epi64(mem_addr: *mut __m128i, a: __m128i); - unsafe fn _mm_stream_si128(mem_addr: *mut __m128i, a: __m128i); - unsafe fn _mm_stream_si32(mem_addr: *mut i32, a: i32); - fn _mm_move_epi64(a: __m128i) -> __m128i; - fn _mm_packs_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_packs_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_packus_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_extract_epi16(a: __m128i) -> i32; - fn _mm_insert_epi16(a: __m128i, i: i32) -> __m128i; - fn _mm_movemask_epi8(a: __m128i) -> i32; - fn _mm_shuffle_epi32(a: __m128i) -> __m128i; - fn _mm_shufflehi_epi16(a: __m128i) -> __m128i; - fn _mm_shufflelo_epi16(a: __m128i) -> __m128i; - fn _mm_unpackhi_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_unpackhi_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_unpackhi_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_unpackhi_epi64(a: __m128i, b: __m128i) -> __m128i; - fn _mm_unpacklo_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_unpacklo_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_unpacklo_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_unpacklo_epi64(a: __m128i, b: __m128i) -> __m128i; - fn _mm_add_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_add_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_div_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_div_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_max_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_max_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_min_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_min_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_mul_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_mul_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_sqrt_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_sqrt_pd(a: __m128d) -> __m128d; - fn _mm_sub_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_sub_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_and_pd(a: __m128d, b: 
__m128d) -> __m128d; - fn _mm_andnot_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_or_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_xor_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpeq_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmplt_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmple_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpgt_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpge_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpord_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpunord_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpneq_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpnlt_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpnle_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpngt_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpnge_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpeq_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmplt_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmple_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpgt_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpge_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpord_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpunord_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpneq_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpnlt_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpnle_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpngt_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_cmpnge_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_comieq_sd(a: __m128d, b: __m128d) -> i32; - fn _mm_comilt_sd(a: __m128d, b: __m128d) -> i32; - fn _mm_comile_sd(a: __m128d, b: __m128d) -> i32; - fn _mm_comigt_sd(a: __m128d, b: __m128d) -> i32; - fn _mm_comige_sd(a: __m128d, b: __m128d) -> i32; - fn _mm_comineq_sd(a: __m128d, b: __m128d) -> i32; - fn _mm_ucomieq_sd(a: __m128d, b: __m128d) -> i32; - fn _mm_ucomilt_sd(a: __m128d, b: __m128d) -> i32; - fn _mm_ucomile_sd(a: __m128d, b: __m128d) -> i32; - fn _mm_ucomigt_sd(a: __m128d, b: __m128d) 
-> i32; - fn _mm_ucomige_sd(a: __m128d, b: __m128d) -> i32; - fn _mm_ucomineq_sd(a: __m128d, b: __m128d) -> i32; - fn _mm_cvtpd_ps(a: __m128d) -> __m128; - fn _mm_cvtps_pd(a: __m128) -> __m128d; - fn _mm_cvtpd_epi32(a: __m128d) -> __m128i; - fn _mm_cvtsd_si32(a: __m128d) -> i32; - fn _mm_cvtsd_ss(a: __m128, b: __m128d) -> __m128; - fn _mm_cvtsd_f64(a: __m128d) -> f64; - fn _mm_cvtss_sd(a: __m128d, b: __m128) -> __m128d; - fn _mm_cvttpd_epi32(a: __m128d) -> __m128i; - fn _mm_cvttsd_si32(a: __m128d) -> i32; - fn _mm_cvttps_epi32(a: __m128) -> __m128i; - fn _mm_set_sd(a: f64) -> __m128d; - fn _mm_set1_pd(a: f64) -> __m128d; - fn _mm_set_pd1(a: f64) -> __m128d; - fn _mm_set_pd(a: f64, b: f64) -> __m128d; - fn _mm_setr_pd(a: f64, b: f64) -> __m128d; - fn _mm_setzero_pd() -> __m128d; - fn _mm_movemask_pd(a: __m128d) -> i32; - unsafe fn _mm_load_pd(mem_addr: *const f64) -> __m128d; - unsafe fn _mm_load_sd(mem_addr: *const f64) -> __m128d; - unsafe fn _mm_loadh_pd(a: __m128d, mem_addr: *const f64) -> __m128d; - unsafe fn _mm_loadl_pd(a: __m128d, mem_addr: *const f64) -> __m128d; - unsafe fn _mm_stream_pd(mem_addr: *mut f64, a: __m128d); - unsafe fn _mm_store_sd(mem_addr: *mut f64, a: __m128d); - unsafe fn _mm_store_pd(mem_addr: *mut f64, a: __m128d); - unsafe fn _mm_storeu_pd(mem_addr: *mut f64, a: __m128d); - unsafe fn _mm_store1_pd(mem_addr: *mut f64, a: __m128d); - unsafe fn _mm_store_pd1(mem_addr: *mut f64, a: __m128d); - unsafe fn _mm_storer_pd(mem_addr: *mut f64, a: __m128d); - unsafe fn _mm_storeh_pd(mem_addr: *mut f64, a: __m128d); - unsafe fn _mm_storel_pd(mem_addr: *mut f64, a: __m128d); - unsafe fn _mm_load1_pd(mem_addr: *const f64) -> __m128d; - unsafe fn _mm_load_pd1(mem_addr: *const f64) -> __m128d; - unsafe fn _mm_loadr_pd(mem_addr: *const f64) -> __m128d; - unsafe fn _mm_loadu_pd(mem_addr: *const f64) -> __m128d; - fn _mm_shuffle_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_move_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_castpd_ps(a: __m128d) -> 
__m128; - fn _mm_castpd_si128(a: __m128d) -> __m128i; - fn _mm_castps_pd(a: __m128) -> __m128d; - fn _mm_castps_si128(a: __m128) -> __m128i; - fn _mm_castsi128_pd(a: __m128i) -> __m128d; - fn _mm_castsi128_ps(a: __m128i) -> __m128; - fn _mm_undefined_pd() -> __m128d; - fn _mm_undefined_si128() -> __m128i; - fn _mm_unpackhi_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_unpacklo_pd(a: __m128d, b: __m128d) -> __m128d; + #[doc = "See [`arch::_mm_clflush`]."] + #[inline(always)] + pub unsafe fn _mm_clflush(self, p: *const u8) { + unsafe { _mm_clflush(p) } + } + #[doc = "See [`arch::_mm_lfence`]."] + #[inline(always)] + pub fn _mm_lfence(self) { + unsafe { _mm_lfence() } + } + #[doc = "See [`arch::_mm_mfence`]."] + #[inline(always)] + pub fn _mm_mfence(self) { + unsafe { _mm_mfence() } + } + #[doc = "See [`arch::_mm_add_epi8`]."] + #[inline(always)] + pub fn _mm_add_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_add_epi8(a, b) } + } + #[doc = "See [`arch::_mm_add_epi16`]."] + #[inline(always)] + pub fn _mm_add_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_add_epi16(a, b) } + } + #[doc = "See [`arch::_mm_add_epi32`]."] + #[inline(always)] + pub fn _mm_add_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_add_epi32(a, b) } + } + #[doc = "See [`arch::_mm_add_epi64`]."] + #[inline(always)] + pub fn _mm_add_epi64(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_add_epi64(a, b) } + } + #[doc = "See [`arch::_mm_adds_epi8`]."] + #[inline(always)] + pub fn _mm_adds_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_adds_epi8(a, b) } + } + #[doc = "See [`arch::_mm_adds_epi16`]."] + #[inline(always)] + pub fn _mm_adds_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_adds_epi16(a, b) } + } + #[doc = "See [`arch::_mm_adds_epu8`]."] + #[inline(always)] + pub fn _mm_adds_epu8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_adds_epu8(a, b) } + } + #[doc = "See [`arch::_mm_adds_epu16`]."] + 
#[inline(always)] + pub fn _mm_adds_epu16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_adds_epu16(a, b) } + } + #[doc = "See [`arch::_mm_avg_epu8`]."] + #[inline(always)] + pub fn _mm_avg_epu8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_avg_epu8(a, b) } + } + #[doc = "See [`arch::_mm_avg_epu16`]."] + #[inline(always)] + pub fn _mm_avg_epu16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_avg_epu16(a, b) } + } + #[doc = "See [`arch::_mm_madd_epi16`]."] + #[inline(always)] + pub fn _mm_madd_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_madd_epi16(a, b) } + } + #[doc = "See [`arch::_mm_max_epi16`]."] + #[inline(always)] + pub fn _mm_max_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_max_epi16(a, b) } + } + #[doc = "See [`arch::_mm_max_epu8`]."] + #[inline(always)] + pub fn _mm_max_epu8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_max_epu8(a, b) } + } + #[doc = "See [`arch::_mm_min_epi16`]."] + #[inline(always)] + pub fn _mm_min_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_min_epi16(a, b) } + } + #[doc = "See [`arch::_mm_min_epu8`]."] + #[inline(always)] + pub fn _mm_min_epu8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_min_epu8(a, b) } + } + #[doc = "See [`arch::_mm_mulhi_epi16`]."] + #[inline(always)] + pub fn _mm_mulhi_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_mulhi_epi16(a, b) } + } + #[doc = "See [`arch::_mm_mulhi_epu16`]."] + #[inline(always)] + pub fn _mm_mulhi_epu16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_mulhi_epu16(a, b) } + } + #[doc = "See [`arch::_mm_mullo_epi16`]."] + #[inline(always)] + pub fn _mm_mullo_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_mullo_epi16(a, b) } + } + #[doc = "See [`arch::_mm_mul_epu32`]."] + #[inline(always)] + pub fn _mm_mul_epu32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_mul_epu32(a, b) } + } + #[doc = "See [`arch::_mm_sad_epu8`]."] + 
#[inline(always)] + pub fn _mm_sad_epu8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_sad_epu8(a, b) } + } + #[doc = "See [`arch::_mm_sub_epi8`]."] + #[inline(always)] + pub fn _mm_sub_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_sub_epi8(a, b) } + } + #[doc = "See [`arch::_mm_sub_epi16`]."] + #[inline(always)] + pub fn _mm_sub_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_sub_epi16(a, b) } + } + #[doc = "See [`arch::_mm_sub_epi32`]."] + #[inline(always)] + pub fn _mm_sub_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_sub_epi32(a, b) } + } + #[doc = "See [`arch::_mm_sub_epi64`]."] + #[inline(always)] + pub fn _mm_sub_epi64(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_sub_epi64(a, b) } + } + #[doc = "See [`arch::_mm_subs_epi8`]."] + #[inline(always)] + pub fn _mm_subs_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_subs_epi8(a, b) } + } + #[doc = "See [`arch::_mm_subs_epi16`]."] + #[inline(always)] + pub fn _mm_subs_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_subs_epi16(a, b) } + } + #[doc = "See [`arch::_mm_subs_epu8`]."] + #[inline(always)] + pub fn _mm_subs_epu8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_subs_epu8(a, b) } + } + #[doc = "See [`arch::_mm_subs_epu16`]."] + #[inline(always)] + pub fn _mm_subs_epu16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_subs_epu16(a, b) } + } + #[doc = "See [`arch::_mm_slli_si128`]."] + #[inline(always)] + pub fn _mm_slli_si128<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_slli_si128::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_bslli_si128`]."] + #[inline(always)] + pub fn _mm_bslli_si128<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_bslli_si128::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_bsrli_si128`]."] + #[inline(always)] + pub fn _mm_bsrli_si128<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_bsrli_si128::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_slli_epi16`]."] + #[inline(always)] + pub fn 
_mm_slli_epi16<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_slli_epi16::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_sll_epi16`]."] + #[inline(always)] + pub fn _mm_sll_epi16(self, a: __m128i, count: __m128i) -> __m128i { + unsafe { _mm_sll_epi16(a, count) } + } + #[doc = "See [`arch::_mm_slli_epi32`]."] + #[inline(always)] + pub fn _mm_slli_epi32<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_slli_epi32::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_sll_epi32`]."] + #[inline(always)] + pub fn _mm_sll_epi32(self, a: __m128i, count: __m128i) -> __m128i { + unsafe { _mm_sll_epi32(a, count) } + } + #[doc = "See [`arch::_mm_slli_epi64`]."] + #[inline(always)] + pub fn _mm_slli_epi64<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_slli_epi64::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_sll_epi64`]."] + #[inline(always)] + pub fn _mm_sll_epi64(self, a: __m128i, count: __m128i) -> __m128i { + unsafe { _mm_sll_epi64(a, count) } + } + #[doc = "See [`arch::_mm_srai_epi16`]."] + #[inline(always)] + pub fn _mm_srai_epi16<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_srai_epi16::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_sra_epi16`]."] + #[inline(always)] + pub fn _mm_sra_epi16(self, a: __m128i, count: __m128i) -> __m128i { + unsafe { _mm_sra_epi16(a, count) } + } + #[doc = "See [`arch::_mm_srai_epi32`]."] + #[inline(always)] + pub fn _mm_srai_epi32<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_srai_epi32::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_sra_epi32`]."] + #[inline(always)] + pub fn _mm_sra_epi32(self, a: __m128i, count: __m128i) -> __m128i { + unsafe { _mm_sra_epi32(a, count) } + } + #[doc = "See [`arch::_mm_srli_si128`]."] + #[inline(always)] + pub fn _mm_srli_si128<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_srli_si128::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_srli_epi16`]."] + #[inline(always)] + pub fn _mm_srli_epi16<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_srli_epi16::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_srl_epi16`]."] + #[inline(always)] + pub fn _mm_srl_epi16(self, a: __m128i, count: __m128i) -> 
__m128i { + unsafe { _mm_srl_epi16(a, count) } + } + #[doc = "See [`arch::_mm_srli_epi32`]."] + #[inline(always)] + pub fn _mm_srli_epi32<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_srli_epi32::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_srl_epi32`]."] + #[inline(always)] + pub fn _mm_srl_epi32(self, a: __m128i, count: __m128i) -> __m128i { + unsafe { _mm_srl_epi32(a, count) } + } + #[doc = "See [`arch::_mm_srli_epi64`]."] + #[inline(always)] + pub fn _mm_srli_epi64<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_srli_epi64::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_srl_epi64`]."] + #[inline(always)] + pub fn _mm_srl_epi64(self, a: __m128i, count: __m128i) -> __m128i { + unsafe { _mm_srl_epi64(a, count) } + } + #[doc = "See [`arch::_mm_and_si128`]."] + #[inline(always)] + pub fn _mm_and_si128(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_and_si128(a, b) } + } + #[doc = "See [`arch::_mm_andnot_si128`]."] + #[inline(always)] + pub fn _mm_andnot_si128(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_andnot_si128(a, b) } + } + #[doc = "See [`arch::_mm_or_si128`]."] + #[inline(always)] + pub fn _mm_or_si128(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_or_si128(a, b) } + } + #[doc = "See [`arch::_mm_xor_si128`]."] + #[inline(always)] + pub fn _mm_xor_si128(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_xor_si128(a, b) } + } + #[doc = "See [`arch::_mm_cmpeq_epi8`]."] + #[inline(always)] + pub fn _mm_cmpeq_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_cmpeq_epi8(a, b) } + } + #[doc = "See [`arch::_mm_cmpeq_epi16`]."] + #[inline(always)] + pub fn _mm_cmpeq_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_cmpeq_epi16(a, b) } + } + #[doc = "See [`arch::_mm_cmpeq_epi32`]."] + #[inline(always)] + pub fn _mm_cmpeq_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_cmpeq_epi32(a, b) } + } + #[doc = "See [`arch::_mm_cmpgt_epi8`]."] + #[inline(always)] + pub fn _mm_cmpgt_epi8(self, a: __m128i, b: __m128i) 
-> __m128i { + unsafe { _mm_cmpgt_epi8(a, b) } + } + #[doc = "See [`arch::_mm_cmpgt_epi16`]."] + #[inline(always)] + pub fn _mm_cmpgt_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_cmpgt_epi16(a, b) } + } + #[doc = "See [`arch::_mm_cmpgt_epi32`]."] + #[inline(always)] + pub fn _mm_cmpgt_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_cmpgt_epi32(a, b) } + } + #[doc = "See [`arch::_mm_cmplt_epi8`]."] + #[inline(always)] + pub fn _mm_cmplt_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_cmplt_epi8(a, b) } + } + #[doc = "See [`arch::_mm_cmplt_epi16`]."] + #[inline(always)] + pub fn _mm_cmplt_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_cmplt_epi16(a, b) } + } + #[doc = "See [`arch::_mm_cmplt_epi32`]."] + #[inline(always)] + pub fn _mm_cmplt_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_cmplt_epi32(a, b) } + } + #[doc = "See [`arch::_mm_cvtepi32_pd`]."] + #[inline(always)] + pub fn _mm_cvtepi32_pd(self, a: __m128i) -> __m128d { + unsafe { _mm_cvtepi32_pd(a) } + } + #[doc = "See [`arch::_mm_cvtsi32_sd`]."] + #[inline(always)] + pub fn _mm_cvtsi32_sd(self, a: __m128d, b: i32) -> __m128d { + unsafe { _mm_cvtsi32_sd(a, b) } + } + #[doc = "See [`arch::_mm_cvtepi32_ps`]."] + #[inline(always)] + pub fn _mm_cvtepi32_ps(self, a: __m128i) -> __m128 { + unsafe { _mm_cvtepi32_ps(a) } + } + #[doc = "See [`arch::_mm_cvtps_epi32`]."] + #[inline(always)] + pub fn _mm_cvtps_epi32(self, a: __m128) -> __m128i { + unsafe { _mm_cvtps_epi32(a) } + } + #[doc = "See [`arch::_mm_cvtsi32_si128`]."] + #[inline(always)] + pub fn _mm_cvtsi32_si128(self, a: i32) -> __m128i { + unsafe { _mm_cvtsi32_si128(a) } + } + #[doc = "See [`arch::_mm_cvtsi128_si32`]."] + #[inline(always)] + pub fn _mm_cvtsi128_si32(self, a: __m128i) -> i32 { + unsafe { _mm_cvtsi128_si32(a) } + } + #[doc = "See [`arch::_mm_set_epi64x`]."] + #[inline(always)] + pub fn _mm_set_epi64x(self, e1: i64, e0: i64) -> __m128i { + unsafe { 
_mm_set_epi64x(e1, e0) } + } + #[doc = "See [`arch::_mm_set_epi32`]."] + #[inline(always)] + pub fn _mm_set_epi32(self, e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i { + unsafe { _mm_set_epi32(e3, e2, e1, e0) } + } + #[doc = "See [`arch::_mm_set_epi16`]."] + #[inline(always)] + pub fn _mm_set_epi16( + self, + e7: i16, + e6: i16, + e5: i16, + e4: i16, + e3: i16, + e2: i16, + e1: i16, + e0: i16, + ) -> __m128i { + unsafe { _mm_set_epi16(e7, e6, e5, e4, e3, e2, e1, e0) } + } + #[doc = "See [`arch::_mm_set_epi8`]."] + #[inline(always)] + pub fn _mm_set_epi8( + self, + e15: i8, + e14: i8, + e13: i8, + e12: i8, + e11: i8, + e10: i8, + e9: i8, + e8: i8, + e7: i8, + e6: i8, + e5: i8, + e4: i8, + e3: i8, + e2: i8, + e1: i8, + e0: i8, + ) -> __m128i { + unsafe { + _mm_set_epi8( + e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0, + ) + } + } + #[doc = "See [`arch::_mm_set1_epi64x`]."] + #[inline(always)] + pub fn _mm_set1_epi64x(self, a: i64) -> __m128i { + unsafe { _mm_set1_epi64x(a) } + } + #[doc = "See [`arch::_mm_set1_epi32`]."] + #[inline(always)] + pub fn _mm_set1_epi32(self, a: i32) -> __m128i { + unsafe { _mm_set1_epi32(a) } + } + #[doc = "See [`arch::_mm_set1_epi16`]."] + #[inline(always)] + pub fn _mm_set1_epi16(self, a: i16) -> __m128i { + unsafe { _mm_set1_epi16(a) } + } + #[doc = "See [`arch::_mm_set1_epi8`]."] + #[inline(always)] + pub fn _mm_set1_epi8(self, a: i8) -> __m128i { + unsafe { _mm_set1_epi8(a) } + } + #[doc = "See [`arch::_mm_setr_epi32`]."] + #[inline(always)] + pub fn _mm_setr_epi32(self, e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i { + unsafe { _mm_setr_epi32(e3, e2, e1, e0) } + } + #[doc = "See [`arch::_mm_setr_epi16`]."] + #[inline(always)] + pub fn _mm_setr_epi16( + self, + e7: i16, + e6: i16, + e5: i16, + e4: i16, + e3: i16, + e2: i16, + e1: i16, + e0: i16, + ) -> __m128i { + unsafe { _mm_setr_epi16(e7, e6, e5, e4, e3, e2, e1, e0) } + } + #[doc = "See [`arch::_mm_setr_epi8`]."] + #[inline(always)] + pub fn _mm_setr_epi8( 
+ self, + e15: i8, + e14: i8, + e13: i8, + e12: i8, + e11: i8, + e10: i8, + e9: i8, + e8: i8, + e7: i8, + e6: i8, + e5: i8, + e4: i8, + e3: i8, + e2: i8, + e1: i8, + e0: i8, + ) -> __m128i { + unsafe { + _mm_setr_epi8( + e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0, + ) + } + } + #[doc = "See [`arch::_mm_setzero_si128`]."] + #[inline(always)] + pub fn _mm_setzero_si128(self) -> __m128i { + unsafe { _mm_setzero_si128() } + } + #[doc = "See [`arch::_mm_loadl_epi64`]."] + #[inline(always)] + pub unsafe fn _mm_loadl_epi64(self, mem_addr: *const __m128i) -> __m128i { + unsafe { _mm_loadl_epi64(mem_addr) } + } + #[doc = "See [`arch::_mm_load_si128`]."] + #[inline(always)] + pub unsafe fn _mm_load_si128(self, mem_addr: *const __m128i) -> __m128i { + unsafe { _mm_load_si128(mem_addr) } + } + #[doc = "See [`arch::_mm_loadu_si128`]."] + #[inline(always)] + pub unsafe fn _mm_loadu_si128(self, mem_addr: *const __m128i) -> __m128i { + unsafe { _mm_loadu_si128(mem_addr) } + } + #[doc = "See [`arch::_mm_maskmoveu_si128`]."] + #[inline(always)] + pub unsafe fn _mm_maskmoveu_si128(self, a: __m128i, mask: __m128i, mem_addr: *mut i8) { + unsafe { _mm_maskmoveu_si128(a, mask, mem_addr) } + } + #[doc = "See [`arch::_mm_store_si128`]."] + #[inline(always)] + pub unsafe fn _mm_store_si128(self, mem_addr: *mut __m128i, a: __m128i) { + unsafe { _mm_store_si128(mem_addr, a) } + } + #[doc = "See [`arch::_mm_storeu_si128`]."] + #[inline(always)] + pub unsafe fn _mm_storeu_si128(self, mem_addr: *mut __m128i, a: __m128i) { + unsafe { _mm_storeu_si128(mem_addr, a) } + } + #[doc = "See [`arch::_mm_storel_epi64`]."] + #[inline(always)] + pub unsafe fn _mm_storel_epi64(self, mem_addr: *mut __m128i, a: __m128i) { + unsafe { _mm_storel_epi64(mem_addr, a) } + } + #[doc = "See [`arch::_mm_stream_si128`]."] + #[inline(always)] + pub unsafe fn _mm_stream_si128(self, mem_addr: *mut __m128i, a: __m128i) { + unsafe { _mm_stream_si128(mem_addr, a) } + } + #[doc = "See 
[`arch::_mm_stream_si32`]."] + #[inline(always)] + pub unsafe fn _mm_stream_si32(self, mem_addr: *mut i32, a: i32) { + unsafe { _mm_stream_si32(mem_addr, a) } + } + #[doc = "See [`arch::_mm_move_epi64`]."] + #[inline(always)] + pub fn _mm_move_epi64(self, a: __m128i) -> __m128i { + unsafe { _mm_move_epi64(a) } + } + #[doc = "See [`arch::_mm_packs_epi16`]."] + #[inline(always)] + pub fn _mm_packs_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_packs_epi16(a, b) } + } + #[doc = "See [`arch::_mm_packs_epi32`]."] + #[inline(always)] + pub fn _mm_packs_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_packs_epi32(a, b) } + } + #[doc = "See [`arch::_mm_packus_epi16`]."] + #[inline(always)] + pub fn _mm_packus_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_packus_epi16(a, b) } + } + #[doc = "See [`arch::_mm_extract_epi16`]."] + #[inline(always)] + pub fn _mm_extract_epi16<const IMM8: i32>(self, a: __m128i) -> i32 { + unsafe { _mm_extract_epi16::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_insert_epi16`]."] + #[inline(always)] + pub fn _mm_insert_epi16<const IMM8: i32>(self, a: __m128i, i: i32) -> __m128i { + unsafe { _mm_insert_epi16::<IMM8>(a, i) } + } + #[doc = "See [`arch::_mm_movemask_epi8`]."] + #[inline(always)] + pub fn _mm_movemask_epi8(self, a: __m128i) -> i32 { + unsafe { _mm_movemask_epi8(a) } + } + #[doc = "See [`arch::_mm_shuffle_epi32`]."] + #[inline(always)] + pub fn _mm_shuffle_epi32<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_shuffle_epi32::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_shufflehi_epi16`]."] + #[inline(always)] + pub fn _mm_shufflehi_epi16<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_shufflehi_epi16::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_shufflelo_epi16`]."] + #[inline(always)] + pub fn _mm_shufflelo_epi16<const IMM8: i32>(self, a: __m128i) -> __m128i { + unsafe { _mm_shufflelo_epi16::<IMM8>(a) } + } + #[doc = "See [`arch::_mm_unpackhi_epi8`]."] + #[inline(always)] + pub fn _mm_unpackhi_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_unpackhi_epi8(a, b) } 
+ } + #[doc = "See [`arch::_mm_unpackhi_epi16`]."] + #[inline(always)] + pub fn _mm_unpackhi_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_unpackhi_epi16(a, b) } + } + #[doc = "See [`arch::_mm_unpackhi_epi32`]."] + #[inline(always)] + pub fn _mm_unpackhi_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_unpackhi_epi32(a, b) } + } + #[doc = "See [`arch::_mm_unpackhi_epi64`]."] + #[inline(always)] + pub fn _mm_unpackhi_epi64(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_unpackhi_epi64(a, b) } + } + #[doc = "See [`arch::_mm_unpacklo_epi8`]."] + #[inline(always)] + pub fn _mm_unpacklo_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_unpacklo_epi8(a, b) } + } + #[doc = "See [`arch::_mm_unpacklo_epi16`]."] + #[inline(always)] + pub fn _mm_unpacklo_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_unpacklo_epi16(a, b) } + } + #[doc = "See [`arch::_mm_unpacklo_epi32`]."] + #[inline(always)] + pub fn _mm_unpacklo_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_unpacklo_epi32(a, b) } + } + #[doc = "See [`arch::_mm_unpacklo_epi64`]."] + #[inline(always)] + pub fn _mm_unpacklo_epi64(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_unpacklo_epi64(a, b) } + } + #[doc = "See [`arch::_mm_add_sd`]."] + #[inline(always)] + pub fn _mm_add_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_add_sd(a, b) } + } + #[doc = "See [`arch::_mm_add_pd`]."] + #[inline(always)] + pub fn _mm_add_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_add_pd(a, b) } + } + #[doc = "See [`arch::_mm_div_sd`]."] + #[inline(always)] + pub fn _mm_div_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_div_sd(a, b) } + } + #[doc = "See [`arch::_mm_div_pd`]."] + #[inline(always)] + pub fn _mm_div_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_div_pd(a, b) } + } + #[doc = "See [`arch::_mm_max_sd`]."] + #[inline(always)] + pub fn _mm_max_sd(self, a: __m128d, b: __m128d) -> 
__m128d { + unsafe { _mm_max_sd(a, b) } + } + #[doc = "See [`arch::_mm_max_pd`]."] + #[inline(always)] + pub fn _mm_max_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_max_pd(a, b) } + } + #[doc = "See [`arch::_mm_min_sd`]."] + #[inline(always)] + pub fn _mm_min_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_min_sd(a, b) } + } + #[doc = "See [`arch::_mm_min_pd`]."] + #[inline(always)] + pub fn _mm_min_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_min_pd(a, b) } + } + #[doc = "See [`arch::_mm_mul_sd`]."] + #[inline(always)] + pub fn _mm_mul_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_mul_sd(a, b) } + } + #[doc = "See [`arch::_mm_mul_pd`]."] + #[inline(always)] + pub fn _mm_mul_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_mul_pd(a, b) } + } + #[doc = "See [`arch::_mm_sqrt_sd`]."] + #[inline(always)] + pub fn _mm_sqrt_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_sqrt_sd(a, b) } + } + #[doc = "See [`arch::_mm_sqrt_pd`]."] + #[inline(always)] + pub fn _mm_sqrt_pd(self, a: __m128d) -> __m128d { + unsafe { _mm_sqrt_pd(a) } + } + #[doc = "See [`arch::_mm_sub_sd`]."] + #[inline(always)] + pub fn _mm_sub_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_sub_sd(a, b) } + } + #[doc = "See [`arch::_mm_sub_pd`]."] + #[inline(always)] + pub fn _mm_sub_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_sub_pd(a, b) } + } + #[doc = "See [`arch::_mm_and_pd`]."] + #[inline(always)] + pub fn _mm_and_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_and_pd(a, b) } + } + #[doc = "See [`arch::_mm_andnot_pd`]."] + #[inline(always)] + pub fn _mm_andnot_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_andnot_pd(a, b) } + } + #[doc = "See [`arch::_mm_or_pd`]."] + #[inline(always)] + pub fn _mm_or_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_or_pd(a, b) } + } + #[doc = "See [`arch::_mm_xor_pd`]."] + #[inline(always)] + pub fn _mm_xor_pd(self, 
a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_xor_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmpeq_sd`]."] + #[inline(always)] + pub fn _mm_cmpeq_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpeq_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmplt_sd`]."] + #[inline(always)] + pub fn _mm_cmplt_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmplt_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmple_sd`]."] + #[inline(always)] + pub fn _mm_cmple_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmple_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmpgt_sd`]."] + #[inline(always)] + pub fn _mm_cmpgt_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpgt_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmpge_sd`]."] + #[inline(always)] + pub fn _mm_cmpge_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpge_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmpord_sd`]."] + #[inline(always)] + pub fn _mm_cmpord_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpord_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmpunord_sd`]."] + #[inline(always)] + pub fn _mm_cmpunord_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpunord_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmpneq_sd`]."] + #[inline(always)] + pub fn _mm_cmpneq_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpneq_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmpnlt_sd`]."] + #[inline(always)] + pub fn _mm_cmpnlt_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpnlt_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmpnle_sd`]."] + #[inline(always)] + pub fn _mm_cmpnle_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpnle_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmpngt_sd`]."] + #[inline(always)] + pub fn _mm_cmpngt_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpngt_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmpnge_sd`]."] + #[inline(always)] + pub fn _mm_cmpnge_sd(self, a: __m128d, b: __m128d) -> 
__m128d { + unsafe { _mm_cmpnge_sd(a, b) } + } + #[doc = "See [`arch::_mm_cmpeq_pd`]."] + #[inline(always)] + pub fn _mm_cmpeq_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpeq_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmplt_pd`]."] + #[inline(always)] + pub fn _mm_cmplt_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmplt_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmple_pd`]."] + #[inline(always)] + pub fn _mm_cmple_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmple_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmpgt_pd`]."] + #[inline(always)] + pub fn _mm_cmpgt_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpgt_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmpge_pd`]."] + #[inline(always)] + pub fn _mm_cmpge_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpge_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmpord_pd`]."] + #[inline(always)] + pub fn _mm_cmpord_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpord_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmpunord_pd`]."] + #[inline(always)] + pub fn _mm_cmpunord_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpunord_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmpneq_pd`]."] + #[inline(always)] + pub fn _mm_cmpneq_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpneq_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmpnlt_pd`]."] + #[inline(always)] + pub fn _mm_cmpnlt_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpnlt_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmpnle_pd`]."] + #[inline(always)] + pub fn _mm_cmpnle_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpnle_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmpngt_pd`]."] + #[inline(always)] + pub fn _mm_cmpngt_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_cmpngt_pd(a, b) } + } + #[doc = "See [`arch::_mm_cmpnge_pd`]."] + #[inline(always)] + pub fn _mm_cmpnge_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { 
_mm_cmpnge_pd(a, b) } + } + #[doc = "See [`arch::_mm_comieq_sd`]."] + #[inline(always)] + pub fn _mm_comieq_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_comieq_sd(a, b) } + } + #[doc = "See [`arch::_mm_comilt_sd`]."] + #[inline(always)] + pub fn _mm_comilt_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_comilt_sd(a, b) } + } + #[doc = "See [`arch::_mm_comile_sd`]."] + #[inline(always)] + pub fn _mm_comile_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_comile_sd(a, b) } + } + #[doc = "See [`arch::_mm_comigt_sd`]."] + #[inline(always)] + pub fn _mm_comigt_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_comigt_sd(a, b) } + } + #[doc = "See [`arch::_mm_comige_sd`]."] + #[inline(always)] + pub fn _mm_comige_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_comige_sd(a, b) } + } + #[doc = "See [`arch::_mm_comineq_sd`]."] + #[inline(always)] + pub fn _mm_comineq_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_comineq_sd(a, b) } + } + #[doc = "See [`arch::_mm_ucomieq_sd`]."] + #[inline(always)] + pub fn _mm_ucomieq_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_ucomieq_sd(a, b) } + } + #[doc = "See [`arch::_mm_ucomilt_sd`]."] + #[inline(always)] + pub fn _mm_ucomilt_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_ucomilt_sd(a, b) } + } + #[doc = "See [`arch::_mm_ucomile_sd`]."] + #[inline(always)] + pub fn _mm_ucomile_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_ucomile_sd(a, b) } + } + #[doc = "See [`arch::_mm_ucomigt_sd`]."] + #[inline(always)] + pub fn _mm_ucomigt_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_ucomigt_sd(a, b) } + } + #[doc = "See [`arch::_mm_ucomige_sd`]."] + #[inline(always)] + pub fn _mm_ucomige_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_ucomige_sd(a, b) } + } + #[doc = "See [`arch::_mm_ucomineq_sd`]."] + #[inline(always)] + pub fn _mm_ucomineq_sd(self, a: __m128d, b: __m128d) -> i32 { + unsafe { _mm_ucomineq_sd(a, b) } + } + #[doc = "See 
[`arch::_mm_cvtpd_ps`]."] + #[inline(always)] + pub fn _mm_cvtpd_ps(self, a: __m128d) -> __m128 { + unsafe { _mm_cvtpd_ps(a) } + } + #[doc = "See [`arch::_mm_cvtps_pd`]."] + #[inline(always)] + pub fn _mm_cvtps_pd(self, a: __m128) -> __m128d { + unsafe { _mm_cvtps_pd(a) } + } + #[doc = "See [`arch::_mm_cvtpd_epi32`]."] + #[inline(always)] + pub fn _mm_cvtpd_epi32(self, a: __m128d) -> __m128i { + unsafe { _mm_cvtpd_epi32(a) } + } + #[doc = "See [`arch::_mm_cvtsd_si32`]."] + #[inline(always)] + pub fn _mm_cvtsd_si32(self, a: __m128d) -> i32 { + unsafe { _mm_cvtsd_si32(a) } + } + #[doc = "See [`arch::_mm_cvtsd_ss`]."] + #[inline(always)] + pub fn _mm_cvtsd_ss(self, a: __m128, b: __m128d) -> __m128 { + unsafe { _mm_cvtsd_ss(a, b) } + } + #[doc = "See [`arch::_mm_cvtsd_f64`]."] + #[inline(always)] + pub fn _mm_cvtsd_f64(self, a: __m128d) -> f64 { + unsafe { _mm_cvtsd_f64(a) } + } + #[doc = "See [`arch::_mm_cvtss_sd`]."] + #[inline(always)] + pub fn _mm_cvtss_sd(self, a: __m128d, b: __m128) -> __m128d { + unsafe { _mm_cvtss_sd(a, b) } + } + #[doc = "See [`arch::_mm_cvttpd_epi32`]."] + #[inline(always)] + pub fn _mm_cvttpd_epi32(self, a: __m128d) -> __m128i { + unsafe { _mm_cvttpd_epi32(a) } + } + #[doc = "See [`arch::_mm_cvttsd_si32`]."] + #[inline(always)] + pub fn _mm_cvttsd_si32(self, a: __m128d) -> i32 { + unsafe { _mm_cvttsd_si32(a) } + } + #[doc = "See [`arch::_mm_cvttps_epi32`]."] + #[inline(always)] + pub fn _mm_cvttps_epi32(self, a: __m128) -> __m128i { + unsafe { _mm_cvttps_epi32(a) } + } + #[doc = "See [`arch::_mm_set_sd`]."] + #[inline(always)] + pub fn _mm_set_sd(self, a: f64) -> __m128d { + unsafe { _mm_set_sd(a) } + } + #[doc = "See [`arch::_mm_set1_pd`]."] + #[inline(always)] + pub fn _mm_set1_pd(self, a: f64) -> __m128d { + unsafe { _mm_set1_pd(a) } + } + #[doc = "See [`arch::_mm_set_pd1`]."] + #[inline(always)] + pub fn _mm_set_pd1(self, a: f64) -> __m128d { + unsafe { _mm_set_pd1(a) } + } + #[doc = "See [`arch::_mm_set_pd`]."] + #[inline(always)] + pub 
fn _mm_set_pd(self, a: f64, b: f64) -> __m128d { + unsafe { _mm_set_pd(a, b) } + } + #[doc = "See [`arch::_mm_setr_pd`]."] + #[inline(always)] + pub fn _mm_setr_pd(self, a: f64, b: f64) -> __m128d { + unsafe { _mm_setr_pd(a, b) } + } + #[doc = "See [`arch::_mm_setzero_pd`]."] + #[inline(always)] + pub fn _mm_setzero_pd(self) -> __m128d { + unsafe { _mm_setzero_pd() } + } + #[doc = "See [`arch::_mm_movemask_pd`]."] + #[inline(always)] + pub fn _mm_movemask_pd(self, a: __m128d) -> i32 { + unsafe { _mm_movemask_pd(a) } + } + #[doc = "See [`arch::_mm_load_pd`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm_load_pd(self, mem_addr: *const f64) -> __m128d { + unsafe { _mm_load_pd(mem_addr) } + } + #[doc = "See [`arch::_mm_load_sd`]."] + #[inline(always)] + pub unsafe fn _mm_load_sd(self, mem_addr: *const f64) -> __m128d { + unsafe { _mm_load_sd(mem_addr) } + } + #[doc = "See [`arch::_mm_loadh_pd`]."] + #[inline(always)] + pub unsafe fn _mm_loadh_pd(self, a: __m128d, mem_addr: *const f64) -> __m128d { + unsafe { _mm_loadh_pd(a, mem_addr) } + } + #[doc = "See [`arch::_mm_loadl_pd`]."] + #[inline(always)] + pub unsafe fn _mm_loadl_pd(self, a: __m128d, mem_addr: *const f64) -> __m128d { + unsafe { _mm_loadl_pd(a, mem_addr) } + } + #[doc = "See [`arch::_mm_stream_pd`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm_stream_pd(self, mem_addr: *mut f64, a: __m128d) { + unsafe { _mm_stream_pd(mem_addr, a) } + } + #[doc = "See [`arch::_mm_store_sd`]."] + #[inline(always)] + pub unsafe fn _mm_store_sd(self, mem_addr: *mut f64, a: __m128d) { + unsafe { _mm_store_sd(mem_addr, a) } + } + #[doc = "See [`arch::_mm_store_pd`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm_store_pd(self, mem_addr: *mut f64, a: __m128d) { + unsafe { _mm_store_pd(mem_addr, a) } + } + #[doc = "See [`arch::_mm_storeu_pd`]."] + #[inline(always)] + pub unsafe fn _mm_storeu_pd(self, mem_addr: *mut f64, a: 
__m128d) { + unsafe { _mm_storeu_pd(mem_addr, a) } + } + #[doc = "See [`arch::_mm_storeu_si16`]."] + #[inline(always)] + pub unsafe fn _mm_storeu_si16(self, mem_addr: *mut u8, a: __m128i) { + unsafe { _mm_storeu_si16(mem_addr, a) } + } + #[doc = "See [`arch::_mm_storeu_si32`]."] + #[inline(always)] + pub unsafe fn _mm_storeu_si32(self, mem_addr: *mut u8, a: __m128i) { + unsafe { _mm_storeu_si32(mem_addr, a) } + } + #[doc = "See [`arch::_mm_storeu_si64`]."] + #[inline(always)] + pub unsafe fn _mm_storeu_si64(self, mem_addr: *mut u8, a: __m128i) { + unsafe { _mm_storeu_si64(mem_addr, a) } + } + #[doc = "See [`arch::_mm_store1_pd`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm_store1_pd(self, mem_addr: *mut f64, a: __m128d) { + unsafe { _mm_store1_pd(mem_addr, a) } + } + #[doc = "See [`arch::_mm_store_pd1`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm_store_pd1(self, mem_addr: *mut f64, a: __m128d) { + unsafe { _mm_store_pd1(mem_addr, a) } + } + #[doc = "See [`arch::_mm_storer_pd`]."] + #[allow(clippy::cast_ptr_alignment)] + #[inline(always)] + pub unsafe fn _mm_storer_pd(self, mem_addr: *mut f64, a: __m128d) { + unsafe { _mm_storer_pd(mem_addr, a) } + } + #[doc = "See [`arch::_mm_storeh_pd`]."] + #[inline(always)] + pub unsafe fn _mm_storeh_pd(self, mem_addr: *mut f64, a: __m128d) { + unsafe { _mm_storeh_pd(mem_addr, a) } + } + #[doc = "See [`arch::_mm_storel_pd`]."] + #[inline(always)] + pub unsafe fn _mm_storel_pd(self, mem_addr: *mut f64, a: __m128d) { + unsafe { _mm_storel_pd(mem_addr, a) } + } + #[doc = "See [`arch::_mm_load1_pd`]."] + #[inline(always)] + pub unsafe fn _mm_load1_pd(self, mem_addr: *const f64) -> __m128d { + unsafe { _mm_load1_pd(mem_addr) } + } + #[doc = "See [`arch::_mm_load_pd1`]."] + #[inline(always)] + pub unsafe fn _mm_load_pd1(self, mem_addr: *const f64) -> __m128d { + unsafe { _mm_load_pd1(mem_addr) } + } + #[doc = "See [`arch::_mm_loadr_pd`]."] + #[inline(always)] + 
pub unsafe fn _mm_loadr_pd(self, mem_addr: *const f64) -> __m128d { + unsafe { _mm_loadr_pd(mem_addr) } + } + #[doc = "See [`arch::_mm_loadu_pd`]."] + #[inline(always)] + pub unsafe fn _mm_loadu_pd(self, mem_addr: *const f64) -> __m128d { + unsafe { _mm_loadu_pd(mem_addr) } + } + #[doc = "See [`arch::_mm_loadu_si16`]."] + #[inline(always)] + pub unsafe fn _mm_loadu_si16(self, mem_addr: *const u8) -> __m128i { + unsafe { _mm_loadu_si16(mem_addr) } + } + #[doc = "See [`arch::_mm_loadu_si32`]."] + #[inline(always)] + pub unsafe fn _mm_loadu_si32(self, mem_addr: *const u8) -> __m128i { + unsafe { _mm_loadu_si32(mem_addr) } + } + #[doc = "See [`arch::_mm_loadu_si64`]."] + #[inline(always)] + pub unsafe fn _mm_loadu_si64(self, mem_addr: *const u8) -> __m128i { + unsafe { _mm_loadu_si64(mem_addr) } + } + #[doc = "See [`arch::_mm_shuffle_pd`]."] + #[inline(always)] + pub fn _mm_shuffle_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_shuffle_pd::(a, b) } + } + #[doc = "See [`arch::_mm_move_sd`]."] + #[inline(always)] + pub fn _mm_move_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_move_sd(a, b) } + } + #[doc = "See [`arch::_mm_castpd_ps`]."] + #[inline(always)] + pub fn _mm_castpd_ps(self, a: __m128d) -> __m128 { + unsafe { _mm_castpd_ps(a) } + } + #[doc = "See [`arch::_mm_castpd_si128`]."] + #[inline(always)] + pub fn _mm_castpd_si128(self, a: __m128d) -> __m128i { + unsafe { _mm_castpd_si128(a) } + } + #[doc = "See [`arch::_mm_castps_pd`]."] + #[inline(always)] + pub fn _mm_castps_pd(self, a: __m128) -> __m128d { + unsafe { _mm_castps_pd(a) } + } + #[doc = "See [`arch::_mm_castps_si128`]."] + #[inline(always)] + pub fn _mm_castps_si128(self, a: __m128) -> __m128i { + unsafe { _mm_castps_si128(a) } + } + #[doc = "See [`arch::_mm_castsi128_pd`]."] + #[inline(always)] + pub fn _mm_castsi128_pd(self, a: __m128i) -> __m128d { + unsafe { _mm_castsi128_pd(a) } + } + #[doc = "See [`arch::_mm_castsi128_ps`]."] + #[inline(always)] + pub fn 
_mm_castsi128_ps(self, a: __m128i) -> __m128 { + unsafe { _mm_castsi128_ps(a) } + } + #[doc = "See [`arch::_mm_undefined_pd`]."] + #[inline(always)] + pub fn _mm_undefined_pd(self) -> __m128d { + unsafe { _mm_undefined_pd() } + } + #[doc = "See [`arch::_mm_undefined_si128`]."] + #[inline(always)] + pub fn _mm_undefined_si128(self) -> __m128i { + unsafe { _mm_undefined_si128() } + } + #[doc = "See [`arch::_mm_unpackhi_pd`]."] + #[inline(always)] + pub fn _mm_unpackhi_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_unpackhi_pd(a, b) } + } + #[doc = "See [`arch::_mm_unpacklo_pd`]."] + #[inline(always)] + pub fn _mm_unpacklo_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_unpacklo_pd(a, b) } } } diff --git a/fearless_simd/src/core_arch/x86/sse3.rs b/fearless_simd/src/core_arch/x86/sse3.rs index 45ec21aeb..c8c680789 100644 --- a/fearless_simd/src/core_arch/x86/sse3.rs +++ b/fearless_simd/src/core_arch/x86/sse3.rs @@ -1,48 +1,85 @@ -// Copyright 2024 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -//! Access to SSE3 intrinsics. +// This file is autogenerated by fearless_simd_gen -use crate::impl_macros::delegate; +use arch::*; #[cfg(target_arch = "x86")] use core::arch::x86 as arch; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as arch; - -use arch::*; - -/// A token for SSE3 intrinsics on `x86` and `x86_64`. +#[doc = "A token for `Sse3` intrinsics on `x86` and `x86_64`."] #[derive(Clone, Copy, Debug)] pub struct Sse3 { _private: (), } - -#[expect( +#[allow( clippy::missing_safety_doc, - reason = "TODO: https://github.com/linebender/fearless_simd/issues/40" + reason = "The underlying functions have their own safety docs" )] impl Sse3 { - /// Create a SIMD token. - /// - /// # Safety - /// - /// The required CPU features must be available. 
+ #[doc = r" Create a SIMD token."] + #[doc = r""] + #[doc = r" # Safety"] + #[doc = r""] + #[doc = r" The required CPU features must be available."] #[inline] pub const unsafe fn new_unchecked() -> Self { Self { _private: () } } - - delegate! { arch: - fn _mm_addsub_ps(a: __m128, b: __m128) -> __m128; - fn _mm_addsub_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_hadd_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_hadd_ps(a: __m128, b: __m128) -> __m128; - fn _mm_hsub_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_hsub_ps(a: __m128, b: __m128) -> __m128; - unsafe fn _mm_lddqu_si128(mem_addr: *const __m128i) -> __m128i; - fn _mm_movedup_pd(a: __m128d) -> __m128d; - unsafe fn _mm_loaddup_pd(mem_addr: *const f64) -> __m128d; - fn _mm_movehdup_ps(a: __m128) -> __m128; - fn _mm_moveldup_ps(a: __m128) -> __m128; + #[doc = "See [`arch::_mm_addsub_ps`]."] + #[inline(always)] + pub fn _mm_addsub_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_addsub_ps(a, b) } + } + #[doc = "See [`arch::_mm_addsub_pd`]."] + #[inline(always)] + pub fn _mm_addsub_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_addsub_pd(a, b) } + } + #[doc = "See [`arch::_mm_hadd_pd`]."] + #[inline(always)] + pub fn _mm_hadd_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_hadd_pd(a, b) } + } + #[doc = "See [`arch::_mm_hadd_ps`]."] + #[inline(always)] + pub fn _mm_hadd_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_hadd_ps(a, b) } + } + #[doc = "See [`arch::_mm_hsub_pd`]."] + #[inline(always)] + pub fn _mm_hsub_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_hsub_pd(a, b) } + } + #[doc = "See [`arch::_mm_hsub_ps`]."] + #[inline(always)] + pub fn _mm_hsub_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_hsub_ps(a, b) } + } + #[doc = "See [`arch::_mm_lddqu_si128`]."] + #[inline(always)] + pub unsafe fn _mm_lddqu_si128(self, mem_addr: *const __m128i) -> __m128i { + unsafe { _mm_lddqu_si128(mem_addr) } + } + #[doc = "See 
[`arch::_mm_movedup_pd`]."] + #[inline(always)] + pub fn _mm_movedup_pd(self, a: __m128d) -> __m128d { + unsafe { _mm_movedup_pd(a) } + } + #[doc = "See [`arch::_mm_loaddup_pd`]."] + #[inline(always)] + pub unsafe fn _mm_loaddup_pd(self, mem_addr: *const f64) -> __m128d { + unsafe { _mm_loaddup_pd(mem_addr) } + } + #[doc = "See [`arch::_mm_movehdup_ps`]."] + #[inline(always)] + pub fn _mm_movehdup_ps(self, a: __m128) -> __m128 { + unsafe { _mm_movehdup_ps(a) } + } + #[doc = "See [`arch::_mm_moveldup_ps`]."] + #[inline(always)] + pub fn _mm_moveldup_ps(self, a: __m128) -> __m128 { + unsafe { _mm_moveldup_ps(a) } } } diff --git a/fearless_simd/src/core_arch/x86/sse4_1.rs b/fearless_simd/src/core_arch/x86/sse4_1.rs index 814e8d297..fcbff22ed 100644 --- a/fearless_simd/src/core_arch/x86/sse4_1.rs +++ b/fearless_simd/src/core_arch/x86/sse4_1.rs @@ -1,91 +1,325 @@ -// Copyright 2024 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -//! Access to SSE4.1 intrinsics. +// This file is autogenerated by fearless_simd_gen -use crate::impl_macros::delegate; +use arch::*; #[cfg(target_arch = "x86")] use core::arch::x86 as arch; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as arch; - -use arch::*; - -/// A token for SSE4.1 intrinsics on `x86` and `x86_64`. +#[doc = "A token for `Sse4_1` intrinsics on `x86` and `x86_64`."] #[derive(Clone, Copy, Debug)] pub struct Sse4_1 { _private: (), } - +#[allow( + clippy::missing_safety_doc, + reason = "The underlying functions have their own safety docs" +)] impl Sse4_1 { - /// Create a SIMD token. - /// - /// # Safety - /// - /// The required CPU features must be available. + #[doc = r" Create a SIMD token."] + #[doc = r""] + #[doc = r" # Safety"] + #[doc = r""] + #[doc = r" The required CPU features must be available."] #[inline] pub const unsafe fn new_unchecked() -> Self { Self { _private: () } } - - delegate! 
{ arch: - fn _mm_blendv_epi8(a: __m128i, b: __m128i, mask: __m128i) -> __m128i; - fn _mm_blend_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_blendv_pd(a: __m128d, b: __m128d, mask: __m128d) -> __m128d; - fn _mm_blendv_ps(a: __m128, b: __m128, mask: __m128) -> __m128; - fn _mm_blend_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_blend_ps(a: __m128, b: __m128) -> __m128; - fn _mm_extract_ps(a: __m128) -> i32; - fn _mm_extract_epi8(a: __m128i) -> i32; - fn _mm_extract_epi32(a: __m128i) -> i32; - fn _mm_insert_ps(a: __m128, b: __m128) -> __m128; - fn _mm_insert_epi8(a: __m128i, i: i32) -> __m128i; - fn _mm_insert_epi32(a: __m128i, i: i32) -> __m128i; - fn _mm_max_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_max_epu16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_max_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_max_epu32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_min_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_min_epu16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_min_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_min_epu32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_packus_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cmpeq_epi64(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cvtepi8_epi16(a: __m128i) -> __m128i; - fn _mm_cvtepi8_epi32(a: __m128i) -> __m128i; - fn _mm_cvtepi8_epi64(a: __m128i) -> __m128i; - fn _mm_cvtepi16_epi32(a: __m128i) -> __m128i; - fn _mm_cvtepi16_epi64(a: __m128i) -> __m128i; - fn _mm_cvtepi32_epi64(a: __m128i) -> __m128i; - fn _mm_cvtepu8_epi16(a: __m128i) -> __m128i; - fn _mm_cvtepu8_epi32(a: __m128i) -> __m128i; - fn _mm_cvtepu8_epi64(a: __m128i) -> __m128i; - fn _mm_cvtepu16_epi32(a: __m128i) -> __m128i; - fn _mm_cvtepu16_epi64(a: __m128i) -> __m128i; - fn _mm_cvtepu32_epi64(a: __m128i) -> __m128i; - fn _mm_dp_pd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_dp_ps(a: __m128, b: __m128) -> __m128; - fn _mm_floor_pd(a: __m128d) -> __m128d; - fn _mm_floor_ps(a: __m128) -> __m128; - fn _mm_floor_sd(a: __m128d, b: 
__m128d) -> __m128d; - fn _mm_floor_ss(a: __m128, b: __m128) -> __m128; - fn _mm_ceil_pd(a: __m128d) -> __m128d; - fn _mm_ceil_ps(a: __m128) -> __m128; - fn _mm_ceil_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_ceil_ss(a: __m128, b: __m128) -> __m128; - fn _mm_round_pd(a: __m128d) -> __m128d; - fn _mm_round_ps(a: __m128) -> __m128; - fn _mm_round_sd(a: __m128d, b: __m128d) -> __m128d; - fn _mm_round_ss(a: __m128, b: __m128) -> __m128; - fn _mm_minpos_epu16(a: __m128i) -> __m128i; - fn _mm_mul_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_mullo_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_mpsadbw_epu8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_testz_si128(a: __m128i, mask: __m128i) -> i32; - fn _mm_testc_si128(a: __m128i, mask: __m128i) -> i32; - fn _mm_testnzc_si128(a: __m128i, mask: __m128i) -> i32; - fn _mm_test_all_zeros(a: __m128i, mask: __m128i) -> i32; - fn _mm_test_all_ones(a: __m128i) -> i32; - fn _mm_test_mix_ones_zeros(a: __m128i, mask: __m128i) -> i32; + #[doc = "See [`arch::_mm_blendv_epi8`]."] + #[inline(always)] + pub fn _mm_blendv_epi8(self, a: __m128i, b: __m128i, mask: __m128i) -> __m128i { + unsafe { _mm_blendv_epi8(a, b, mask) } + } + #[doc = "See [`arch::_mm_blend_epi16`]."] + #[inline(always)] + pub fn _mm_blend_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_blend_epi16::(a, b) } + } + #[doc = "See [`arch::_mm_blendv_pd`]."] + #[inline(always)] + pub fn _mm_blendv_pd(self, a: __m128d, b: __m128d, mask: __m128d) -> __m128d { + unsafe { _mm_blendv_pd(a, b, mask) } + } + #[doc = "See [`arch::_mm_blendv_ps`]."] + #[inline(always)] + pub fn _mm_blendv_ps(self, a: __m128, b: __m128, mask: __m128) -> __m128 { + unsafe { _mm_blendv_ps(a, b, mask) } + } + #[doc = "See [`arch::_mm_blend_pd`]."] + #[inline(always)] + pub fn _mm_blend_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_blend_pd::(a, b) } + } + #[doc = "See [`arch::_mm_blend_ps`]."] + #[inline(always)] + pub fn _mm_blend_ps(self, a: __m128, b: 
__m128) -> __m128 { + unsafe { _mm_blend_ps::(a, b) } + } + #[doc = "See [`arch::_mm_extract_ps`]."] + #[inline(always)] + pub fn _mm_extract_ps(self, a: __m128) -> i32 { + unsafe { _mm_extract_ps::(a) } + } + #[doc = "See [`arch::_mm_extract_epi8`]."] + #[inline(always)] + pub fn _mm_extract_epi8(self, a: __m128i) -> i32 { + unsafe { _mm_extract_epi8::(a) } + } + #[doc = "See [`arch::_mm_extract_epi32`]."] + #[inline(always)] + pub fn _mm_extract_epi32(self, a: __m128i) -> i32 { + unsafe { _mm_extract_epi32::(a) } + } + #[doc = "See [`arch::_mm_insert_ps`]."] + #[inline(always)] + pub fn _mm_insert_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_insert_ps::(a, b) } + } + #[doc = "See [`arch::_mm_insert_epi8`]."] + #[inline(always)] + pub fn _mm_insert_epi8(self, a: __m128i, i: i32) -> __m128i { + unsafe { _mm_insert_epi8::(a, i) } + } + #[doc = "See [`arch::_mm_insert_epi32`]."] + #[inline(always)] + pub fn _mm_insert_epi32(self, a: __m128i, i: i32) -> __m128i { + unsafe { _mm_insert_epi32::(a, i) } + } + #[doc = "See [`arch::_mm_max_epi8`]."] + #[inline(always)] + pub fn _mm_max_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_max_epi8(a, b) } + } + #[doc = "See [`arch::_mm_max_epu16`]."] + #[inline(always)] + pub fn _mm_max_epu16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_max_epu16(a, b) } + } + #[doc = "See [`arch::_mm_max_epi32`]."] + #[inline(always)] + pub fn _mm_max_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_max_epi32(a, b) } + } + #[doc = "See [`arch::_mm_max_epu32`]."] + #[inline(always)] + pub fn _mm_max_epu32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_max_epu32(a, b) } + } + #[doc = "See [`arch::_mm_min_epi8`]."] + #[inline(always)] + pub fn _mm_min_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_min_epi8(a, b) } + } + #[doc = "See [`arch::_mm_min_epu16`]."] + #[inline(always)] + pub fn _mm_min_epu16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { 
_mm_min_epu16(a, b) } + } + #[doc = "See [`arch::_mm_min_epi32`]."] + #[inline(always)] + pub fn _mm_min_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_min_epi32(a, b) } + } + #[doc = "See [`arch::_mm_min_epu32`]."] + #[inline(always)] + pub fn _mm_min_epu32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_min_epu32(a, b) } + } + #[doc = "See [`arch::_mm_packus_epi32`]."] + #[inline(always)] + pub fn _mm_packus_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_packus_epi32(a, b) } + } + #[doc = "See [`arch::_mm_cmpeq_epi64`]."] + #[inline(always)] + pub fn _mm_cmpeq_epi64(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_cmpeq_epi64(a, b) } + } + #[doc = "See [`arch::_mm_cvtepi8_epi16`]."] + #[inline(always)] + pub fn _mm_cvtepi8_epi16(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepi8_epi16(a) } + } + #[doc = "See [`arch::_mm_cvtepi8_epi32`]."] + #[inline(always)] + pub fn _mm_cvtepi8_epi32(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepi8_epi32(a) } + } + #[doc = "See [`arch::_mm_cvtepi8_epi64`]."] + #[inline(always)] + pub fn _mm_cvtepi8_epi64(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepi8_epi64(a) } + } + #[doc = "See [`arch::_mm_cvtepi16_epi32`]."] + #[inline(always)] + pub fn _mm_cvtepi16_epi32(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepi16_epi32(a) } + } + #[doc = "See [`arch::_mm_cvtepi16_epi64`]."] + #[inline(always)] + pub fn _mm_cvtepi16_epi64(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepi16_epi64(a) } + } + #[doc = "See [`arch::_mm_cvtepi32_epi64`]."] + #[inline(always)] + pub fn _mm_cvtepi32_epi64(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepi32_epi64(a) } + } + #[doc = "See [`arch::_mm_cvtepu8_epi16`]."] + #[inline(always)] + pub fn _mm_cvtepu8_epi16(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepu8_epi16(a) } + } + #[doc = "See [`arch::_mm_cvtepu8_epi32`]."] + #[inline(always)] + pub fn _mm_cvtepu8_epi32(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepu8_epi32(a) 
} + } + #[doc = "See [`arch::_mm_cvtepu8_epi64`]."] + #[inline(always)] + pub fn _mm_cvtepu8_epi64(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepu8_epi64(a) } + } + #[doc = "See [`arch::_mm_cvtepu16_epi32`]."] + #[inline(always)] + pub fn _mm_cvtepu16_epi32(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepu16_epi32(a) } + } + #[doc = "See [`arch::_mm_cvtepu16_epi64`]."] + #[inline(always)] + pub fn _mm_cvtepu16_epi64(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepu16_epi64(a) } + } + #[doc = "See [`arch::_mm_cvtepu32_epi64`]."] + #[inline(always)] + pub fn _mm_cvtepu32_epi64(self, a: __m128i) -> __m128i { + unsafe { _mm_cvtepu32_epi64(a) } + } + #[doc = "See [`arch::_mm_dp_pd`]."] + #[inline(always)] + pub fn _mm_dp_pd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_dp_pd::(a, b) } + } + #[doc = "See [`arch::_mm_dp_ps`]."] + #[inline(always)] + pub fn _mm_dp_ps(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_dp_ps::(a, b) } + } + #[doc = "See [`arch::_mm_floor_pd`]."] + #[inline(always)] + pub fn _mm_floor_pd(self, a: __m128d) -> __m128d { + unsafe { _mm_floor_pd(a) } + } + #[doc = "See [`arch::_mm_floor_ps`]."] + #[inline(always)] + pub fn _mm_floor_ps(self, a: __m128) -> __m128 { + unsafe { _mm_floor_ps(a) } + } + #[doc = "See [`arch::_mm_floor_sd`]."] + #[inline(always)] + pub fn _mm_floor_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_floor_sd(a, b) } + } + #[doc = "See [`arch::_mm_floor_ss`]."] + #[inline(always)] + pub fn _mm_floor_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_floor_ss(a, b) } + } + #[doc = "See [`arch::_mm_ceil_pd`]."] + #[inline(always)] + pub fn _mm_ceil_pd(self, a: __m128d) -> __m128d { + unsafe { _mm_ceil_pd(a) } + } + #[doc = "See [`arch::_mm_ceil_ps`]."] + #[inline(always)] + pub fn _mm_ceil_ps(self, a: __m128) -> __m128 { + unsafe { _mm_ceil_ps(a) } + } + #[doc = "See [`arch::_mm_ceil_sd`]."] + #[inline(always)] + pub fn _mm_ceil_sd(self, a: __m128d, b: __m128d) -> __m128d { + 
unsafe { _mm_ceil_sd(a, b) } + } + #[doc = "See [`arch::_mm_ceil_ss`]."] + #[inline(always)] + pub fn _mm_ceil_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_ceil_ss(a, b) } + } + #[doc = "See [`arch::_mm_round_pd`]."] + #[inline(always)] + pub fn _mm_round_pd(self, a: __m128d) -> __m128d { + unsafe { _mm_round_pd::(a) } + } + #[doc = "See [`arch::_mm_round_ps`]."] + #[inline(always)] + pub fn _mm_round_ps(self, a: __m128) -> __m128 { + unsafe { _mm_round_ps::(a) } + } + #[doc = "See [`arch::_mm_round_sd`]."] + #[inline(always)] + pub fn _mm_round_sd(self, a: __m128d, b: __m128d) -> __m128d { + unsafe { _mm_round_sd::(a, b) } + } + #[doc = "See [`arch::_mm_round_ss`]."] + #[inline(always)] + pub fn _mm_round_ss(self, a: __m128, b: __m128) -> __m128 { + unsafe { _mm_round_ss::(a, b) } + } + #[doc = "See [`arch::_mm_minpos_epu16`]."] + #[inline(always)] + pub fn _mm_minpos_epu16(self, a: __m128i) -> __m128i { + unsafe { _mm_minpos_epu16(a) } + } + #[doc = "See [`arch::_mm_mul_epi32`]."] + #[inline(always)] + pub fn _mm_mul_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_mul_epi32(a, b) } + } + #[doc = "See [`arch::_mm_mullo_epi32`]."] + #[inline(always)] + pub fn _mm_mullo_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_mullo_epi32(a, b) } + } + #[doc = "See [`arch::_mm_mpsadbw_epu8`]."] + #[inline(always)] + pub fn _mm_mpsadbw_epu8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_mpsadbw_epu8::(a, b) } + } + #[doc = "See [`arch::_mm_testz_si128`]."] + #[inline(always)] + pub fn _mm_testz_si128(self, a: __m128i, mask: __m128i) -> i32 { + unsafe { _mm_testz_si128(a, mask) } + } + #[doc = "See [`arch::_mm_testc_si128`]."] + #[inline(always)] + pub fn _mm_testc_si128(self, a: __m128i, mask: __m128i) -> i32 { + unsafe { _mm_testc_si128(a, mask) } + } + #[doc = "See [`arch::_mm_testnzc_si128`]."] + #[inline(always)] + pub fn _mm_testnzc_si128(self, a: __m128i, mask: __m128i) -> i32 { + unsafe { _mm_testnzc_si128(a, mask) 
} + } + #[doc = "See [`arch::_mm_test_all_zeros`]."] + #[inline(always)] + pub fn _mm_test_all_zeros(self, a: __m128i, mask: __m128i) -> i32 { + unsafe { _mm_test_all_zeros(a, mask) } + } + #[doc = "See [`arch::_mm_test_all_ones`]."] + #[inline(always)] + pub fn _mm_test_all_ones(self, a: __m128i) -> i32 { + unsafe { _mm_test_all_ones(a) } + } + #[doc = "See [`arch::_mm_test_mix_ones_zeros`]."] + #[inline(always)] + pub fn _mm_test_mix_ones_zeros(self, a: __m128i, mask: __m128i) -> i32 { + unsafe { _mm_test_mix_ones_zeros(a, mask) } + } + #[doc = "See [`arch::_mm_stream_load_si128`]."] + #[inline(always)] + pub unsafe fn _mm_stream_load_si128(self, mem_addr: *const __m128i) -> __m128i { + unsafe { _mm_stream_load_si128(mem_addr) } } } diff --git a/fearless_simd/src/core_arch/x86/sse4_2.rs b/fearless_simd/src/core_arch/x86/sse4_2.rs index 16f6d46a0..530524465 100644 --- a/fearless_simd/src/core_arch/x86/sse4_2.rs +++ b/fearless_simd/src/core_arch/x86/sse4_2.rs @@ -1,51 +1,126 @@ -// Copyright 2024 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -//! Access to SSE4.2 intrinsics. +// This file is autogenerated by fearless_simd_gen -use crate::impl_macros::delegate; +use arch::*; #[cfg(target_arch = "x86")] use core::arch::x86 as arch; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as arch; - -use arch::*; - -/// A token for SSE4.2 intrinsics on `x86` and `x86_64`. +#[doc = "A token for `Sse4_2` intrinsics on `x86` and `x86_64`."] #[derive(Clone, Copy, Debug)] pub struct Sse4_2 { _private: (), } - +#[allow( + clippy::missing_safety_doc, + reason = "The underlying functions have their own safety docs" +)] impl Sse4_2 { - /// Create a SIMD token. - /// - /// # Safety - /// - /// The required CPU features must be available. 
+ #[doc = r" Create a SIMD token."] + #[doc = r""] + #[doc = r" # Safety"] + #[doc = r""] + #[doc = r" The required CPU features must be available."] #[inline] pub const unsafe fn new_unchecked() -> Self { Self { _private: () } } - - delegate! { arch: - fn _mm_cmpistrm(a: __m128i, b: __m128i) -> __m128i; - fn _mm_cmpistri(a: __m128i, b: __m128i) -> i32; - fn _mm_cmpistrz(a: __m128i, b: __m128i) -> i32; - fn _mm_cmpistrc(a: __m128i, b: __m128i) -> i32; - fn _mm_cmpistrs(a: __m128i, b: __m128i) -> i32; - fn _mm_cmpistro(a: __m128i, b: __m128i) -> i32; - fn _mm_cmpistra(a: __m128i, b: __m128i) -> i32; - fn _mm_cmpestrm(a: __m128i, la: i32, b: __m128i, lb: i32) -> __m128i; - fn _mm_cmpestri(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32; - fn _mm_cmpestrz(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32; - fn _mm_cmpestrc(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32; - fn _mm_cmpestrs(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32; - fn _mm_cmpestro(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32; - fn _mm_cmpestra(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32; - fn _mm_crc32_u8(crc: u32, v: u8) -> u32; - fn _mm_crc32_u16(crc: u32, v: u16) -> u32; - fn _mm_crc32_u32(crc: u32, v: u32) -> u32; - fn _mm_cmpgt_epi64(a: __m128i, b: __m128i) -> __m128i; + #[doc = "See [`arch::_mm_cmpistrm`]."] + #[inline(always)] + pub fn _mm_cmpistrm(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_cmpistrm::(a, b) } + } + #[doc = "See [`arch::_mm_cmpistri`]."] + #[inline(always)] + pub fn _mm_cmpistri(self, a: __m128i, b: __m128i) -> i32 { + unsafe { _mm_cmpistri::(a, b) } + } + #[doc = "See [`arch::_mm_cmpistrz`]."] + #[inline(always)] + pub fn _mm_cmpistrz(self, a: __m128i, b: __m128i) -> i32 { + unsafe { _mm_cmpistrz::(a, b) } + } + #[doc = "See [`arch::_mm_cmpistrc`]."] + #[inline(always)] + pub fn _mm_cmpistrc(self, a: __m128i, b: __m128i) -> i32 { + unsafe { _mm_cmpistrc::(a, b) } + } + #[doc = "See [`arch::_mm_cmpistrs`]."] + #[inline(always)] + pub fn 
_mm_cmpistrs(self, a: __m128i, b: __m128i) -> i32 { + unsafe { _mm_cmpistrs::(a, b) } + } + #[doc = "See [`arch::_mm_cmpistro`]."] + #[inline(always)] + pub fn _mm_cmpistro(self, a: __m128i, b: __m128i) -> i32 { + unsafe { _mm_cmpistro::(a, b) } + } + #[doc = "See [`arch::_mm_cmpistra`]."] + #[inline(always)] + pub fn _mm_cmpistra(self, a: __m128i, b: __m128i) -> i32 { + unsafe { _mm_cmpistra::(a, b) } + } + #[doc = "See [`arch::_mm_cmpestrm`]."] + #[inline(always)] + pub fn _mm_cmpestrm( + self, + a: __m128i, + la: i32, + b: __m128i, + lb: i32, + ) -> __m128i { + unsafe { _mm_cmpestrm::(a, la, b, lb) } + } + #[doc = "See [`arch::_mm_cmpestri`]."] + #[inline(always)] + pub fn _mm_cmpestri(self, a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { + unsafe { _mm_cmpestri::(a, la, b, lb) } + } + #[doc = "See [`arch::_mm_cmpestrz`]."] + #[inline(always)] + pub fn _mm_cmpestrz(self, a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { + unsafe { _mm_cmpestrz::(a, la, b, lb) } + } + #[doc = "See [`arch::_mm_cmpestrc`]."] + #[inline(always)] + pub fn _mm_cmpestrc(self, a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { + unsafe { _mm_cmpestrc::(a, la, b, lb) } + } + #[doc = "See [`arch::_mm_cmpestrs`]."] + #[inline(always)] + pub fn _mm_cmpestrs(self, a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { + unsafe { _mm_cmpestrs::(a, la, b, lb) } + } + #[doc = "See [`arch::_mm_cmpestro`]."] + #[inline(always)] + pub fn _mm_cmpestro(self, a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { + unsafe { _mm_cmpestro::(a, la, b, lb) } + } + #[doc = "See [`arch::_mm_cmpestra`]."] + #[inline(always)] + pub fn _mm_cmpestra(self, a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 { + unsafe { _mm_cmpestra::(a, la, b, lb) } + } + #[doc = "See [`arch::_mm_crc32_u8`]."] + #[inline(always)] + pub fn _mm_crc32_u8(self, crc: u32, v: u8) -> u32 { + unsafe { _mm_crc32_u8(crc, v) } + } + #[doc = "See [`arch::_mm_crc32_u16`]."] + #[inline(always)] + pub fn _mm_crc32_u16(self, crc: u32, v: u16) -> u32 
{ + unsafe { _mm_crc32_u16(crc, v) } + } + #[doc = "See [`arch::_mm_crc32_u32`]."] + #[inline(always)] + pub fn _mm_crc32_u32(self, crc: u32, v: u32) -> u32 { + unsafe { _mm_crc32_u32(crc, v) } + } + #[doc = "See [`arch::_mm_cmpgt_epi64`]."] + #[inline(always)] + pub fn _mm_cmpgt_epi64(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_cmpgt_epi64(a, b) } } } diff --git a/fearless_simd/src/core_arch/x86/ssse3.rs b/fearless_simd/src/core_arch/x86/ssse3.rs index 3314acc8c..d09b5ceab 100644 --- a/fearless_simd/src/core_arch/x86/ssse3.rs +++ b/fearless_simd/src/core_arch/x86/ssse3.rs @@ -1,49 +1,110 @@ -// Copyright 2024 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT -//! Access to SSSE3 intrinsics. +// This file is autogenerated by fearless_simd_gen -use crate::impl_macros::delegate; +use arch::*; #[cfg(target_arch = "x86")] use core::arch::x86 as arch; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as arch; - -use arch::*; - -/// A token for SSSE3 intrinsics on `x86` and `x86_64`. +#[doc = "A token for `Ssse3` intrinsics on `x86` and `x86_64`."] #[derive(Clone, Copy, Debug)] pub struct Ssse3 { _private: (), } - +#[allow( + clippy::missing_safety_doc, + reason = "The underlying functions have their own safety docs" +)] impl Ssse3 { - /// Create a SIMD token. - /// - /// # Safety - /// - /// The required CPU features must be available. + #[doc = r" Create a SIMD token."] + #[doc = r""] + #[doc = r" # Safety"] + #[doc = r""] + #[doc = r" The required CPU features must be available."] #[inline] pub const unsafe fn new_unchecked() -> Self { Self { _private: () } } - - delegate! 
{ arch: - fn _mm_abs_epi8(a: __m128i) -> __m128i; - fn _mm_abs_epi16(a: __m128i) -> __m128i; - fn _mm_abs_epi32(a: __m128i) -> __m128i; - fn _mm_shuffle_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_alignr_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_hadd_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_hadds_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_hadd_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_hsub_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_hsubs_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_hsub_epi32(a: __m128i, b: __m128i) -> __m128i; - fn _mm_maddubs_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_mulhrs_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_sign_epi8(a: __m128i, b: __m128i) -> __m128i; - fn _mm_sign_epi16(a: __m128i, b: __m128i) -> __m128i; - fn _mm_sign_epi32(a: __m128i, b: __m128i) -> __m128i; + #[doc = "See [`arch::_mm_abs_epi8`]."] + #[inline(always)] + pub fn _mm_abs_epi8(self, a: __m128i) -> __m128i { + unsafe { _mm_abs_epi8(a) } + } + #[doc = "See [`arch::_mm_abs_epi16`]."] + #[inline(always)] + pub fn _mm_abs_epi16(self, a: __m128i) -> __m128i { + unsafe { _mm_abs_epi16(a) } + } + #[doc = "See [`arch::_mm_abs_epi32`]."] + #[inline(always)] + pub fn _mm_abs_epi32(self, a: __m128i) -> __m128i { + unsafe { _mm_abs_epi32(a) } + } + #[doc = "See [`arch::_mm_shuffle_epi8`]."] + #[inline(always)] + pub fn _mm_shuffle_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_shuffle_epi8(a, b) } + } + #[doc = "See [`arch::_mm_alignr_epi8`]."] + #[inline(always)] + pub fn _mm_alignr_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_alignr_epi8::(a, b) } + } + #[doc = "See [`arch::_mm_hadd_epi16`]."] + #[inline(always)] + pub fn _mm_hadd_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_hadd_epi16(a, b) } + } + #[doc = "See [`arch::_mm_hadds_epi16`]."] + #[inline(always)] + pub fn _mm_hadds_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_hadds_epi16(a, b) 
} + } + #[doc = "See [`arch::_mm_hadd_epi32`]."] + #[inline(always)] + pub fn _mm_hadd_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_hadd_epi32(a, b) } + } + #[doc = "See [`arch::_mm_hsub_epi16`]."] + #[inline(always)] + pub fn _mm_hsub_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_hsub_epi16(a, b) } + } + #[doc = "See [`arch::_mm_hsubs_epi16`]."] + #[inline(always)] + pub fn _mm_hsubs_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_hsubs_epi16(a, b) } + } + #[doc = "See [`arch::_mm_hsub_epi32`]."] + #[inline(always)] + pub fn _mm_hsub_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_hsub_epi32(a, b) } + } + #[doc = "See [`arch::_mm_maddubs_epi16`]."] + #[inline(always)] + pub fn _mm_maddubs_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_maddubs_epi16(a, b) } + } + #[doc = "See [`arch::_mm_mulhrs_epi16`]."] + #[inline(always)] + pub fn _mm_mulhrs_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_mulhrs_epi16(a, b) } + } + #[doc = "See [`arch::_mm_sign_epi8`]."] + #[inline(always)] + pub fn _mm_sign_epi8(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_sign_epi8(a, b) } + } + #[doc = "See [`arch::_mm_sign_epi16`]."] + #[inline(always)] + pub fn _mm_sign_epi16(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_sign_epi16(a, b) } + } + #[doc = "See [`arch::_mm_sign_epi32`]."] + #[inline(always)] + pub fn _mm_sign_epi32(self, a: __m128i, b: __m128i) -> __m128i { + unsafe { _mm_sign_epi32(a, b) } } } diff --git a/fearless_simd/src/generated/avx2.rs b/fearless_simd/src/generated/avx2.rs index 7bae56354..65c25f01c 100644 --- a/fearless_simd/src/generated/avx2.rs +++ b/fearless_simd/src/generated/avx2.rs @@ -1,4 +1,4 @@ -// Copyright 2025 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT // This file is autogenerated by fearless_simd_gen diff --git a/fearless_simd/src/generated/fallback.rs 
b/fearless_simd/src/generated/fallback.rs index d13424ddf..666c24ccf 100644 --- a/fearless_simd/src/generated/fallback.rs +++ b/fearless_simd/src/generated/fallback.rs @@ -1,4 +1,4 @@ -// Copyright 2025 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT // This file is autogenerated by fearless_simd_gen diff --git a/fearless_simd/src/generated/neon.rs b/fearless_simd/src/generated/neon.rs index 625ad67c8..6aacf9cbf 100644 --- a/fearless_simd/src/generated/neon.rs +++ b/fearless_simd/src/generated/neon.rs @@ -1,4 +1,4 @@ -// Copyright 2025 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT // This file is autogenerated by fearless_simd_gen diff --git a/fearless_simd/src/generated/ops.rs b/fearless_simd/src/generated/ops.rs index 1e6f40752..e6fc3572a 100644 --- a/fearless_simd/src/generated/ops.rs +++ b/fearless_simd/src/generated/ops.rs @@ -1,4 +1,4 @@ -// Copyright 2025 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT // This file is autogenerated by fearless_simd_gen diff --git a/fearless_simd/src/generated/simd_trait.rs b/fearless_simd/src/generated/simd_trait.rs index f16d4a6a6..899f0c242 100644 --- a/fearless_simd/src/generated/simd_trait.rs +++ b/fearless_simd/src/generated/simd_trait.rs @@ -1,4 +1,4 @@ -// Copyright 2025 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT // This file is autogenerated by fearless_simd_gen diff --git a/fearless_simd/src/generated/simd_types.rs b/fearless_simd/src/generated/simd_types.rs index 10b058deb..72883cb30 100644 --- a/fearless_simd/src/generated/simd_types.rs +++ b/fearless_simd/src/generated/simd_types.rs @@ -1,4 +1,4 @@ -// Copyright 2025 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT // 
This file is autogenerated by fearless_simd_gen diff --git a/fearless_simd/src/generated/sse4_2.rs b/fearless_simd/src/generated/sse4_2.rs index ca60398fe..1f82d9ef1 100644 --- a/fearless_simd/src/generated/sse4_2.rs +++ b/fearless_simd/src/generated/sse4_2.rs @@ -1,4 +1,4 @@ -// Copyright 2025 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT // This file is autogenerated by fearless_simd_gen diff --git a/fearless_simd/src/generated/wasm.rs b/fearless_simd/src/generated/wasm.rs index eb893a79c..aab5f54fa 100644 --- a/fearless_simd/src/generated/wasm.rs +++ b/fearless_simd/src/generated/wasm.rs @@ -1,4 +1,4 @@ -// Copyright 2025 the Fearless_SIMD Authors +// Copyright 2026 the Fearless_SIMD Authors // SPDX-License-Identifier: Apache-2.0 OR MIT // This file is autogenerated by fearless_simd_gen diff --git a/fearless_simd/src/impl_macros.rs b/fearless_simd/src/impl_macros.rs deleted file mode 100644 index d3940df6d..000000000 --- a/fearless_simd/src/impl_macros.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2024 the Fearless_SIMD Authors -// SPDX-License-Identifier: Apache-2.0 OR MIT - -//! Macros used by implementations - -#![allow( - unused_macros, - unused_imports, - reason = "Not all macros will be used by all implementations" -)] - -// Adapted from similar macro in pulp -macro_rules! delegate { - ( $prefix:path : $( - $(#[$attr: meta])* - $(unsafe $($placeholder: lifetime)?)? - fn $func: ident $(<$(const $generic: ident: $generic_ty: ty),* $(,)?>)?( - $($arg: ident: $ty: ty),* $(,)? - ) $(-> $ret: ty)?; - )*) => { - $( - #[doc=concat!("See [`", stringify!($prefix), "::", stringify!($func), "`].")] - $(#[$attr])* - #[inline(always)] - pub $(unsafe $($placeholder)?)? - fn $func $(<$(const $generic: $generic_ty),*>)?(self, $($arg: $ty),*) $(-> $ret)? 
{ - unsafe { $func $(::<$($generic,)*>)?($($arg,)*) } - } - )* - }; -} -pub(crate) use delegate; diff --git a/fearless_simd/src/lib.rs b/fearless_simd/src/lib.rs index 9a078dfa5..2e3900f7c 100644 --- a/fearless_simd/src/lib.rs +++ b/fearless_simd/src/lib.rs @@ -145,7 +145,6 @@ compile_error!("fearless_simd requires either the `std` or `libm` feature"); use libm as _; pub mod core_arch; -mod impl_macros; mod generated; mod macros; diff --git a/fearless_simd_gen/Cargo.toml b/fearless_simd_gen/Cargo.toml index 1bc71f5a5..e07d92f66 100644 --- a/fearless_simd_gen/Cargo.toml +++ b/fearless_simd_gen/Cargo.toml @@ -19,3 +19,4 @@ anyhow = "1.0.100" proc-macro2 = "1.0.94" quote = "1.0.40" clap = { version = "4.5.39", features = ["derive"] } +syn = { version = "2.0", features = ["full", "parsing", "visit"] } diff --git a/fearless_simd_gen/src/main.rs b/fearless_simd_gen/src/main.rs index 10efdfd99..e9aa27c0b 100644 --- a/fearless_simd_gen/src/main.rs +++ b/fearless_simd_gen/src/main.rs @@ -6,16 +6,17 @@ reason = "TODO: https://github.com/linebender/fearless_simd/issues/40" )] -use std::{fs::File, io::Write, path::Path}; +use std::{fs::File, path::Path}; -use clap::{Parser, ValueEnum}; +use clap::{Parser, Subcommand, ValueEnum}; use proc_macro2::TokenStream; -use crate::level::Level as _; +use crate::{level::Level as _, util::write_code}; mod arch; mod generic; mod level; +mod mk_core_arch; mod mk_fallback; mod mk_neon; mod mk_ops; @@ -24,7 +25,9 @@ mod mk_simd_types; mod mk_wasm; mod mk_x86; mod ops; +mod parse_stdarch; mod types; +mod util; #[derive(Clone, Copy, ValueEnum, Debug)] enum Module { @@ -53,6 +56,15 @@ struct Cli { #[arg(short, long, help = "Generate a specific module and print to stdout")] module: Option<Module>, + + #[command(subcommand)] + command: Option<Command>, +} + +#[derive(Subcommand)] +enum Command { + /// Generate `core_arch` wrappers from stdarch source for all architectures. 
+ CoreArch, } impl Module { @@ -69,32 +81,6 @@ impl Module { } } - fn generate(self, out: impl Into<std::process::Stdio>) { - let code = self.generate_code(); - let mut child = std::process::Command::new("rustfmt") - .stdin(std::process::Stdio::piped()) - .stdout(out) - .spawn() - .expect("`rustfmt` should spawn"); - let mut stdin = child.stdin.take().unwrap(); - stdin - .write_all( - format!( - r#" -// Copyright 2025 the Fearless_SIMD Authors -// SPDX-License-Identifier: Apache-2.0 OR MIT - -// This file is autogenerated by fearless_simd_gen - -{code}"# - ) - .as_bytes(), - ) - .unwrap(); - drop(stdin); - child.wait().expect("`rustfmt` should succeed"); - } - fn file_base(self) -> &'static str { match self { Self::SimdTypes => "simd_types", @@ -121,11 +107,35 @@ const MODULES: &[Module] = &[ ]; const FILE_BASE: &str = "./fearless_simd/src/generated"; +const CORE_ARCH_BASE: &str = "./fearless_simd/src/core_arch"; +const STDARCH_BASE: &str = "./fearless_simd_gen/stdarch"; fn main() { let cli = Cli::parse(); + + // Handle subcommands first + if let Some(command) = cli.command { + match command { + Command::CoreArch => { + let base_dir = Path::new(CORE_ARCH_BASE); + let stdarch_dir = Path::new(STDARCH_BASE); + if !base_dir.is_dir() || !stdarch_dir.is_dir() { + panic!("run in fearless_simd top directory"); + } + let stdarch_is_empty = std::fs::read_dir(stdarch_dir).unwrap().next().is_none(); + if stdarch_is_empty { + panic!( + "`stdarch` submodule is empty. Initialize or update your git submodules." 
+ ); + } + generate_core_arch("./fearless_simd_gen/stdarch", CORE_ARCH_BASE); + return; + } + } + } + if let Some(module) = cli.module { - module.generate(std::process::Stdio::inherit()); + write_code(module.generate_code(), std::process::Stdio::inherit()); } else { // generate all modules let base_dir = Path::new(FILE_BASE); @@ -135,8 +145,24 @@ fn main() { for module in MODULES { let name = module.file_base(); let path = base_dir.join(format!("{name}.rs")); - let file = File::create(&path).expect("error creating {path:?}"); - module.generate(file); + let file = File::create(&path).unwrap_or_else(|_| panic!("error creating {path:?}")); + write_code(module.generate_code(), file); } } } + +fn generate_core_arch(stdarch_path: &str, output_base: &str) { + let stdarch_root = Path::new(stdarch_path); + let output_base = Path::new(output_base); + + if !stdarch_root.exists() { + eprintln!( + "Error: stdarch directory not found at {}", + stdarch_root.display() + ); + eprintln!("Please provide a valid path to the stdarch repository."); + std::process::exit(1); + } + + mk_core_arch::generate_all_modules(stdarch_root, output_base); +} diff --git a/fearless_simd_gen/src/mk_core_arch.rs b/fearless_simd_gen/src/mk_core_arch.rs new file mode 100644 index 000000000..344ab7bc6 --- /dev/null +++ b/fearless_simd_gen/src/mk_core_arch.rs @@ -0,0 +1,335 @@ +// Copyright 2026 the Fearless_SIMD Authors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! Generate `core_arch` wrapper modules from parsed stdarch intrinsics. + +use std::{ + fs::{self, File}, + path::Path, +}; + +use proc_macro2::{Ident, Span, TokenStream}; +use quote::{ToTokens, quote}; + +use crate::util::write_code; + +/// Target architecture for code generation. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum TargetArch { + X86, + Aarch64, + Wasm32, +} + +const ALL_ARCHS: &[TargetArch] = &[TargetArch::X86, TargetArch::Aarch64, TargetArch::Wasm32]; + +impl TargetArch { + fn name(self) -> &'static str { + match self { + Self::X86 => "x86", + Self::Aarch64 => "aarch64", + Self::Wasm32 => "wasm32", + } + } + + fn parse_config(self) -> &'static crate::parse_stdarch::ArchConfig { + match self { + Self::X86 => &crate::parse_stdarch::X86_CONFIG, + Self::Aarch64 => &crate::parse_stdarch::AARCH64_CONFIG, + Self::Wasm32 => &crate::parse_stdarch::WASM32_CONFIG, + } + } + + fn feature_module_config(self, feature: &str) -> FeatureModuleConfig { + FeatureModuleConfig { + arch: self, + struct_name: if let Self::Wasm32 = self { + format!("Wasm{}", feature_to_struct_name(feature)) + } else { + feature_to_struct_name(feature) + }, + file_name: feature_to_file_name(feature), + } + } + + /// Generate the arch import statements (and any other necessary setup) for this target. + fn gen_prelude(self) -> TokenStream { + match self { + Self::X86 => quote! { + #[cfg(target_arch = "x86")] + use core::arch::x86 as arch; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64 as arch; + + use arch::*; + }, + Self::Aarch64 => quote! { + use core::arch::aarch64 as arch; + use arch::*; + + type p8 = u8; + type p16 = u16; + type p64 = u64; + type p128 = u128; + }, + Self::Wasm32 => quote! { + use core::arch::wasm32 as arch; + use arch::*; + }, + } + } + + /// Generate the doc string for struct tokens on this architecture. + fn arch_doc_suffix(self) -> &'static str { + match self { + Self::X86 => "`x86` and `x86_64`", + Self::Aarch64 => "`aarch64`", + Self::Wasm32 => "`wasm32`", + } + } + + /// Generate the constructor for this architecture. + /// + /// x86 and aarch64 require runtime feature detection, so the constructor is unsafe. + /// wasm32 has static feature detection, so the constructor is safe. 
+ fn gen_constructor(self) -> TokenStream { + match self { + Self::X86 | Self::Aarch64 => quote! { + /// Create a SIMD token. + /// + /// # Safety + /// + /// The required CPU features must be available. + #[inline] + pub const unsafe fn new_unchecked() -> Self { + Self { _private: () } + } + }, + Self::Wasm32 => quote! { + /// Create a SIMD token. + #[inline] + #[expect( + clippy::new_without_default, + reason = "other architectures have unsafe `new_unchecked` constructors and cannot implement `Default`; for symmetry, we do not do so either" + )] + pub const fn new() -> Self { + Self { _private: () } + } + }, + } + } + + fn gen_arch_mod(&self, configs: &[FeatureModuleConfig]) -> TokenStream { + let arch_doc_suffix = self.arch_doc_suffix(); + let doc = format!("Access to intrinsics on {arch_doc_suffix}."); + + let mod_decls: Vec<TokenStream> = configs + .iter() + .map(|config| { + let mod_name = Ident::new(&config.file_name, Span::call_site()); + quote! { mod #mod_name; } + }) + .collect(); + + let pub_uses: Vec<TokenStream> = configs + .iter() + .map(|config| { + let mod_name = Ident::new(&config.file_name, Span::call_site()); + let struct_name = Ident::new(&config.struct_name, Span::call_site()); + quote! { pub use #mod_name::#struct_name; } + }) + .collect(); + + quote! { + #![doc = #doc] + + #(#mod_decls)* + + #(#pub_uses)* + } + } +} + +/// Configuration for generating a target feature module. +#[derive(Debug, Clone)] +pub(crate) struct FeatureModuleConfig { + /// The target architecture. + pub arch: TargetArch, + /// The struct name (e.g., `"Sse4_1"`, `"Neon"`). + pub struct_name: String, + /// The module file name (e.g., `"sse4_1"`, `"neon"`). + pub file_name: String, +} + +impl FeatureModuleConfig { + /// Generate a complete module for a target feature. 
+ pub(crate) fn gen_feature_module(&self, intrinsics: &[syn::ItemFn]) -> TokenStream { + let struct_name = Ident::new(&self.struct_name, Span::call_site()); + let struct_name_str = &self.struct_name; + let arch_doc_suffix = self.arch.arch_doc_suffix(); + + // Generate method implementations + let methods: Vec<TokenStream> = intrinsics.iter().map(gen_method).collect(); + + // Architecture-specific imports + other setup + let prelude = self.arch.gen_prelude(); + + // Architecture-specific constructor + let constructor = self.arch.gen_constructor(); + + let doc = format!("A token for `{struct_name_str}` intrinsics on {arch_doc_suffix}."); + + quote! { + #prelude + + #[doc = #doc] + #[derive(Clone, Copy, Debug)] + pub struct #struct_name { + _private: (), + } + + #[allow( + clippy::missing_safety_doc, + reason = "The underlying functions have their own safety docs" + )] + impl #struct_name { + #constructor + + #(#methods)* + } + } + } +} + +/// Convert a feature name to a struct name (e.g., `"sse4.1"` -> `"Sse4_1"`). +fn feature_to_struct_name(feature: &str) -> String { + let mut result = String::new(); + let mut capitalize_next = true; + + for c in feature.chars() { + if c == '.' || c == '_' || c == '-' { + result.push('_'); + capitalize_next = true; + } else if capitalize_next { + result.push(c.to_ascii_uppercase()); + capitalize_next = false; + } else { + result.push(c); + } + } + + result +} + +/// Convert a feature name to a file name (e.g., `"sse4.1"` -> `"sse4_1"`). +fn feature_to_file_name(feature: &str) -> String { + feature.replace(['.', '-'], "_") +} + +/// Generate a forwarding method for an intrinsic. 
+fn gen_method(item: &syn::ItemFn) -> TokenStream { + let forwarding_params: Vec<&Ident> = item + .sig + .generics + .params + .iter() + .filter_map(|p| match p { + syn::GenericParam::Lifetime(_) => None, + syn::GenericParam::Type(type_param) => Some(&type_param.ident), + syn::GenericParam::Const(const_param) => Some(&const_param.ident), + }) + .collect(); + let turbofish = if forwarding_params.is_empty() { + quote! {} + } else { + quote! { ::<#(#forwarding_params),*> } + }; + + // Parameters for the call + let mut param_names = Vec::new(); + for arg in &item.sig.inputs { + match arg { + syn::FnArg::Typed(pat) => match &*pat.pat { + syn::Pat::Ident(syn::PatIdent { ident, .. }) => { + param_names.push(ident); + } + _ => panic!( + "Unsupported non-ident parameter pattern in intrinsic wrapper: {}", + pat.pat.to_token_stream() + ), + }, + syn::FnArg::Receiver(receiver) => { + panic!( + "Unexpected receiver argument in intrinsic wrapper: {}", + receiver.to_token_stream() + ); + } + } + } + + let name = &item.sig.ident; + let unsafe_mod = &item.sig.unsafety; + let inputs = &item.sig.inputs; + let return_type = &item.sig.output; + + // Attributes that should be copied to the generated wrapper + let extra_attrs = item.attrs.iter().filter(|attr| { + attr.path().is_ident("expect") + || attr.path().is_ident("allow") + || attr.path().is_ident("deprecated") + }); + + // Doc comment linking to the underlying intrinsic + let doc = format!("See [`arch::{name}`]."); + + let (impl_generics, _, where_clause) = item.sig.generics.split_for_impl(); + quote! { + #[doc = #doc] + #(#extra_attrs)* + #[inline(always)] + pub #unsafe_mod fn #name #impl_generics (self, #inputs) #return_type #where_clause { + unsafe { #name #turbofish (#(#param_names,)*) } + } + } +} + +/// Generate all `core_arch` modules for all supported architectures. 
+pub(crate) fn generate_all_modules(stdarch_root: &Path, output_base: &Path) { + for &arch in ALL_ARCHS { + let arch_name = arch.name(); + let intrinsics_by_feature = + crate::parse_stdarch::parse_arch(stdarch_root, arch.parse_config()).unwrap(); + + let output_dir = output_base.join(arch_name); + + // Ensure the output directory exists + fs::create_dir_all(&output_dir).unwrap_or_else(|_| panic!("error creating {output_dir:?}")); + + let mut present_modules = Vec::new(); + for (feature, config) in arch + .parse_config() + .supported_features + .iter() + .map(|feature| (*feature, arch.feature_module_config(feature))) + { + let Some(intrinsics) = intrinsics_by_feature + .get(feature) + .filter(|intrinsics| !intrinsics.is_empty()) + else { + panic!("No {feature} intrinsics found"); + }; + + let code = config.gen_feature_module(intrinsics); + let path = output_dir.join(format!("{}.rs", config.file_name)); + let file = File::create(&path).unwrap_or_else(|_| panic!("error creating {path:?}")); + write_code(code, file); + present_modules.push(config.clone()); + } + + let mod_code = arch.gen_arch_mod(&present_modules); + let mod_path = output_dir.join("mod.rs"); + let mod_file = + File::create(&mod_path).unwrap_or_else(|_| panic!("error creating {mod_path:?}")); + write_code(mod_code, mod_file); + } +} diff --git a/fearless_simd_gen/src/parse_stdarch.rs b/fearless_simd_gen/src/parse_stdarch.rs new file mode 100644 index 000000000..e11670a54 --- /dev/null +++ b/fearless_simd_gen/src/parse_stdarch.rs @@ -0,0 +1,279 @@ +// Copyright 2026 the Fearless_SIMD Authors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! Parse stdarch source files to extract intrinsic function signatures. + +use std::{ + collections::{BTreeMap, HashSet}, + fs, + path::Path, +}; + +use anyhow::{Context, Result}; +use proc_macro2::{TokenStream, TokenTree}; +use quote::ToTokens; +use syn::{ItemFn, LitStr, visit::Visit}; + +/// Recursively check if a token stream contains a specific literal string. 
+fn contains_literal(tokens: &TokenStream, target: &str) -> bool { + for token in tokens.clone() { + match token { + TokenTree::Literal(lit) if lit.to_string() == target => return true, + TokenTree::Group(group) => { + if contains_literal(&group.stream(), target) { + return true; + } + } + _ => {} + } + } + false +} + +/// Recursively check if a token stream contains a specific identifier. +fn contains_ident(tokens: &TokenStream, target: &str) -> bool { + for token in tokens.clone() { + match token { + TokenTree::Ident(ident) if ident == target => return true, + TokenTree::Group(group) => { + if contains_ident(&group.stream(), target) { + return true; + } + } + _ => {} + } + } + false +} + +/// Recursively check if a token stream contains `not(target_arch = "arm")`. +fn contains_not_target_arch_arm(tokens: &TokenStream) -> bool { + let tokens: Vec<_> = tokens.clone().into_iter().collect(); + for (i, token) in tokens.iter().enumerate() { + match token { + TokenTree::Ident(ident) if ident == "not" => { + // Check the next token is a group with target_arch = "arm" + if let Some(TokenTree::Group(group)) = tokens.get(i + 1) { + let inner = group.stream(); + if contains_ident(&inner, "target_arch") && contains_literal(&inner, "\"arm\"") + { + return true; + } + } + } + TokenTree::Group(group) => { + if contains_not_target_arch_arm(&group.stream()) { + return true; + } + } + _ => {} + } + } + false +} + +/// A visitor that extracts intrinsic functions from a Rust file. +struct IntrinsicVisitor { + intrinsics: Vec<ItemFn>, + /// Set of all previously-visited intrinsic names. Some modules contain duplicate intrinsics (e.g. NEON sometimes + /// defines intrinsics once per endianness). + visited: HashSet<String>, + /// The target feature for the file/module being parsed. 
+ module_feature: String, +} + +impl IntrinsicVisitor { + fn new(module_feature: String) -> Self { + Self { + intrinsics: Vec::new(), + visited: HashSet::new(), + module_feature, + } + } +} + +impl<'ast> Visit<'ast> for IntrinsicVisitor { + fn visit_item_fn(&mut self, node: &'ast ItemFn) { + // Skip non-public functions + if !matches!(node.vis, syn::Visibility::Public(_)) { + return; + } + + let name = node.sig.ident.to_string(); + if !self.visited.insert(name) { + return; + } + + // Skip functions that don't enable the target feature we're looking for. This should filter out non-intrinsics. + let mut target_features = Vec::new(); + for attr in &node.attrs { + // Skip directly unstable intrinsics + if attr.path().is_ident("unstable") { + return; + } + + // Skip intrinsics that aren't usable on stable Rust yet. + // Check for `since = "CURRENT_RUSTC_VERSION"` in both direct #[stable] and #[cfg_attr(..., stable(...))] + if (attr.path().is_ident("stable") || attr.path().is_ident("cfg_attr")) + && contains_literal(&attr.meta.to_token_stream(), "\"CURRENT_RUSTC_VERSION\"") + { + return; + } + + // Skip intrinsics that are unstable on non-ARM32 platforms: + // #[cfg_attr(not(target_arch = "arm"), unstable(...))] + // But keep intrinsics that are only unstable on ARM32: + // #[cfg_attr(target_arch = "arm", unstable(...))] + if attr.path().is_ident("cfg_attr") { + let tokens = attr.meta.to_token_stream(); + if contains_not_target_arch_arm(&tokens) && contains_ident(&tokens, "unstable") { + return; + } + } + + if !attr.path().is_ident("target_feature") { + continue; + } + + if let Err(err) = attr.parse_nested_meta(|meta| { + if meta.path.is_ident("enable") { + let value = meta.value()?; + let lit: LitStr = value.parse()?; + target_features.extend(lit.value().split(',').map(str::trim).map(String::from)); + } + Ok(()) + }) { + panic!( + "Failed to parse #[target_feature] attribute on {}: {err}", + attr.path().to_token_stream() + ); + } + } + + if target_features.is_empty() + 
|| !target_features + .iter() + .any(|feature| feature == &self.module_feature) + { + return; + } + + self.intrinsics.push(node.clone()); + + // Continue visiting + syn::visit::visit_item_fn(self, node); + } +} + +/// Parse a single Rust source file and extract intrinsics. +pub(crate) fn parse_file(path: &Path, module_feature: &str) -> Result<Vec<ItemFn>> { + let content = fs::read_to_string(path) + .with_context(|| format!("Failed to read file: {}", path.display()))?; + + let syntax = syn::parse_file(&content) + .with_context(|| format!("Failed to parse file: {}", path.display()))?; + + let mut visitor = IntrinsicVisitor::new(module_feature.to_string()); + visitor.visit_file(&syntax); + + Ok(visitor.intrinsics) +} + +/// Architecture definition with its source directories and target features. +#[derive(Debug, Clone)] +pub(crate) struct ArchConfig { + pub source_dirs: &'static [&'static str], + pub module_feature: fn(&str) -> &str, + pub supported_features: &'static [&'static str], +} + +pub(crate) const X86_CONFIG: ArchConfig = ArchConfig { + source_dirs: &["crates/core_arch/src/x86"], + module_feature: |file_stem| match file_stem { + "sse41" => "sse4.1", + "sse42" => "sse4.2", + _ => file_stem, + }, + supported_features: &[ + "sse", "sse2", "sse3", "ssse3", "sse4.1", "sse4.2", "avx", "avx2", "fma", + ], +}; + +/// aarch64 architecture configuration. +pub(crate) const AARCH64_CONFIG: ArchConfig = ArchConfig { + source_dirs: &[ + "crates/core_arch/src/aarch64/neon", + "crates/core_arch/src/arm_shared/neon", + ], + module_feature: |_file_stem| "neon", + supported_features: &["neon"], +}; + +/// wasm32 architecture configuration. 
+pub(crate) const WASM32_CONFIG: ArchConfig = ArchConfig { + source_dirs: &["crates/core_arch/src/wasm32"], + module_feature: |_file_stem| "simd128", + supported_features: &["simd128"], +}; + +pub(crate) type IntrinsicsByFeature = BTreeMap<String, Vec<ItemFn>>; + +fn module_file_stem(path: &Path) -> Option<&str> { + if !path.is_file() || path.extension().is_none_or(|ext| ext != "rs") { + return None; + } + + let file_stem = path.file_stem()?.to_str()?; + if file_stem == "mod" + || file_stem == "test" + || file_stem.starts_with("test_") + || file_stem == "macros" + { + return None; + } + + Some(file_stem) +} + +/// Parse all intrinsics from a stdarch directory for a given architecture. +pub(crate) fn parse_arch(stdarch_root: &Path, config: &ArchConfig) -> Result<IntrinsicsByFeature> { + let mut result: IntrinsicsByFeature = BTreeMap::new(); + let supported_set = HashSet::<_>::from_iter(config.supported_features.iter().copied()); + + for source_dir in config.source_dirs { + let dir_path = stdarch_root.join(source_dir); + if !dir_path.exists() { + continue; + } + + let mut entries: Vec<_> = fs::read_dir(&dir_path) + .unwrap() + .map(|entry| entry.unwrap()) + .collect(); + // Nothing we're doing *should* vary based on file order, but just in case... 
+ entries.sort_by_cached_key(|entry| entry.file_name()); + for entry in entries { + let path = entry.path(); + let Some(file_stem) = module_file_stem(&path) else { + continue; + }; + + let module_feature = (config.module_feature)(file_stem); + if !supported_set.contains(module_feature) { + continue; + } + + let intrinsics = parse_file(&path, module_feature)?; + match result.get_mut(module_feature) { + Some(existing_intrinsics) => { + existing_intrinsics.extend(intrinsics); + } + None => { + result.insert(module_feature.to_string(), intrinsics); + } + } + } + } + + Ok(result) +} diff --git a/fearless_simd_gen/src/util.rs b/fearless_simd_gen/src/util.rs new file mode 100644 index 000000000..b77697e4d --- /dev/null +++ b/fearless_simd_gen/src/util.rs @@ -0,0 +1,27 @@ +use proc_macro2::TokenStream; +use std::io::Write as _; + +pub(crate) fn write_code(code: TokenStream, out: impl Into<std::process::Stdio>) { + let mut child = std::process::Command::new("rustfmt") + .stdin(std::process::Stdio::piped()) + .stdout(out) + .spawn() + .expect("`rustfmt` should spawn"); + let mut stdin = child.stdin.take().unwrap(); + stdin + .write_all( + format!( + r#" +// Copyright 2026 the Fearless_SIMD Authors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// This file is autogenerated by fearless_simd_gen + +{code}"# + ) + .as_bytes(), + ) + .unwrap(); + drop(stdin); + child.wait().expect("`rustfmt` should succeed"); +} diff --git a/fearless_simd_gen/stdarch b/fearless_simd_gen/stdarch new file mode 160000 index 000000000..ccb432491 --- /dev/null +++ b/fearless_simd_gen/stdarch @@ -0,0 +1 @@ +Subproject commit ccb43249108449b858cb869515040ec6de16523d