// core/stdarch/crates/core_arch/src/aarch64/neon/mod.rs

//! ARMv8 ASIMD intrinsics

#![allow(non_camel_case_types)]

// Machine-generated intrinsic definitions; kept out of rustfmt's reach.
#[rustfmt::skip]
mod generated;
#[rustfmt::skip]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub use self::generated::*;

// FIXME: replace neon with asimd

use crate::{
    core_arch::{arm_shared::*, simd::*},
    hint::unreachable_unchecked,
    intrinsics::simd::*,
    mem::{transmute, zeroed},
    ptr::{read_unaligned, write_unaligned},
};
// `assert_instr` verifies the generated machine instruction in tests only.
#[cfg(test)]
use stdarch_test::assert_instr;
22
// The `N x T` syntax declares a SIMD vector type of N lanes of T.
// Only the `f64` vector types are declared here; see the glob import of
// `arm_shared` above for where the other NEON vector types come from.
types! {
    #![stable(feature = "neon_intrinsics", since = "1.59.0")]

    /// ARM-specific 64-bit wide vector of one packed `f64`.
    pub struct float64x1_t(1 x f64); // FIXME: check this!
    /// ARM-specific 128-bit wide vector of two packed `f64`.
    pub struct float64x2_t(2 x f64);
}
31
/// ARM-specific type containing two `float64x1_t` vectors.
// `repr(C)` guarantees the fields are laid out consecutively in
// declaration order, so the aggregate matches the in-memory layout of
// two back-to-back 64-bit vectors.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x1x2_t(pub float64x1_t, pub float64x1_t);
/// ARM-specific type containing three `float64x1_t` vectors.
// `repr(C)`: fields are laid out consecutively in declaration order.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x1x3_t(pub float64x1_t, pub float64x1_t, pub float64x1_t);
/// ARM-specific type containing four `float64x1_t` vectors.
// `repr(C)`: fields are laid out consecutively in declaration order.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x1x4_t(
    pub float64x1_t,
    pub float64x1_t,
    pub float64x1_t,
    pub float64x1_t,
);
52
/// ARM-specific type containing two `float64x2_t` vectors.
// `repr(C)`: fields are laid out consecutively in declaration order.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x2x2_t(pub float64x2_t, pub float64x2_t);
/// ARM-specific type containing three `float64x2_t` vectors.
// `repr(C)`: fields are laid out consecutively in declaration order.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x2x3_t(pub float64x2_t, pub float64x2_t, pub float64x2_t);
/// ARM-specific type containing four `float64x2_t` vectors.
// `repr(C)`: fields are laid out consecutively in declaration order.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub struct float64x2x4_t(
    pub float64x2_t,
    pub float64x2_t,
    pub float64x2_t,
    pub float64x2_t,
);
73
74#[allow(improper_ctypes)]
75extern "unadjusted" {
76    // absolute value
77    #[link_name = "llvm.aarch64.neon.abs.i64"]
78    fn vabsd_s64_(a: i64) -> i64;
79    #[link_name = "llvm.aarch64.neon.abs.v1i64"]
80    fn vabs_s64_(a: int64x1_t) -> int64x1_t;
81    #[link_name = "llvm.aarch64.neon.abs.v2i64"]
82    fn vabsq_s64_(a: int64x2_t) -> int64x2_t;
83
84    #[link_name = "llvm.aarch64.neon.suqadd.v8i8"]
85    fn vuqadd_s8_(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
86    #[link_name = "llvm.aarch64.neon.suqadd.v16i8"]
87    fn vuqaddq_s8_(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
88    #[link_name = "llvm.aarch64.neon.suqadd.v4i16"]
89    fn vuqadd_s16_(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
90    #[link_name = "llvm.aarch64.neon.suqadd.v8i16"]
91    fn vuqaddq_s16_(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
92    #[link_name = "llvm.aarch64.neon.suqadd.v2i32"]
93    fn vuqadd_s32_(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
94    #[link_name = "llvm.aarch64.neon.suqadd.v4i32"]
95    fn vuqaddq_s32_(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
96    #[link_name = "llvm.aarch64.neon.suqadd.v1i64"]
97    fn vuqadd_s64_(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
98    #[link_name = "llvm.aarch64.neon.suqadd.v2i64"]
99    fn vuqaddq_s64_(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
100
101    #[link_name = "llvm.aarch64.neon.usqadd.v8i8"]
102    fn vsqadd_u8_(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
103    #[link_name = "llvm.aarch64.neon.usqadd.v16i8"]
104    fn vsqaddq_u8_(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
105    #[link_name = "llvm.aarch64.neon.usqadd.v4i16"]
106    fn vsqadd_u16_(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
107    #[link_name = "llvm.aarch64.neon.usqadd.v8i16"]
108    fn vsqaddq_u16_(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
109    #[link_name = "llvm.aarch64.neon.usqadd.v2i32"]
110    fn vsqadd_u32_(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
111    #[link_name = "llvm.aarch64.neon.usqadd.v4i32"]
112    fn vsqaddq_u32_(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
113    #[link_name = "llvm.aarch64.neon.usqadd.v1i64"]
114    fn vsqadd_u64_(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
115    #[link_name = "llvm.aarch64.neon.usqadd.v2i64"]
116    fn vsqaddq_u64_(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
117
118    #[link_name = "llvm.aarch64.neon.addp.v8i16"]
119    fn vpaddq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
120    #[link_name = "llvm.aarch64.neon.addp.v4i32"]
121    fn vpaddq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
122    #[link_name = "llvm.aarch64.neon.addp.v2i64"]
123    fn vpaddq_s64_(a: int64x2_t, b: int64x2_t) -> int64x2_t;
124    #[link_name = "llvm.aarch64.neon.addp.v16i8"]
125    fn vpaddq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
126
127    #[link_name = "llvm.aarch64.neon.saddv.i32.v4i16"]
128    fn vaddv_s16_(a: int16x4_t) -> i16;
129    #[link_name = "llvm.aarch64.neon.saddv.i32.v2i32"]
130    fn vaddv_s32_(a: int32x2_t) -> i32;
131    #[link_name = "llvm.aarch64.neon.saddv.i32.v8i8"]
132    fn vaddv_s8_(a: int8x8_t) -> i8;
133    #[link_name = "llvm.aarch64.neon.uaddv.i32.v4i16"]
134    fn vaddv_u16_(a: uint16x4_t) -> u16;
135    #[link_name = "llvm.aarch64.neon.uaddv.i32.v2i32"]
136    fn vaddv_u32_(a: uint32x2_t) -> u32;
137    #[link_name = "llvm.aarch64.neon.uaddv.i32.v8i8"]
138    fn vaddv_u8_(a: uint8x8_t) -> u8;
139    #[link_name = "llvm.aarch64.neon.saddv.i32.v8i16"]
140    fn vaddvq_s16_(a: int16x8_t) -> i16;
141    #[link_name = "llvm.aarch64.neon.saddv.i32.v4i32"]
142    fn vaddvq_s32_(a: int32x4_t) -> i32;
143    #[link_name = "llvm.aarch64.neon.saddv.i32.v16i8"]
144    fn vaddvq_s8_(a: int8x16_t) -> i8;
145    #[link_name = "llvm.aarch64.neon.uaddv.i32.v8i16"]
146    fn vaddvq_u16_(a: uint16x8_t) -> u16;
147    #[link_name = "llvm.aarch64.neon.uaddv.i32.v4i32"]
148    fn vaddvq_u32_(a: uint32x4_t) -> u32;
149    #[link_name = "llvm.aarch64.neon.uaddv.i32.v16i8"]
150    fn vaddvq_u8_(a: uint8x16_t) -> u8;
151    #[link_name = "llvm.aarch64.neon.saddv.i64.v2i64"]
152    fn vaddvq_s64_(a: int64x2_t) -> i64;
153    #[link_name = "llvm.aarch64.neon.uaddv.i64.v2i64"]
154    fn vaddvq_u64_(a: uint64x2_t) -> u64;
155
156    #[link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"]
157    fn vaddlv_s8_(a: int8x8_t) -> i32;
158    #[link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"]
159    fn vaddlv_u8_(a: uint8x8_t) -> u32;
160    #[link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"]
161    fn vaddlvq_s8_(a: int8x16_t) -> i32;
162    #[link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"]
163    fn vaddlvq_u8_(a: uint8x16_t) -> u32;
164
165    #[link_name = "llvm.aarch64.neon.smaxv.i8.v8i8"]
166    fn vmaxv_s8_(a: int8x8_t) -> i8;
167    #[link_name = "llvm.aarch64.neon.smaxv.i8.v16i8"]
168    fn vmaxvq_s8_(a: int8x16_t) -> i8;
169    #[link_name = "llvm.aarch64.neon.smaxv.i16.v4i16"]
170    fn vmaxv_s16_(a: int16x4_t) -> i16;
171    #[link_name = "llvm.aarch64.neon.smaxv.i16.v8i16"]
172    fn vmaxvq_s16_(a: int16x8_t) -> i16;
173    #[link_name = "llvm.aarch64.neon.smaxv.i32.v2i32"]
174    fn vmaxv_s32_(a: int32x2_t) -> i32;
175    #[link_name = "llvm.aarch64.neon.smaxv.i32.v4i32"]
176    fn vmaxvq_s32_(a: int32x4_t) -> i32;
177
178    #[link_name = "llvm.aarch64.neon.umaxv.i8.v8i8"]
179    fn vmaxv_u8_(a: uint8x8_t) -> u8;
180    #[link_name = "llvm.aarch64.neon.umaxv.i8.v16i8"]
181    fn vmaxvq_u8_(a: uint8x16_t) -> u8;
182    #[link_name = "llvm.aarch64.neon.umaxv.i16.v4i16"]
183    fn vmaxv_u16_(a: uint16x4_t) -> u16;
184    #[link_name = "llvm.aarch64.neon.umaxv.i16.v8i16"]
185    fn vmaxvq_u16_(a: uint16x8_t) -> u16;
186    #[link_name = "llvm.aarch64.neon.umaxv.i32.v2i32"]
187    fn vmaxv_u32_(a: uint32x2_t) -> u32;
188    #[link_name = "llvm.aarch64.neon.umaxv.i32.v4i32"]
189    fn vmaxvq_u32_(a: uint32x4_t) -> u32;
190
191    #[link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"]
192    fn vmaxv_f32_(a: float32x2_t) -> f32;
193    #[link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"]
194    fn vmaxvq_f32_(a: float32x4_t) -> f32;
195    #[link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"]
196    fn vmaxvq_f64_(a: float64x2_t) -> f64;
197
198    #[link_name = "llvm.aarch64.neon.sminv.i8.v8i8"]
199    fn vminv_s8_(a: int8x8_t) -> i8;
200    #[link_name = "llvm.aarch64.neon.sminv.i8.v16i8"]
201    fn vminvq_s8_(a: int8x16_t) -> i8;
202    #[link_name = "llvm.aarch64.neon.sminv.i16.v4i16"]
203    fn vminv_s16_(a: int16x4_t) -> i16;
204    #[link_name = "llvm.aarch64.neon.sminv.i16.v8i16"]
205    fn vminvq_s16_(a: int16x8_t) -> i16;
206    #[link_name = "llvm.aarch64.neon.sminv.i32.v2i32"]
207    fn vminv_s32_(a: int32x2_t) -> i32;
208    #[link_name = "llvm.aarch64.neon.sminv.i32.v4i32"]
209    fn vminvq_s32_(a: int32x4_t) -> i32;
210
211    #[link_name = "llvm.aarch64.neon.uminv.i8.v8i8"]
212    fn vminv_u8_(a: uint8x8_t) -> u8;
213    #[link_name = "llvm.aarch64.neon.uminv.i8.v16i8"]
214    fn vminvq_u8_(a: uint8x16_t) -> u8;
215    #[link_name = "llvm.aarch64.neon.uminv.i16.v4i16"]
216    fn vminv_u16_(a: uint16x4_t) -> u16;
217    #[link_name = "llvm.aarch64.neon.uminv.i16.v8i16"]
218    fn vminvq_u16_(a: uint16x8_t) -> u16;
219    #[link_name = "llvm.aarch64.neon.uminv.i32.v2i32"]
220    fn vminv_u32_(a: uint32x2_t) -> u32;
221    #[link_name = "llvm.aarch64.neon.uminv.i32.v4i32"]
222    fn vminvq_u32_(a: uint32x4_t) -> u32;
223
224    #[link_name = "llvm.aarch64.neon.fminv.f32.v2f32"]
225    fn vminv_f32_(a: float32x2_t) -> f32;
226    #[link_name = "llvm.aarch64.neon.fminv.f32.v4f32"]
227    fn vminvq_f32_(a: float32x4_t) -> f32;
228    #[link_name = "llvm.aarch64.neon.fminv.f64.v2f64"]
229    fn vminvq_f64_(a: float64x2_t) -> f64;
230
231    #[link_name = "llvm.aarch64.neon.sminp.v16i8"]
232    fn vpminq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
233    #[link_name = "llvm.aarch64.neon.sminp.v8i16"]
234    fn vpminq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
235    #[link_name = "llvm.aarch64.neon.sminp.v4i32"]
236    fn vpminq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
237    #[link_name = "llvm.aarch64.neon.uminp.v16i8"]
238    fn vpminq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
239    #[link_name = "llvm.aarch64.neon.uminp.v8i16"]
240    fn vpminq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
241    #[link_name = "llvm.aarch64.neon.uminp.v4i32"]
242    fn vpminq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
243    #[link_name = "llvm.aarch64.neon.fminp.4f32"]
244    fn vpminq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
245    #[link_name = "llvm.aarch64.neon.fminp.v2f64"]
246    fn vpminq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
247
248    #[link_name = "llvm.aarch64.neon.smaxp.v16i8"]
249    fn vpmaxq_s8_(a: int8x16_t, b: int8x16_t) -> int8x16_t;
250    #[link_name = "llvm.aarch64.neon.smaxp.v8i16"]
251    fn vpmaxq_s16_(a: int16x8_t, b: int16x8_t) -> int16x8_t;
252    #[link_name = "llvm.aarch64.neon.smaxp.v4i32"]
253    fn vpmaxq_s32_(a: int32x4_t, b: int32x4_t) -> int32x4_t;
254    #[link_name = "llvm.aarch64.neon.umaxp.v16i8"]
255    fn vpmaxq_u8_(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
256    #[link_name = "llvm.aarch64.neon.umaxp.v8i16"]
257    fn vpmaxq_u16_(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
258    #[link_name = "llvm.aarch64.neon.umaxp.v4i32"]
259    fn vpmaxq_u32_(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
260    #[link_name = "llvm.aarch64.neon.fmaxp.4f32"]
261    fn vpmaxq_f32_(a: float32x4_t, b: float32x4_t) -> float32x4_t;
262    #[link_name = "llvm.aarch64.neon.fmaxp.v2f64"]
263    fn vpmaxq_f64_(a: float64x2_t, b: float64x2_t) -> float64x2_t;
264
265    #[link_name = "llvm.aarch64.neon.tbl1.v8i8"]
266    fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
267    #[link_name = "llvm.aarch64.neon.tbl1.v16i8"]
268    fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
269
270    #[link_name = "llvm.aarch64.neon.tbx1.v8i8"]
271    fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
272    #[link_name = "llvm.aarch64.neon.tbx1.v16i8"]
273    fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
274
275    #[link_name = "llvm.aarch64.neon.tbl2.v8i8"]
276    fn vqtbl2(a0: int8x16_t, a1: int8x16_t, b: uint8x8_t) -> int8x8_t;
277    #[link_name = "llvm.aarch64.neon.tbl2.v16i8"]
278    fn vqtbl2q(a0: int8x16_t, a1: int8x16_t, b: uint8x16_t) -> int8x16_t;
279
280    #[link_name = "llvm.aarch64.neon.tbx2.v8i8"]
281    fn vqtbx2(a: int8x8_t, b0: int8x16_t, b1: int8x16_t, c: uint8x8_t) -> int8x8_t;
282    #[link_name = "llvm.aarch64.neon.tbx2.v16i8"]
283    fn vqtbx2q(a: int8x16_t, b0: int8x16_t, b1: int8x16_t, c: uint8x16_t) -> int8x16_t;
284
285    #[link_name = "llvm.aarch64.neon.tbl3.v8i8"]
286    fn vqtbl3(a0: int8x16_t, a1: int8x16_t, a2: int8x16_t, b: uint8x8_t) -> int8x8_t;
287    #[link_name = "llvm.aarch64.neon.tbl3.v16i8"]
288    fn vqtbl3q(a0: int8x16_t, a1: int8x16_t, a2: int8x16_t, b: uint8x16_t) -> int8x16_t;
289
290    #[link_name = "llvm.aarch64.neon.tbx3.v8i8"]
291    fn vqtbx3(a: int8x8_t, b0: int8x16_t, b1: int8x16_t, b2: int8x16_t, c: uint8x8_t) -> int8x8_t;
292    #[link_name = "llvm.aarch64.neon.tbx3.v16i8"]
293    fn vqtbx3q(
294        a: int8x16_t,
295        b0: int8x16_t,
296        b1: int8x16_t,
297        b2: int8x16_t,
298        c: uint8x16_t,
299    ) -> int8x16_t;
300
301    #[link_name = "llvm.aarch64.neon.tbl4.v8i8"]
302    fn vqtbl4(a0: int8x16_t, a1: int8x16_t, a2: int8x16_t, a3: int8x16_t, b: uint8x8_t)
303        -> int8x8_t;
304    #[link_name = "llvm.aarch64.neon.tbl4.v16i8"]
305    fn vqtbl4q(
306        a0: int8x16_t,
307        a1: int8x16_t,
308        a2: int8x16_t,
309        a3: int8x16_t,
310        b: uint8x16_t,
311    ) -> int8x16_t;
312
313    #[link_name = "llvm.aarch64.neon.tbx4.v8i8"]
314    fn vqtbx4(
315        a: int8x8_t,
316        b0: int8x16_t,
317        b1: int8x16_t,
318        b2: int8x16_t,
319        b3: int8x16_t,
320        c: uint8x8_t,
321    ) -> int8x8_t;
322
323    #[link_name = "llvm.aarch64.neon.tbx4.v16i8"]
324    fn vqtbx4q(
325        a: int8x16_t,
326        b0: int8x16_t,
327        b1: int8x16_t,
328        b2: int8x16_t,
329        b3: int8x16_t,
330        c: uint8x16_t,
331    ) -> int8x16_t;
332
333    #[link_name = "llvm.aarch64.neon.vsli.v8i8"]
334    fn vsli_n_s8_(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
335    #[link_name = "llvm.aarch64.neon.vsli.v16i8"]
336    fn vsliq_n_s8_(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
337    #[link_name = "llvm.aarch64.neon.vsli.v4i16"]
338    fn vsli_n_s16_(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
339    #[link_name = "llvm.aarch64.neon.vsli.v8i16"]
340    fn vsliq_n_s16_(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
341    #[link_name = "llvm.aarch64.neon.vsli.v2i32"]
342    fn vsli_n_s32_(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
343    #[link_name = "llvm.aarch64.neon.vsli.v4i32"]
344    fn vsliq_n_s32_(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
345    #[link_name = "llvm.aarch64.neon.vsli.v1i64"]
346    fn vsli_n_s64_(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
347    #[link_name = "llvm.aarch64.neon.vsli.v2i64"]
348    fn vsliq_n_s64_(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
349
350    #[link_name = "llvm.aarch64.neon.vsri.v8i8"]
351    fn vsri_n_s8_(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
352    #[link_name = "llvm.aarch64.neon.vsri.v16i8"]
353    fn vsriq_n_s8_(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
354    #[link_name = "llvm.aarch64.neon.vsri.v4i16"]
355    fn vsri_n_s16_(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
356    #[link_name = "llvm.aarch64.neon.vsri.v8i16"]
357    fn vsriq_n_s16_(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
358    #[link_name = "llvm.aarch64.neon.vsri.v2i32"]
359    fn vsri_n_s32_(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
360    #[link_name = "llvm.aarch64.neon.vsri.v4i32"]
361    fn vsriq_n_s32_(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
362    #[link_name = "llvm.aarch64.neon.vsri.v1i64"]
363    fn vsri_n_s64_(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
364    #[link_name = "llvm.aarch64.neon.vsri.v2i64"]
365    fn vsriq_n_s64_(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
366}
367
/// Duplicate vector element to vector or scalar
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_s64<const N1: i32, const N2: i32>(
    _a: int64x1_t,
    b: int64x1_t,
) -> int64x1_t {
    // Both vectors have exactly one lane, so 0 is the only valid index.
    static_assert!(N1 == 0);
    static_assert!(N2 == 0);
    // Replacing lane 0 of a one-lane vector with lane 0 of `b` yields `b`
    // unchanged; `_a` is never read and no instruction is emitted (`nop`).
    b
}
382
/// Duplicate vector element to vector or scalar
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_u64<const N1: i32, const N2: i32>(
    _a: uint64x1_t,
    b: uint64x1_t,
) -> uint64x1_t {
    // Both vectors have exactly one lane, so 0 is the only valid index.
    static_assert!(N1 == 0);
    static_assert!(N2 == 0);
    // Copying lane 0 over lane 0 of a one-lane vector is the identity on `b`.
    b
}
397
/// Duplicate vector element to vector or scalar
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_p64<const N1: i32, const N2: i32>(
    _a: poly64x1_t,
    b: poly64x1_t,
) -> poly64x1_t {
    // Both vectors have exactly one lane, so 0 is the only valid index.
    static_assert!(N1 == 0);
    static_assert!(N2 == 0);
    // Copying lane 0 over lane 0 of a one-lane vector is the identity on `b`.
    b
}
412
/// Duplicate vector element to vector or scalar
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N1 = 0, N2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_f64<const N1: i32, const N2: i32>(
    _a: float64x1_t,
    b: float64x1_t,
) -> float64x1_t {
    // Both vectors have exactly one lane, so 0 is the only valid index.
    static_assert!(N1 == 0);
    static_assert!(N2 == 0);
    // Copying lane 0 over lane 0 of a one-lane vector is the identity on `b`.
    b
}
427
428/// Duplicate vector element to vector or scalar
429#[inline]
430#[target_feature(enable = "neon")]
431#[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))]
432#[rustc_legacy_const_generics(1, 3)]
433#[stable(feature = "neon_intrinsics", since = "1.59.0")]
434pub unsafe fn vcopy_laneq_s64<const LANE1: i32, const LANE2: i32>(
435    _a: int64x1_t,
436    b: int64x2_t,
437) -> int64x1_t {
438    static_assert!(LANE1 == 0);
439    static_assert_uimm_bits!(LANE2, 1);
440    transmute::<i64, _>(simd_extract!(b, LANE2 as u32))
441}
442
443/// Duplicate vector element to vector or scalar
444#[inline]
445#[target_feature(enable = "neon")]
446#[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))]
447#[rustc_legacy_const_generics(1, 3)]
448#[stable(feature = "neon_intrinsics", since = "1.59.0")]
449pub unsafe fn vcopy_laneq_u64<const LANE1: i32, const LANE2: i32>(
450    _a: uint64x1_t,
451    b: uint64x2_t,
452) -> uint64x1_t {
453    static_assert!(LANE1 == 0);
454    static_assert_uimm_bits!(LANE2, 1);
455    transmute::<u64, _>(simd_extract!(b, LANE2 as u32))
456}
457
458/// Duplicate vector element to vector or scalar
459#[inline]
460#[target_feature(enable = "neon")]
461#[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))]
462#[rustc_legacy_const_generics(1, 3)]
463#[stable(feature = "neon_intrinsics", since = "1.59.0")]
464pub unsafe fn vcopy_laneq_p64<const LANE1: i32, const LANE2: i32>(
465    _a: poly64x1_t,
466    b: poly64x2_t,
467) -> poly64x1_t {
468    static_assert!(LANE1 == 0);
469    static_assert_uimm_bits!(LANE2, 1);
470    transmute::<u64, _>(simd_extract!(b, LANE2 as u32))
471}
472
473/// Duplicate vector element to vector or scalar
474#[inline]
475#[target_feature(enable = "neon")]
476#[cfg_attr(test, assert_instr(nop, LANE1 = 0, LANE2 = 1))]
477#[rustc_legacy_const_generics(1, 3)]
478#[stable(feature = "neon_intrinsics", since = "1.59.0")]
479pub unsafe fn vcopy_laneq_f64<const LANE1: i32, const LANE2: i32>(
480    _a: float64x1_t,
481    b: float64x2_t,
482) -> float64x1_t {
483    static_assert!(LANE1 == 0);
484    static_assert_uimm_bits!(LANE2, 1);
485    transmute::<f64, _>(simd_extract!(b, LANE2 as u32))
486}
487
488/// Load multiple single-element structures to one, two, three, or four registers.
489#[inline]
490#[target_feature(enable = "neon")]
491#[cfg_attr(test, assert_instr(ldr))]
492#[stable(feature = "neon_intrinsics", since = "1.59.0")]
493pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
494    read_unaligned(ptr.cast())
495}
496
497/// Load multiple single-element structures to one, two, three, or four registers.
498#[inline]
499#[target_feature(enable = "neon")]
500#[cfg_attr(test, assert_instr(ldr))]
501#[stable(feature = "neon_intrinsics", since = "1.59.0")]
502pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
503    read_unaligned(ptr.cast())
504}
505
506/// Load multiple single-element structures to one, two, three, or four registers.
507#[inline]
508#[target_feature(enable = "neon")]
509#[cfg_attr(test, assert_instr(ldr))]
510#[stable(feature = "neon_intrinsics", since = "1.59.0")]
511pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
512    read_unaligned(ptr.cast())
513}
514
515/// Load multiple single-element structures to one, two, three, or four registers.
516#[inline]
517#[target_feature(enable = "neon")]
518#[cfg_attr(test, assert_instr(ldr))]
519#[stable(feature = "neon_intrinsics", since = "1.59.0")]
520pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
521    read_unaligned(ptr.cast())
522}
523
524/// Load multiple single-element structures to one, two, three, or four registers.
525#[inline]
526#[target_feature(enable = "neon")]
527#[cfg_attr(test, assert_instr(ldr))]
528#[stable(feature = "neon_intrinsics", since = "1.59.0")]
529pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
530    read_unaligned(ptr.cast())
531}
532
533/// Load multiple single-element structures to one, two, three, or four registers.
534#[inline]
535#[target_feature(enable = "neon")]
536#[cfg_attr(test, assert_instr(ldr))]
537#[stable(feature = "neon_intrinsics", since = "1.59.0")]
538pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
539    read_unaligned(ptr.cast())
540}
541
542/// Load multiple single-element structures to one, two, three, or four registers.
543#[inline]
544#[target_feature(enable = "neon")]
545#[cfg_attr(test, assert_instr(ldr))]
546#[stable(feature = "neon_intrinsics", since = "1.59.0")]
547pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
548    read_unaligned(ptr.cast())
549}
550
551/// Load multiple single-element structures to one, two, three, or four registers.
552#[inline]
553#[target_feature(enable = "neon")]
554#[cfg_attr(test, assert_instr(ldr))]
555#[stable(feature = "neon_intrinsics", since = "1.59.0")]
556pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
557    read_unaligned(ptr.cast())
558}
559
560/// Load multiple single-element structures to one, two, three, or four registers.
561#[inline]
562#[target_feature(enable = "neon")]
563#[cfg_attr(test, assert_instr(ldr))]
564#[stable(feature = "neon_intrinsics", since = "1.59.0")]
565pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
566    read_unaligned(ptr.cast())
567}
568
569/// Load multiple single-element structures to one, two, three, or four registers.
570#[inline]
571#[target_feature(enable = "neon")]
572#[cfg_attr(test, assert_instr(ldr))]
573#[stable(feature = "neon_intrinsics", since = "1.59.0")]
574pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
575    read_unaligned(ptr.cast())
576}
577
578/// Load multiple single-element structures to one, two, three, or four registers.
579#[inline]
580#[target_feature(enable = "neon")]
581#[cfg_attr(test, assert_instr(ldr))]
582#[stable(feature = "neon_intrinsics", since = "1.59.0")]
583pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
584    read_unaligned(ptr.cast())
585}
586
587/// Load multiple single-element structures to one, two, three, or four registers.
588#[inline]
589#[target_feature(enable = "neon")]
590#[cfg_attr(test, assert_instr(ldr))]
591#[stable(feature = "neon_intrinsics", since = "1.59.0")]
592pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
593    read_unaligned(ptr.cast())
594}
595
596/// Load multiple single-element structures to one, two, three, or four registers.
597#[inline]
598#[target_feature(enable = "neon")]
599#[cfg_attr(test, assert_instr(ldr))]
600#[stable(feature = "neon_intrinsics", since = "1.59.0")]
601pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
602    read_unaligned(ptr.cast())
603}
604
605/// Load multiple single-element structures to one, two, three, or four registers.
606#[inline]
607#[target_feature(enable = "neon")]
608#[cfg_attr(test, assert_instr(ldr))]
609#[stable(feature = "neon_intrinsics", since = "1.59.0")]
610pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
611    read_unaligned(ptr.cast())
612}
613
614/// Load multiple single-element structures to one, two, three, or four registers.
615#[inline]
616#[target_feature(enable = "neon")]
617#[cfg_attr(test, assert_instr(ldr))]
618#[stable(feature = "neon_intrinsics", since = "1.59.0")]
619pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
620    read_unaligned(ptr.cast())
621}
622
623/// Load multiple single-element structures to one, two, three, or four registers.
624#[inline]
625#[target_feature(enable = "neon")]
626#[cfg_attr(test, assert_instr(ldr))]
627#[stable(feature = "neon_intrinsics", since = "1.59.0")]
628pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
629    read_unaligned(ptr.cast())
630}
631
632/// Load multiple single-element structures to one, two, three, or four registers.
633#[inline]
634#[target_feature(enable = "neon")]
635#[cfg_attr(test, assert_instr(ldr))]
636#[stable(feature = "neon_intrinsics", since = "1.59.0")]
637pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
638    read_unaligned(ptr.cast())
639}
640
641/// Load multiple single-element structures to one, two, three, or four registers.
642#[inline]
643#[target_feature(enable = "neon")]
644#[cfg_attr(test, assert_instr(ldr))]
645#[stable(feature = "neon_intrinsics", since = "1.59.0")]
646pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
647    read_unaligned(ptr.cast())
648}
649
650/// Load multiple single-element structures to one, two, three, or four registers.
651#[inline]
652#[target_feature(enable = "neon")]
653#[cfg_attr(test, assert_instr(ldr))]
654#[stable(feature = "neon_intrinsics", since = "1.59.0")]
655pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
656    read_unaligned(ptr.cast())
657}
658
659/// Load multiple single-element structures to one, two, three, or four registers.
660#[inline]
661#[target_feature(enable = "neon")]
662#[cfg_attr(test, assert_instr(ldr))]
663#[stable(feature = "neon_intrinsics", since = "1.59.0")]
664pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
665    read_unaligned(ptr.cast())
666}
667
668/// Load multiple single-element structures to one, two, three, or four registers.
669///
670/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)
671#[inline]
672#[target_feature(enable = "neon,aes")]
673#[cfg_attr(test, assert_instr(ldr))]
674#[stable(feature = "neon_intrinsics", since = "1.59.0")]
675pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
676    read_unaligned(ptr.cast())
677}
678
679/// Load multiple single-element structures to one, two, three, or four registers.
680///
681/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)
682#[inline]
683#[target_feature(enable = "neon,aes")]
684#[cfg_attr(test, assert_instr(ldr))]
685#[stable(feature = "neon_intrinsics", since = "1.59.0")]
686pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
687    read_unaligned(ptr.cast())
688}
689
690/// Load multiple single-element structures to one, two, three, or four registers.
691#[inline]
692#[target_feature(enable = "neon")]
693#[cfg_attr(test, assert_instr(ldr))]
694#[stable(feature = "neon_intrinsics", since = "1.59.0")]
695pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
696    read_unaligned(ptr.cast())
697}
698
699/// Load multiple single-element structures to one, two, three, or four registers.
700#[inline]
701#[target_feature(enable = "neon")]
702#[cfg_attr(test, assert_instr(ldr))]
703#[stable(feature = "neon_intrinsics", since = "1.59.0")]
704pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
705    read_unaligned(ptr.cast())
706}
707
708/// Load multiple single-element structures to one, two, three, or four registers.
709#[inline]
710#[target_feature(enable = "neon")]
711#[cfg_attr(test, assert_instr(ldr))]
712#[stable(feature = "neon_intrinsics", since = "1.59.0")]
713pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
714    read_unaligned(ptr.cast())
715}
716
717/// Load multiple single-element structures to one, two, three, or four registers.
718#[inline]
719#[target_feature(enable = "neon")]
720#[cfg_attr(test, assert_instr(ldr))]
721#[stable(feature = "neon_intrinsics", since = "1.59.0")]
722pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
723    read_unaligned(ptr.cast())
724}
725
726/// Load multiple single-element structures to one, two, three, or four registers
727#[inline]
728#[target_feature(enable = "neon")]
729#[cfg_attr(test, assert_instr(ldr))]
730#[stable(feature = "neon_intrinsics", since = "1.59.0")]
731pub unsafe fn vld1_dup_f64(ptr: *const f64) -> float64x1_t {
732    vld1_f64(ptr)
733}
734
/// Load multiple single-element structures to one, two, three, or four registers
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld1r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_dup_f64(ptr: *const f64) -> float64x2_t {
    // Load the scalar into lane 0 of a zeroed vector, then broadcast lane 0
    // to both lanes; the expected codegen is a single `ld1r` (see
    // `assert_instr` above).
    let x = vld1q_lane_f64::<0>(ptr, transmute(f64x2::splat(0.)));
    simd_shuffle!(x, x, [0, 0])
}
744
745/// Load one single-element structure to one lane of one register.
746#[inline]
747#[target_feature(enable = "neon")]
748#[rustc_legacy_const_generics(2)]
749#[cfg_attr(test, assert_instr(ldr, LANE = 0))]
750#[stable(feature = "neon_intrinsics", since = "1.59.0")]
751pub unsafe fn vld1_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x1_t) -> float64x1_t {
752    static_assert!(LANE == 0);
753    simd_insert!(src, LANE as u32, *ptr)
754}
755
756/// Load one single-element structure to one lane of one register.
757#[inline]
758#[target_feature(enable = "neon")]
759#[rustc_legacy_const_generics(2)]
760#[cfg_attr(test, assert_instr(ld1, LANE = 1))]
761#[stable(feature = "neon_intrinsics", since = "1.59.0")]
762pub unsafe fn vld1q_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x2_t) -> float64x2_t {
763    static_assert_uimm_bits!(LANE, 1);
764    simd_insert!(src, LANE as u32, *ptr)
765}
766
// NOTE: the `vst1*` stores below each write the whole vector with a single
// unaligned store (`write_unaligned`), so `ptr` carries no alignment
// requirement; `assert_instr(str)` checks they compile to one plain `str`.

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
    write_unaligned(ptr.cast(), a);
}
846
// Unsigned `vst1*` variants: identical shape to the signed ones — a single
// unaligned whole-vector store, no pointer alignment requirement.

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
    write_unaligned(ptr.cast(), a);
}
926
// Polynomial `vst1*` variants: same single unaligned whole-vector store as
// the integer variants above; no pointer alignment requirement.

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
    write_unaligned(ptr.cast(), a);
}
966
/// Store multiple single-element structures from one, two, three, or four registers.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
    write_unaligned(ptr.cast(), a);
}
978
/// Store multiple single-element structures from one, two, three, or four registers.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
    write_unaligned(ptr.cast(), a);
}
990
/// Store multiple single-element structures from one, two, three, or four registers.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
    write_unaligned(ptr.cast(), a);
}

/// Store multiple single-element structures from one, two, three, or four registers.
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
    write_unaligned(ptr.cast(), a);
}
1030
/// Absolute Value (wrapping).
///
/// Scalar form; "wrapping" presumably means `i64::MIN` maps to itself
/// rather than trapping — TODO confirm against the `abs` instruction spec.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(abs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabsd_s64(a: i64) -> i64 {
    vabsd_s64_(a)
}
/// Absolute Value (wrapping).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(abs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabs_s64(a: int64x1_t) -> int64x1_t {
    vabs_s64_(a)
}
/// Absolute Value (wrapping).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(abs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabsq_s64(a: int64x2_t) -> int64x2_t {
    vabsq_s64_(a)
}
1055
/// Bitwise Select instructions. This instruction sets each bit in the destination SIMD&FP register
/// to the corresponding bit from the first source SIMD&FP register when the original
/// destination bit was 1, otherwise from the second source SIMD&FP register.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vbsl_f64(a: uint64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // Computes (a & b) | (!a & c) on the raw bits of the float inputs;
    // `not` is the all-ones mask XORed with `a` to form `!a`.
    let not = int64x1_t::splat(-1);
    transmute(simd_or(
        simd_and(a, transmute(b)),
        simd_and(simd_xor(a, transmute(not)), transmute(c)),
    ))
}
/// Bitwise Select.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vbsl_p64(a: poly64x1_t, b: poly64x1_t, c: poly64x1_t) -> poly64x1_t {
    // (a & b) | (!a & c), all operands already share the poly64 lane layout.
    let not = int64x1_t::splat(-1);
    simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c))
}
/// Bitwise Select. (128-bit)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vbslq_f64(a: uint64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // 128-bit counterpart of `vbsl_f64`: (a & b) | (!a & c) on raw bits.
    let not = int64x2_t::splat(-1);
    transmute(simd_or(
        simd_and(a, transmute(b)),
        simd_and(simd_xor(a, transmute(not)), transmute(c)),
    ))
}
/// Bitwise Select. (128-bit)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(bsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vbslq_p64(a: poly64x2_t, b: poly64x2_t, c: poly64x2_t) -> poly64x2_t {
    // 128-bit counterpart of `vbsl_p64`: (a & b) | (!a & c).
    let not = int64x2_t::splat(-1);
    simd_or(simd_and(a, b), simd_and(simd_xor(a, transmute(not)), c))
}
1100
// `suqadd` family: per-lane signed + unsigned addition, saturating to the
// signed lane range. Thin wrappers over the underscore-suffixed intrinsics.

/// Signed saturating Accumulate of Unsigned value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
    vuqadd_s8_(a, b)
}
/// Signed saturating Accumulate of Unsigned value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    vuqaddq_s8_(a, b)
}
/// Signed saturating Accumulate of Unsigned value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
    vuqadd_s16_(a, b)
}
/// Signed saturating Accumulate of Unsigned value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
    vuqaddq_s16_(a, b)
}
/// Signed saturating Accumulate of Unsigned value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
    vuqadd_s32_(a, b)
}
/// Signed saturating Accumulate of Unsigned value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
    vuqaddq_s32_(a, b)
}
/// Signed saturating Accumulate of Unsigned value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
    vuqadd_s64_(a, b)
}
/// Signed saturating Accumulate of Unsigned value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
    vuqaddq_s64_(a, b)
}
1165
// `usqadd` family: mirror of `suqadd` above — unsigned + signed addition,
// saturating to the unsigned lane range.

/// Unsigned saturating Accumulate of Signed value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
    vsqadd_u8_(a, b)
}
/// Unsigned saturating Accumulate of Signed value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
    vsqaddq_u8_(a, b)
}
/// Unsigned saturating Accumulate of Signed value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
    vsqadd_u16_(a, b)
}
/// Unsigned saturating Accumulate of Signed value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
    vsqaddq_u16_(a, b)
}
/// Unsigned saturating Accumulate of Signed value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
    vsqadd_u32_(a, b)
}
/// Unsigned saturating Accumulate of Signed value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
    vsqaddq_u32_(a, b)
}
/// Unsigned saturating Accumulate of Signed value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
    vsqadd_u64_(a, b)
}
/// Unsigned saturating Accumulate of Signed value.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(usqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
    vsqaddq_u64_(a, b)
}
1230
// Pairwise add (`addp`) family. The unsigned variants reuse the signed
// intrinsic via `transmute`: wrapping addition is bit-identical for signed
// and unsigned two's-complement lanes, so only the type view differs.

/// Add pairwise
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    vpaddq_s16_(a, b)
}
/// Add pairwise
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    transmute(vpaddq_s16_(transmute(a), transmute(b)))
}
/// Add pairwise
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    vpaddq_s32_(a, b)
}
/// Add pairwise
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    transmute(vpaddq_s32_(transmute(a), transmute(b)))
}
/// Add pairwise
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    vpaddq_s64_(a, b)
}
/// Add pairwise
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    transmute(vpaddq_s64_(transmute(a), transmute(b)))
}
/// Add pairwise
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    vpaddq_s8_(a, b)
}
/// Add pairwise
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    transmute(vpaddq_s8_(transmute(a), transmute(b)))
}
/// Add pairwise
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddd_s64(a: int64x2_t) -> i64 {
    // Sums the two lanes; reuses the unsigned across-vector add, which is
    // bit-identical to the signed sum under wrapping arithmetic.
    transmute(vaddvq_u64_(transmute(a)))
}
/// Add pairwise
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpaddd_u64(a: uint64x2_t) -> u64 {
    vaddvq_u64_(a)
}
1311
// Across-vector add (horizontal sum) family. Note the `assert_instr`
// attributes: two-lane reductions (`_s32`/`_u32` and the 64-bit `q`
// variants) are expected to lower to a pairwise `addp` of the single lane
// pair, while wider vectors use the dedicated `addv` reduction.

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddv_s16(a: int16x4_t) -> i16 {
    vaddv_s16_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddv_s32(a: int32x2_t) -> i32 {
    vaddv_s32_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddv_s8(a: int8x8_t) -> i8 {
    vaddv_s8_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddv_u16(a: uint16x4_t) -> u16 {
    vaddv_u16_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddv_u32(a: uint32x2_t) -> u32 {
    vaddv_u32_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddv_u8(a: uint8x8_t) -> u8 {
    vaddv_u8_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddvq_s16(a: int16x8_t) -> i16 {
    vaddvq_s16_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddvq_s32(a: int32x4_t) -> i32 {
    vaddvq_s32_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddvq_s8(a: int8x16_t) -> i8 {
    vaddvq_s8_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddvq_u16(a: uint16x8_t) -> u16 {
    vaddvq_u16_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddvq_u32(a: uint32x4_t) -> u32 {
    vaddvq_u32_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddvq_u8(a: uint8x16_t) -> u8 {
    vaddvq_u8_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddvq_s64(a: int64x2_t) -> i64 {
    vaddvq_s64_(a)
}

/// Add across vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(addp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddvq_u64(a: uint64x2_t) -> u64 {
    vaddvq_u64_(a)
}
1437
// Long (widening) across-vector add: 8-bit lanes summed into a 16-bit
// result. The underscore intrinsics appear to return a wider integer that
// is narrowed back with `as` — TODO confirm the helper signatures.

/// Signed Add Long across Vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(saddlv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlv_s8(a: int8x8_t) -> i16 {
    vaddlv_s8_(a) as i16
}

/// Signed Add Long across Vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(saddlv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlvq_s8(a: int8x16_t) -> i16 {
    vaddlvq_s8_(a) as i16
}

/// Unsigned Add Long across Vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uaddlv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlv_u8(a: uint8x8_t) -> u16 {
    vaddlv_u8_(a) as u16
}

/// Unsigned Add Long across Vector
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uaddlv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    vaddlvq_u8_(a) as u16
}
1473
/// Vector add.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vadd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    simd_add(a, b)
}

/// Vector add.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    simd_add(a, b)
}

/// Vector add.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(add))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    simd_add(a, b)
}

/// Vector add.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(add))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    simd_add(a, b)
}

/// Vector add.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(add))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddd_s64(a: i64, b: i64) -> i64 {
    // Scalar form: explicit wrapping add matches the vector `add` semantics
    // (no overflow panic in debug builds).
    a.wrapping_add(b)
}

/// Vector add.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(add))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vaddd_u64(a: u64, b: u64) -> u64 {
    // Scalar form: explicit wrapping add, see `vaddd_s64`.
    a.wrapping_add(b)
}
1527
// Across-vector max family. Per the `assert_instr` attributes: two-lane
// variants (`_s32`/`_u32`, `f32x2`, `f64x2`) lower to a pairwise max
// (`smaxp`/`umaxp`/`fmaxp`) of the single lane pair; wider vectors use the
// dedicated reduction (`smaxv`/`umaxv`/`fmaxv`).

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smaxv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxv_s8(a: int8x8_t) -> i8 {
    vmaxv_s8_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smaxv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxvq_s8(a: int8x16_t) -> i8 {
    vmaxvq_s8_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smaxv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxv_s16(a: int16x4_t) -> i16 {
    vmaxv_s16_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smaxv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxvq_s16(a: int16x8_t) -> i16 {
    vmaxvq_s16_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxv_s32(a: int32x2_t) -> i32 {
    vmaxv_s32_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smaxv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxvq_s32(a: int32x4_t) -> i32 {
    vmaxvq_s32_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umaxv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxv_u8(a: uint8x8_t) -> u8 {
    vmaxv_u8_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umaxv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxvq_u8(a: uint8x16_t) -> u8 {
    vmaxvq_u8_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umaxv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxv_u16(a: uint16x4_t) -> u16 {
    vmaxv_u16_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umaxv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxvq_u16(a: uint16x8_t) -> u16 {
    vmaxvq_u16_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxv_u32(a: uint32x2_t) -> u32 {
    vmaxv_u32_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umaxv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxvq_u32(a: uint32x4_t) -> u32 {
    vmaxvq_u32_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxv_f32(a: float32x2_t) -> f32 {
    vmaxv_f32_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxvq_f32(a: float32x4_t) -> f32 {
    vmaxvq_f32_(a)
}

/// Horizontal vector max.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmaxvq_f64(a: float64x2_t) -> f64 {
    vmaxvq_f64_(a)
}
1662
// Across-vector min family, mirroring the max family above: two-lane
// variants assert the pairwise `sminp`/`uminp`, wider vectors the
// dedicated `sminv`/`uminv` reduction.

/// Horizontal vector min.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sminv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminv_s8(a: int8x8_t) -> i8 {
    vminv_s8_(a)
}

/// Horizontal vector min.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sminv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminvq_s8(a: int8x16_t) -> i8 {
    vminvq_s8_(a)
}

/// Horizontal vector min.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sminv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminv_s16(a: int16x4_t) -> i16 {
    vminv_s16_(a)
}

/// Horizontal vector min.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sminv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminvq_s16(a: int16x8_t) -> i16 {
    vminvq_s16_(a)
}

/// Horizontal vector min.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sminp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminv_s32(a: int32x2_t) -> i32 {
    vminv_s32_(a)
}

/// Horizontal vector min.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sminv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminvq_s32(a: int32x4_t) -> i32 {
    vminvq_s32_(a)
}

/// Horizontal vector min.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uminv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminv_u8(a: uint8x8_t) -> u8 {
    vminv_u8_(a)
}

/// Horizontal vector min.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uminv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminvq_u8(a: uint8x16_t) -> u8 {
    vminvq_u8_(a)
}

/// Horizontal vector min.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uminv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminv_u16(a: uint16x4_t) -> u16 {
    vminv_u16_(a)
}

/// Horizontal vector min.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uminv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vminvq_u16(a: uint16x8_t) -> u16 {
    vminvq_u16_(a)
}
1752
1753/// Horizontal vector min.
1754#[inline]
1755#[target_feature(enable = "neon")]
1756#[cfg_attr(test, assert_instr(uminp))]
1757#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1758pub unsafe fn vminv_u32(a: uint32x2_t) -> u32 {
1759    vminv_u32_(a)
1760}
1761
1762/// Horizontal vector min.
1763#[inline]
1764#[target_feature(enable = "neon")]
1765#[cfg_attr(test, assert_instr(uminv))]
1766#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1767pub unsafe fn vminvq_u32(a: uint32x4_t) -> u32 {
1768    vminvq_u32_(a)
1769}
1770
1771/// Horizontal vector min.
1772#[inline]
1773#[target_feature(enable = "neon")]
1774#[cfg_attr(test, assert_instr(fminp))]
1775#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1776pub unsafe fn vminv_f32(a: float32x2_t) -> f32 {
1777    vminv_f32_(a)
1778}
1779
1780/// Horizontal vector min.
1781#[inline]
1782#[target_feature(enable = "neon")]
1783#[cfg_attr(test, assert_instr(fminv))]
1784#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1785pub unsafe fn vminvq_f32(a: float32x4_t) -> f32 {
1786    vminvq_f32_(a)
1787}
1788
1789/// Horizontal vector min.
1790#[inline]
1791#[target_feature(enable = "neon")]
1792#[cfg_attr(test, assert_instr(fminp))]
1793#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1794pub unsafe fn vminvq_f64(a: float64x2_t) -> f64 {
1795    vminvq_f64_(a)
1796}
1797
/// Folding minimum of adjacent pairs (signed compare); lowers to `sminp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sminp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    vpminq_s8_(a, b)
}

/// Folding minimum of adjacent pairs (signed compare); lowers to `sminp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sminp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    vpminq_s16_(a, b)
}

/// Folding minimum of adjacent pairs (signed compare); lowers to `sminp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sminp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    vpminq_s32_(a, b)
}

/// Folding minimum of adjacent pairs (unsigned compare); lowers to `uminp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uminp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    vpminq_u8_(a, b)
}

/// Folding minimum of adjacent pairs (unsigned compare); lowers to `uminp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uminp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    vpminq_u16_(a, b)
}

/// Folding minimum of adjacent pairs (unsigned compare); lowers to `uminp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uminp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    vpminq_u32_(a, b)
}

/// Folding minimum of adjacent pairs; lowers to `fminp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    vpminq_f32_(a, b)
}

/// Folding minimum of adjacent pairs; lowers to `fminp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    vpminq_f64_(a, b)
}
1869
/// Folding maximum of adjacent pairs (signed compare); lowers to `smaxp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    vpmaxq_s8_(a, b)
}

/// Folding maximum of adjacent pairs (signed compare); lowers to `smaxp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    vpmaxq_s16_(a, b)
}

/// Folding maximum of adjacent pairs (signed compare); lowers to `smaxp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    vpmaxq_s32_(a, b)
}

/// Folding maximum of adjacent pairs (unsigned compare); lowers to `umaxp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    vpmaxq_u8_(a, b)
}

/// Folding maximum of adjacent pairs (unsigned compare); lowers to `umaxp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    vpmaxq_u16_(a, b)
}

/// Folding maximum of adjacent pairs (unsigned compare); lowers to `umaxp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    vpmaxq_u32_(a, b)
}

/// Folding maximum of adjacent pairs; lowers to `fmaxp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    vpmaxq_f32_(a, b)
}

/// Folding maximum of adjacent pairs; lowers to `fmaxp`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    vpmaxq_f64_(a, b)
}
1941
/// Extract vector from pair of vectors.
///
/// Both operands have a single lane, so the only valid extraction offset is
/// `N == 0` (enforced at compile time) and the result is just `a`; `_b` never
/// contributes and no instruction is emitted (`nop`).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vext_p64<const N: i32>(a: poly64x1_t, _b: poly64x1_t) -> poly64x1_t {
    static_assert!(N == 0);
    a
}

/// Extract vector from pair of vectors.
///
/// Both operands have a single lane, so the only valid extraction offset is
/// `N == 0` (enforced at compile time) and the result is just `a`; `_b` never
/// contributes and no instruction is emitted (`nop`).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vext_f64<const N: i32>(a: float64x1_t, _b: float64x1_t) -> float64x1_t {
    static_assert!(N == 0);
    a
}
1963
/// Duplicate vector element to vector or scalar: broadcasts the scalar
/// `value` into a one-lane `poly64` vector (built as `u64x1` and reinterpreted).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmov))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdup_n_p64(value: p64) -> poly64x1_t {
    transmute(u64x1::new(value))
}

/// Duplicate vector element to vector or scalar: wraps the scalar `value`
/// in a one-lane `f64` vector (no instruction needed — `nop`).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdup_n_f64(value: f64) -> float64x1_t {
    float64x1_t::splat(value)
}

/// Duplicate vector element to vector or scalar: broadcasts the scalar
/// `value` into both lanes of a `poly64x2_t` (built as `u64x2` and reinterpreted).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupq_n_p64(value: p64) -> poly64x2_t {
    transmute(u64x2::new(value, value))
}

/// Duplicate vector element to vector or scalar: broadcasts the scalar
/// `value` into both lanes of a `float64x2_t`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupq_n_f64(value: f64) -> float64x2_t {
    float64x2_t::splat(value)
}

/// Duplicate vector element to vector or scalar.
///
/// `vmov_n_*` is an alias required by the intrinsics naming scheme; it simply
/// delegates to [`vdup_n_p64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmov))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmov_n_p64(value: p64) -> poly64x1_t {
    vdup_n_p64(value)
}

/// Duplicate vector element to vector or scalar.
///
/// `vmov_n_*` is an alias required by the intrinsics naming scheme; it simply
/// delegates to [`vdup_n_f64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmov_n_f64(value: f64) -> float64x1_t {
    vdup_n_f64(value)
}

/// Duplicate vector element to vector or scalar.
///
/// `vmovq_n_*` is an alias required by the intrinsics naming scheme; it simply
/// delegates to [`vdupq_n_p64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovq_n_p64(value: p64) -> poly64x2_t {
    vdupq_n_p64(value)
}

/// Duplicate vector element to vector or scalar.
///
/// `vmovq_n_*` is an alias required by the intrinsics naming scheme; it simply
/// delegates to [`vdupq_n_f64`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovq_n_f64(value: f64) -> float64x2_t {
    vdupq_n_f64(value)
}
2035
/// Returns the high (second, index 1) lane of `a` as a one-lane vector.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(mov))]
#[cfg_attr(all(test, target_env = "msvc"), assert_instr(dup))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vget_high_f64(a: float64x2_t) -> float64x1_t {
    float64x1_t([simd_extract!(a, 1)])
}

/// Returns the high (second, index 1) lane of `a` as a one-lane vector.
/// The lane is extracted as `u64` and reinterpreted as `poly64`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vget_high_p64(a: poly64x2_t) -> poly64x1_t {
    transmute(u64x1::new(simd_extract!(a, 1)))
}

/// Returns the low (first, index 0) lane of `a` as a one-lane vector.
/// No instruction is needed — the low half is already in place (`nop`).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vget_low_f64(a: float64x2_t) -> float64x1_t {
    float64x1_t([simd_extract!(a, 0)])
}

/// Returns the low (first, index 0) lane of `a` as a one-lane vector.
/// The lane is extracted as `u64` and reinterpreted as `poly64`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vget_low_p64(a: poly64x2_t) -> poly64x1_t {
    transmute(u64x1::new(simd_extract!(a, 0)))
}
2072
/// Extracts the single lane of `v` as a scalar.
///
/// `v` has only one lane, so `IMM5` must be 0 (checked at compile time) and
/// the operation compiles to nothing (`nop`).
#[inline]
#[target_feature(enable = "neon")]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(
    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
    assert_instr(nop, IMM5 = 0)
)]
pub unsafe fn vget_lane_f64<const IMM5: i32>(v: float64x1_t) -> f64 {
    static_assert!(IMM5 == 0);
    simd_extract!(v, IMM5 as u32)
}

/// Extracts lane `IMM5` of `v` as a scalar.
///
/// `v` has two lanes, so `IMM5` must fit in one bit (0 or 1, checked at
/// compile time).
#[inline]
#[target_feature(enable = "neon")]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(
    all(test, any(target_arch = "aarch64", target_arch = "arm64ec")),
    assert_instr(nop, IMM5 = 0)
)]
pub unsafe fn vgetq_lane_f64<const IMM5: i32>(v: float64x2_t) -> f64 {
    static_assert_uimm_bits!(IMM5, 1);
    simd_extract!(v, IMM5 as u32)
}
2100
/// Vector combine: concatenates two one-lane vectors into a two-lane vector,
/// with `low` in lane 0 and `high` in lane 1.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcombine_f64(low: float64x1_t, high: float64x1_t) -> float64x2_t {
    simd_shuffle!(low, high, [0, 1])
}
2109
/// Table look-up (one 8-byte table).
///
/// Emulated with the 128-bit [`vqtbl1_s8`]: the 8-byte table `a` is widened
/// with a zeroed upper half, and the signed index vector `b` is reinterpreted
/// as unsigned. Indices >= 8 select from the zeroed half.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    vqtbl1_s8(vcombine_s8(a, zeroed()), transmute(b))
}

/// Table look-up (one 8-byte table).
///
/// Emulated with the 128-bit [`vqtbl1_u8`]: the 8-byte table `a` is widened
/// with a zeroed upper half. Indices >= 8 select from the zeroed half.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    vqtbl1_u8(vcombine_u8(a, zeroed()), b)
}

/// Table look-up (one 8-byte table).
///
/// Emulated with the 128-bit [`vqtbl1_p8`]: the 8-byte table `a` is widened
/// with a zeroed upper half. Indices >= 8 select from the zeroed half.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
    vqtbl1_p8(vcombine_p8(a, zeroed()), b)
}

/// Table look-up (two 8-byte tables).
///
/// The two 8-byte tables are concatenated into exactly one 16-byte table, so
/// this maps directly onto [`vqtbl1_s8`]; the signed indices are reinterpreted
/// as unsigned.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    vqtbl1_s8(vcombine_s8(a.0, a.1), transmute(b))
}

/// Table look-up (two 8-byte tables).
///
/// The two 8-byte tables are concatenated into exactly one 16-byte table, so
/// this maps directly onto [`vqtbl1_u8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    vqtbl1_u8(vcombine_u8(a.0, a.1), b)
}

/// Table look-up (two 8-byte tables).
///
/// The two 8-byte tables are concatenated into exactly one 16-byte table, so
/// this maps directly onto [`vqtbl1_p8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    vqtbl1_p8(vcombine_p8(a.0, a.1), b)
}

/// Table look-up (three 8-byte tables).
///
/// The three 8-byte tables are packed into two 16-byte tables for
/// [`vqtbl2_s8`], with the second padded by a zeroed upper half; indices
/// >= 24 select from the zeroed region.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    vqtbl2_s8(
        int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, zeroed())),
        transmute(b),
    )
}

/// Table look-up (three 8-byte tables).
///
/// The three 8-byte tables are packed into two 16-byte tables for
/// [`vqtbl2_u8`], with the second padded by a zeroed upper half.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    vqtbl2_u8(
        uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, zeroed())),
        b,
    )
}

/// Table look-up (three 8-byte tables).
///
/// The three 8-byte tables are packed into two 16-byte tables for
/// [`vqtbl2_p8`], with the second padded by a zeroed upper half.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    vqtbl2_p8(
        poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, zeroed())),
        b,
    )
}

/// Table look-up (four 8-byte tables).
///
/// The four 8-byte tables pack exactly into two 16-byte tables for
/// [`vqtbl2_s8`]; the signed indices are reinterpreted as unsigned.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    vqtbl2_s8(
        int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3)),
        transmute(b),
    )
}

/// Table look-up (four 8-byte tables).
///
/// The four 8-byte tables pack exactly into two 16-byte tables for
/// [`vqtbl2_u8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    vqtbl2_u8(
        uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3)),
        b,
    )
}

/// Table look-up (four 8-byte tables).
///
/// The four 8-byte tables pack exactly into two 16-byte tables for
/// [`vqtbl2_p8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    vqtbl2_p8(
        poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3)),
        b,
    )
}
2235
/// Extended table look-up (one 8-byte table, fallback `a`).
///
/// Emulated via the 128-bit [`vqtbx1_s8`] with a zero-padded table, then a
/// mask re-selects `a` for lanes whose index is not < 8, to mimic the 64-bit
/// `vtbx1` out-of-range behavior of keeping the destination element.
/// NOTE(review): the `simd_lt` compare is on signed lanes, so negative
/// indices also take the `tbx` result — confirm that matches the intended
/// semantics for out-of-range (negative-as-large-unsigned) indices.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    let r = vqtbx1_s8(a, vcombine_s8(b, zeroed()), transmute(c));
    let m: int8x8_t = simd_lt(c, transmute(i8x8::splat(8)));
    simd_select(m, r, a)
}

/// Extended table look-up (one 8-byte table, fallback `a`).
///
/// Emulated via the 128-bit [`vqtbx1_u8`] with a zero-padded table, then a
/// mask re-selects `a` for lanes whose index is >= 8, to mimic the 64-bit
/// `vtbx1` out-of-range behavior of keeping the destination element.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    let r = vqtbx1_u8(a, vcombine_u8(b, zeroed()), c);
    let m: int8x8_t = simd_lt(c, transmute(u8x8::splat(8)));
    simd_select(m, r, a)
}

/// Extended table look-up (one 8-byte table, fallback `a`).
///
/// Emulated via the 128-bit [`vqtbx1_p8`] with a zero-padded table, then a
/// mask re-selects `a` for lanes whose index is >= 8, to mimic the 64-bit
/// `vtbx1` out-of-range behavior of keeping the destination element.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
    let r = vqtbx1_p8(a, vcombine_p8(b, zeroed()), c);
    let m: int8x8_t = simd_lt(c, transmute(u8x8::splat(8)));
    simd_select(m, r, a)
}

/// Extended table look-up (two 8-byte tables, fallback `a`).
///
/// The two 8-byte tables concatenate into exactly one 16-byte table, so
/// [`vqtbx1_s8`]'s own out-of-range handling suffices and no extra mask is
/// needed; the signed indices are reinterpreted as unsigned.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    vqtbx1_s8(a, vcombine_s8(b.0, b.1), transmute(c))
}

/// Extended table look-up (two 8-byte tables, fallback `a`).
///
/// The two 8-byte tables concatenate into exactly one 16-byte table, so
/// [`vqtbx1_u8`]'s own out-of-range handling suffices and no extra mask is
/// needed.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    vqtbx1_u8(a, vcombine_u8(b.0, b.1), c)
}

/// Extended table look-up (two 8-byte tables, fallback `a`).
///
/// The two 8-byte tables concatenate into exactly one 16-byte table, so
/// [`vqtbx1_p8`]'s own out-of-range handling suffices and no extra mask is
/// needed.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    vqtbx1_p8(a, vcombine_p8(b.0, b.1), c)
}

/// Extended table look-up (three 8-byte tables, fallback `a`).
///
/// The three 8-byte tables are packed into two 16-byte tables (second padded
/// with zeros) for [`vqtbx2_s8`]; a mask then re-selects `a` for lanes whose
/// index is not < 24, since only indices 0..24 address real table bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    let r = vqtbx2_s8(
        a,
        int8x16x2_t(vcombine_s8(b.0, b.1), vcombine_s8(b.2, zeroed())),
        transmute(c),
    );
    let m: int8x8_t = simd_lt(c, transmute(i8x8::splat(24)));
    simd_select(m, r, a)
}

/// Extended table look-up (three 8-byte tables, fallback `a`).
///
/// The three 8-byte tables are packed into two 16-byte tables (second padded
/// with zeros) for [`vqtbx2_u8`]; a mask then re-selects `a` for lanes whose
/// index is >= 24, since only indices 0..24 address real table bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    let r = vqtbx2_u8(
        a,
        uint8x16x2_t(vcombine_u8(b.0, b.1), vcombine_u8(b.2, zeroed())),
        c,
    );
    let m: int8x8_t = simd_lt(c, transmute(u8x8::splat(24)));
    simd_select(m, r, a)
}

/// Extended table look-up (three 8-byte tables, fallback `a`).
///
/// The three 8-byte tables are packed into two 16-byte tables (second padded
/// with zeros) for [`vqtbx2_p8`]; a mask then re-selects `a` for lanes whose
/// index is >= 24, since only indices 0..24 address real table bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    let r = vqtbx2_p8(
        a,
        poly8x16x2_t(vcombine_p8(b.0, b.1), vcombine_p8(b.2, zeroed())),
        c,
    );
    let m: int8x8_t = simd_lt(c, transmute(u8x8::splat(24)));
    simd_select(m, r, a)
}

/// Extended table look-up (four 8-byte tables, fallback `a`).
///
/// The four 8-byte tables pack exactly into two 16-byte tables, so
/// [`vqtbx2_s8`]'s own out-of-range handling suffices and no extra mask is
/// needed; the signed indices are reinterpreted as unsigned.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    vqtbx2_s8(
        a,
        int8x16x2_t(vcombine_s8(b.0, b.1), vcombine_s8(b.2, b.3)),
        transmute(c),
    )
}

/// Extended table look-up (four 8-byte tables, fallback `a`).
///
/// The four 8-byte tables pack exactly into two 16-byte tables, so
/// [`vqtbx2_u8`]'s own out-of-range handling suffices and no extra mask is
/// needed.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    vqtbx2_u8(
        a,
        uint8x16x2_t(vcombine_u8(b.0, b.1), vcombine_u8(b.2, b.3)),
        c,
    )
}

/// Extended table look-up (four 8-byte tables, fallback `a`).
///
/// The four 8-byte tables pack exactly into two 16-byte tables, so
/// [`vqtbx2_p8`]'s own out-of-range handling suffices and no extra mask is
/// needed.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    vqtbx2_p8(
        a,
        poly8x16x2_t(vcombine_p8(b.0, b.1), vcombine_p8(b.2, b.3)),
        c,
    )
}
2379
/// Table look-up: indexes the 16-byte table `t` with the 8 byte indices in
/// `idx`. Thin wrapper over the `vqtbl1` platform intrinsic.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl1_s8(t: int8x16_t, idx: uint8x8_t) -> int8x8_t {
    vqtbl1(t, idx)
}

/// Table look-up: indexes the 16-byte table `t` with the 16 byte indices in
/// `idx`. Thin wrapper over the `vqtbl1q` platform intrinsic.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl1q_s8(t: int8x16_t, idx: uint8x16_t) -> int8x16_t {
    vqtbl1q(t, idx)
}

/// Table look-up: unsigned variant of [`vqtbl1_s8`], implemented by
/// reinterpreting the table bytes (a bitwise no-op).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl1_u8(t: uint8x16_t, idx: uint8x8_t) -> uint8x8_t {
    transmute(vqtbl1(transmute(t), idx))
}

/// Table look-up: unsigned variant of [`vqtbl1q_s8`], implemented by
/// reinterpreting the table bytes (a bitwise no-op).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl1q_u8(t: uint8x16_t, idx: uint8x16_t) -> uint8x16_t {
    transmute(vqtbl1q(transmute(t), idx))
}

/// Table look-up: polynomial variant of [`vqtbl1_s8`], implemented by
/// reinterpreting the table bytes (a bitwise no-op).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl1_p8(t: poly8x16_t, idx: uint8x8_t) -> poly8x8_t {
    transmute(vqtbl1(transmute(t), idx))
}

/// Table look-up: polynomial variant of [`vqtbl1q_s8`], implemented by
/// reinterpreting the table bytes (a bitwise no-op).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl1q_p8(t: poly8x16_t, idx: uint8x16_t) -> poly8x16_t {
    transmute(vqtbl1q(transmute(t), idx))
}
2433
/// Extended table look-up: indexes the 16-byte table `t` with `idx`, keeping
/// the corresponding lane of `a` where the index is out of range. Thin
/// wrapper over the `vqtbx1` platform intrinsic.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx1_s8(a: int8x8_t, t: int8x16_t, idx: uint8x8_t) -> int8x8_t {
    vqtbx1(a, t, idx)
}

/// Extended table look-up: 128-bit form of [`vqtbx1_s8`]. Thin wrapper over
/// the `vqtbx1q` platform intrinsic.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx1q_s8(a: int8x16_t, t: int8x16_t, idx: uint8x16_t) -> int8x16_t {
    vqtbx1q(a, t, idx)
}

/// Extended table look-up: unsigned variant of [`vqtbx1_s8`], implemented by
/// reinterpreting the operand bytes (a bitwise no-op).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx1_u8(a: uint8x8_t, t: uint8x16_t, idx: uint8x8_t) -> uint8x8_t {
    transmute(vqtbx1(transmute(a), transmute(t), idx))
}

/// Extended table look-up: unsigned variant of [`vqtbx1q_s8`], implemented by
/// reinterpreting the operand bytes (a bitwise no-op).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx1q_u8(a: uint8x16_t, t: uint8x16_t, idx: uint8x16_t) -> uint8x16_t {
    transmute(vqtbx1q(transmute(a), transmute(t), idx))
}

/// Extended table look-up: polynomial variant of [`vqtbx1_s8`], implemented
/// by reinterpreting the operand bytes (a bitwise no-op).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx1_p8(a: poly8x8_t, t: poly8x16_t, idx: uint8x8_t) -> poly8x8_t {
    transmute(vqtbx1(transmute(a), transmute(t), idx))
}

/// Extended table look-up: polynomial variant of [`vqtbx1q_s8`], implemented
/// by reinterpreting the operand bytes (a bitwise no-op).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx1q_p8(a: poly8x16_t, t: poly8x16_t, idx: uint8x16_t) -> poly8x16_t {
    transmute(vqtbx1q(transmute(a), transmute(t), idx))
}
2487
/// Table look-up across two 16-byte tables (`t.0`, `t.1`). Thin wrapper over
/// the `vqtbl2` platform intrinsic, which takes the tables as separate
/// registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl2_s8(t: int8x16x2_t, idx: uint8x8_t) -> int8x8_t {
    vqtbl2(t.0, t.1, idx)
}

/// Table look-up across two 16-byte tables: 128-bit index form of
/// [`vqtbl2_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl2q_s8(t: int8x16x2_t, idx: uint8x16_t) -> int8x16_t {
    vqtbl2q(t.0, t.1, idx)
}

/// Table look-up across two 16-byte tables: unsigned variant of
/// [`vqtbl2_s8`], implemented by reinterpreting the table bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl2_u8(t: uint8x16x2_t, idx: uint8x8_t) -> uint8x8_t {
    transmute(vqtbl2(transmute(t.0), transmute(t.1), idx))
}

/// Table look-up across two 16-byte tables: unsigned variant of
/// [`vqtbl2q_s8`], implemented by reinterpreting the table bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl2q_u8(t: uint8x16x2_t, idx: uint8x16_t) -> uint8x16_t {
    transmute(vqtbl2q(transmute(t.0), transmute(t.1), idx))
}

/// Table look-up across two 16-byte tables: polynomial variant of
/// [`vqtbl2_s8`], implemented by reinterpreting the table bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl2_p8(t: poly8x16x2_t, idx: uint8x8_t) -> poly8x8_t {
    transmute(vqtbl2(transmute(t.0), transmute(t.1), idx))
}

/// Table look-up across two 16-byte tables: polynomial variant of
/// [`vqtbl2q_s8`], implemented by reinterpreting the table bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl2q_p8(t: poly8x16x2_t, idx: uint8x16_t) -> poly8x16_t {
    transmute(vqtbl2q(transmute(t.0), transmute(t.1), idx))
}
2541
/// Extended table look-up across two 16-byte tables, keeping the matching
/// lane of `a` for out-of-range indices. Thin wrapper over the `vqtbx2`
/// platform intrinsic.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx2_s8(a: int8x8_t, t: int8x16x2_t, idx: uint8x8_t) -> int8x8_t {
    vqtbx2(a, t.0, t.1, idx)
}

/// Extended table look-up across two 16-byte tables: 128-bit form of
/// [`vqtbx2_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx2q_s8(a: int8x16_t, t: int8x16x2_t, idx: uint8x16_t) -> int8x16_t {
    vqtbx2q(a, t.0, t.1, idx)
}

/// Extended table look-up across two 16-byte tables: unsigned variant of
/// [`vqtbx2_s8`], implemented by reinterpreting the operand bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx2_u8(a: uint8x8_t, t: uint8x16x2_t, idx: uint8x8_t) -> uint8x8_t {
    transmute(vqtbx2(transmute(a), transmute(t.0), transmute(t.1), idx))
}

/// Extended table look-up across two 16-byte tables: unsigned variant of
/// [`vqtbx2q_s8`], implemented by reinterpreting the operand bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx2q_u8(a: uint8x16_t, t: uint8x16x2_t, idx: uint8x16_t) -> uint8x16_t {
    transmute(vqtbx2q(transmute(a), transmute(t.0), transmute(t.1), idx))
}

/// Extended table look-up across two 16-byte tables: polynomial variant of
/// [`vqtbx2_s8`], implemented by reinterpreting the operand bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx2_p8(a: poly8x8_t, t: poly8x16x2_t, idx: uint8x8_t) -> poly8x8_t {
    transmute(vqtbx2(transmute(a), transmute(t.0), transmute(t.1), idx))
}

/// Extended table look-up across two 16-byte tables: polynomial variant of
/// [`vqtbx2q_s8`], implemented by reinterpreting the operand bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx2q_p8(a: poly8x16_t, t: poly8x16x2_t, idx: uint8x16_t) -> poly8x16_t {
    transmute(vqtbx2q(transmute(a), transmute(t.0), transmute(t.1), idx))
}
2595
/// Table look-up across three 16-byte tables (`t.0`..`t.2`). Thin wrapper
/// over the `vqtbl3` platform intrinsic, which takes the tables as separate
/// registers.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl3_s8(t: int8x16x3_t, idx: uint8x8_t) -> int8x8_t {
    vqtbl3(t.0, t.1, t.2, idx)
}

/// Table look-up across three 16-byte tables: 128-bit index form of
/// [`vqtbl3_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl3q_s8(t: int8x16x3_t, idx: uint8x16_t) -> int8x16_t {
    vqtbl3q(t.0, t.1, t.2, idx)
}

/// Table look-up across three 16-byte tables: unsigned variant of
/// [`vqtbl3_s8`], implemented by reinterpreting the table bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl3_u8(t: uint8x16x3_t, idx: uint8x8_t) -> uint8x8_t {
    transmute(vqtbl3(transmute(t.0), transmute(t.1), transmute(t.2), idx))
}

/// Table look-up across three 16-byte tables: unsigned variant of
/// [`vqtbl3q_s8`], implemented by reinterpreting the table bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl3q_u8(t: uint8x16x3_t, idx: uint8x16_t) -> uint8x16_t {
    transmute(vqtbl3q(transmute(t.0), transmute(t.1), transmute(t.2), idx))
}

/// Table look-up across three 16-byte tables: polynomial variant of
/// [`vqtbl3_s8`], implemented by reinterpreting the table bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl3_p8(t: poly8x16x3_t, idx: uint8x8_t) -> poly8x8_t {
    transmute(vqtbl3(transmute(t.0), transmute(t.1), transmute(t.2), idx))
}

/// Table look-up across three 16-byte tables: polynomial variant of
/// [`vqtbl3q_s8`], implemented by reinterpreting the table bytes.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl3q_p8(t: poly8x16x3_t, idx: uint8x16_t) -> poly8x16_t {
    transmute(vqtbl3q(transmute(t.0), transmute(t.1), transmute(t.2), idx))
}
2649
/// Extended table look-up
///
/// Each byte of `idx` selects a byte from the 48-byte table `t.0`..`t.2`;
/// per the AArch64 `TBX` instruction, an out-of-range index leaves the
/// corresponding byte of `a` unchanged.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx3_s8(a: int8x8_t, t: int8x16x3_t, idx: uint8x8_t) -> int8x8_t {
    vqtbx3(a, t.0, t.1, t.2, idx)
}

/// Extended table look-up
///
/// 128-bit-result variant of [`vqtbx3_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx3q_s8(a: int8x16_t, t: int8x16x3_t, idx: uint8x16_t) -> int8x16_t {
    vqtbx3q(a, t.0, t.1, t.2, idx)
}

/// Extended table look-up
///
/// Unsigned variant of [`vqtbx3_s8`]; the transmutes only reinterpret the
/// lanes (u8 <-> i8), they do not change any bits.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx3_u8(a: uint8x8_t, t: uint8x16x3_t, idx: uint8x8_t) -> uint8x8_t {
    transmute(vqtbx3(
        transmute(a),
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        idx,
    ))
}

/// Extended table look-up
///
/// 128-bit-result unsigned variant of [`vqtbx3_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx3q_u8(a: uint8x16_t, t: uint8x16x3_t, idx: uint8x16_t) -> uint8x16_t {
    transmute(vqtbx3q(
        transmute(a),
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        idx,
    ))
}

/// Extended table look-up
///
/// Poly8 variant of [`vqtbx3_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx3_p8(a: poly8x8_t, t: poly8x16x3_t, idx: uint8x8_t) -> poly8x8_t {
    transmute(vqtbx3(
        transmute(a),
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        idx,
    ))
}

/// Extended table look-up
///
/// 128-bit-result poly8 variant of [`vqtbx3_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx3q_p8(a: poly8x16_t, t: poly8x16x3_t, idx: uint8x16_t) -> poly8x16_t {
    transmute(vqtbx3q(
        transmute(a),
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        idx,
    ))
}
2727
/// Table look-up
///
/// Each byte of `idx` selects a byte from the 64-byte table `t.0`..`t.3`;
/// per the AArch64 `TBL` instruction, an out-of-range index yields a zero
/// byte.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl4_s8(t: int8x16x4_t, idx: uint8x8_t) -> int8x8_t {
    vqtbl4(t.0, t.1, t.2, t.3, idx)
}

/// Table look-up
///
/// 128-bit-result variant of [`vqtbl4_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl4q_s8(t: int8x16x4_t, idx: uint8x16_t) -> int8x16_t {
    vqtbl4q(t.0, t.1, t.2, t.3, idx)
}

/// Table look-up
///
/// Unsigned variant of [`vqtbl4_s8`]; the transmutes only reinterpret the
/// lanes (u8 <-> i8).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl4_u8(t: uint8x16x4_t, idx: uint8x8_t) -> uint8x8_t {
    transmute(vqtbl4(
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        transmute(t.3),
        idx,
    ))
}

/// Table look-up
///
/// 128-bit-result unsigned variant of [`vqtbl4_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl4q_u8(t: uint8x16x4_t, idx: uint8x16_t) -> uint8x16_t {
    transmute(vqtbl4q(
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        transmute(t.3),
        idx,
    ))
}

/// Table look-up
///
/// Poly8 variant of [`vqtbl4_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl4_p8(t: poly8x16x4_t, idx: uint8x8_t) -> poly8x8_t {
    transmute(vqtbl4(
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        transmute(t.3),
        idx,
    ))
}

/// Table look-up
///
/// 128-bit-result poly8 variant of [`vqtbl4_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbl4q_p8(t: poly8x16x4_t, idx: uint8x16_t) -> poly8x16_t {
    transmute(vqtbl4q(
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        transmute(t.3),
        idx,
    ))
}
2805
/// Extended table look-up
///
/// Each byte of `idx` selects a byte from the 64-byte table `t.0`..`t.3`;
/// per the AArch64 `TBX` instruction, an out-of-range index leaves the
/// corresponding byte of `a` unchanged.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx4_s8(a: int8x8_t, t: int8x16x4_t, idx: uint8x8_t) -> int8x8_t {
    vqtbx4(a, t.0, t.1, t.2, t.3, idx)
}

/// Extended table look-up
///
/// 128-bit-result variant of [`vqtbx4_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx4q_s8(a: int8x16_t, t: int8x16x4_t, idx: uint8x16_t) -> int8x16_t {
    vqtbx4q(a, t.0, t.1, t.2, t.3, idx)
}

/// Extended table look-up
///
/// Unsigned variant of [`vqtbx4_s8`]; the transmutes only reinterpret the
/// lanes (u8 <-> i8), they do not change any bits.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx4_u8(a: uint8x8_t, t: uint8x16x4_t, idx: uint8x8_t) -> uint8x8_t {
    transmute(vqtbx4(
        transmute(a),
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        transmute(t.3),
        idx,
    ))
}

/// Extended table look-up
///
/// 128-bit-result unsigned variant of [`vqtbx4_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx4q_u8(a: uint8x16_t, t: uint8x16x4_t, idx: uint8x16_t) -> uint8x16_t {
    transmute(vqtbx4q(
        transmute(a),
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        transmute(t.3),
        idx,
    ))
}

/// Extended table look-up
///
/// Poly8 variant of [`vqtbx4_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx4_p8(a: poly8x8_t, t: poly8x16x4_t, idx: uint8x8_t) -> poly8x8_t {
    transmute(vqtbx4(
        transmute(a),
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        transmute(t.3),
        idx,
    ))
}

/// Extended table look-up
///
/// 128-bit-result poly8 variant of [`vqtbx4_s8`].
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqtbx4q_p8(a: poly8x16_t, t: poly8x16x4_t, idx: uint8x16_t) -> poly8x16_t {
    transmute(vqtbx4q(
        transmute(a),
        transmute(t.0),
        transmute(t.1),
        transmute(t.2),
        transmute(t.3),
        idx,
    ))
}
2887
/// Shift left
///
/// Scalar shift left of a single `i64` by the constant `N`.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshld_n_s64<const N: i32>(a: i64) -> i64 {
    // 6-bit unsigned immediate: N is 0..=63, so the shift is always in range.
    static_assert_uimm_bits!(N, 6);
    a << N
}

/// Shift left
///
/// Scalar shift left of a single `u64` by the constant `N` (0..=63).
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshld_n_u64<const N: i32>(a: u64) -> u64 {
    static_assert_uimm_bits!(N, 6);
    a << N
}
2909
2910/// Signed shift right
2911#[inline]
2912#[target_feature(enable = "neon")]
2913#[cfg_attr(test, assert_instr(nop, N = 2))]
2914#[rustc_legacy_const_generics(1)]
2915#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2916pub unsafe fn vshrd_n_s64<const N: i32>(a: i64) -> i64 {
2917    static_assert!(N >= 1 && N <= 64);
2918    let n: i32 = if N == 64 { 63 } else { N };
2919    a >> n
2920}
2921
2922/// Unsigned shift right
2923#[inline]
2924#[target_feature(enable = "neon")]
2925#[cfg_attr(test, assert_instr(nop, N = 2))]
2926#[rustc_legacy_const_generics(1)]
2927#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2928pub unsafe fn vshrd_n_u64<const N: i32>(a: u64) -> u64 {
2929    static_assert!(N >= 1 && N <= 64);
2930    let n: i32 = if N == 64 {
2931        return 0;
2932    } else {
2933        N
2934    };
2935    a >> n
2936}
2937
/// Signed shift right and accumulate
///
/// Computes `a + (b >> N)` for a single `i64`; the shift follows
/// [`vshrd_n_s64`] semantics (an `N` of 64 sign-fills). The addition wraps
/// on overflow, matching the modular arithmetic of the SSRA instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    a.wrapping_add(vshrd_n_s64::<N>(b))
}

/// Unsigned shift right and accumulate
///
/// Computes `a + (b >> N)` for a single `u64`; the shift follows
/// [`vshrd_n_u64`] semantics (an `N` of 64 yields zero). The addition
/// wraps on overflow, matching the USRA instruction.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    a.wrapping_add(vshrd_n_u64::<N>(b))
}
2959
// SLI (Shift Left and Insert): each lane of `b` is shifted left by the
// immediate `N` and inserted into the corresponding lane of `a`; the low
// `N` bits of `a`'s lane are preserved (see the AArch64 `SLI` instruction).
// The valid immediate range is 0..=(lane bits - 1). Unsigned and poly
// variants reuse the signed implementations; their transmutes are
// bit-for-bit lane reinterpretations.

/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // 3-bit immediate: N in 0..=7 for 8-bit lanes.
    static_assert_uimm_bits!(N, 3);
    vsli_n_s8_(a, b, N)
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    static_assert_uimm_bits!(N, 3);
    vsliq_n_s8_(a, b, N)
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_uimm_bits!(N, 4);
    vsli_n_s16_(a, b, N)
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    static_assert_uimm_bits!(N, 4);
    vsliq_n_s16_(a, b, N)
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Equivalent to `static_assert_uimm_bits!(N, 5)`: N in 0..=31.
    static_assert!(N >= 0 && N <= 31);
    vsli_n_s32_(a, b, N)
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert!(N >= 0 && N <= 31);
    vsliq_n_s32_(a, b, N)
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N >= 0 && N <= 63);
    vsli_n_s64_(a, b, N)
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert!(N >= 0 && N <= 63);
    vsliq_n_s64_(a, b, N)
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_uimm_bits!(N, 3);
    transmute(vsli_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert_uimm_bits!(N, 3);
    transmute(vsliq_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert_uimm_bits!(N, 4);
    transmute(vsli_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert_uimm_bits!(N, 4);
    transmute(vsliq_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 0 && N <= 31);
    transmute(vsli_n_s32_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 0 && N <= 31);
    transmute(vsliq_n_s32_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 0 && N <= 63);
    transmute(vsli_n_s64_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 0 && N <= 63);
    transmute(vsliq_n_s64_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_uimm_bits!(N, 3);
    transmute(vsli_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert_uimm_bits!(N, 3);
    transmute(vsliq_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert_uimm_bits!(N, 4);
    transmute(vsli_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert_uimm_bits!(N, 4);
    transmute(vsliq_n_s16_(transmute(a), transmute(b), N))
}
3160
/// Shift Left and Insert (immediate)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)
// poly64 lanes require the `aes` feature in addition to `neon`, hence the
// different `target_feature` from the rest of the SLI family.
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 0 && N <= 63);
    transmute(vsli_n_s64_(transmute(a), transmute(b), N))
}

/// Shift Left and Insert (immediate)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 0 && N <= 63);
    transmute(vsliq_n_s64_(transmute(a), transmute(b), N))
}
// SRI (Shift Right and Insert): each lane of `b` is shifted right by the
// immediate `N` and inserted into the corresponding lane of `a`; the high
// `N` bits of `a`'s lane are preserved (see the AArch64 `SRI` instruction).
// Unlike SLI, the valid immediate range is 1..=lane bits. Unsigned and
// poly variants reuse the signed implementations via bit-for-bit lane
// reinterpretation.

/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert!(N >= 1 && N <= 8);
    vsri_n_s8_(a, b, N)
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    static_assert!(N >= 1 && N <= 8);
    vsriq_n_s8_(a, b, N)
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert!(N >= 1 && N <= 16);
    vsri_n_s16_(a, b, N)
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    static_assert!(N >= 1 && N <= 16);
    vsriq_n_s16_(a, b, N)
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert!(N >= 1 && N <= 32);
    vsri_n_s32_(a, b, N)
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert!(N >= 1 && N <= 32);
    vsriq_n_s32_(a, b, N)
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N >= 1 && N <= 64);
    vsri_n_s64_(a, b, N)
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert!(N >= 1 && N <= 64);
    vsriq_n_s64_(a, b, N)
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert!(N >= 1 && N <= 8);
    transmute(vsri_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    transmute(vsriq_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert!(N >= 1 && N <= 16);
    transmute(vsri_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    transmute(vsriq_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 1 && N <= 32);
    transmute(vsri_n_s32_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    transmute(vsriq_n_s32_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    transmute(vsri_n_s64_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    transmute(vsriq_n_s64_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert!(N >= 1 && N <= 8);
    transmute(vsri_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert!(N >= 1 && N <= 8);
    transmute(vsriq_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert!(N >= 1 && N <= 16);
    transmute(vsri_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert!(N >= 1 && N <= 16);
    transmute(vsriq_n_s16_(transmute(a), transmute(b), N))
}
3386
/// Shift Right and Insert (immediate)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)
// poly64 lanes require the `aes` feature in addition to `neon`, hence the
// different `target_feature` from the rest of the SRI family.
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 1 && N <= 64);
    transmute(vsri_n_s64_(transmute(a), transmute(b), N))
}

/// Shift Right and Insert (immediate)
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 1 && N <= 64);
    transmute(vsriq_n_s64_(transmute(a), transmute(b), N))
}
3412
/// SM3TT1A
///
/// SM3 hash round helper, lowering to the `SM3TT1A` instruction via the
/// `llvm.aarch64.crypto.sm3tt1a` intrinsic. `IMM2` is a 2-bit immediate
/// operand of the instruction — see Arm's SM3TT1A documentation for its
/// exact semantics.
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub unsafe fn vsm3tt1aq_u32<const IMM2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(IMM2, 2);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1a"
        )]
        fn vsm3tt1aq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t;
    }
    // The LLVM intrinsic takes the immediate as an i64 operand.
    vsm3tt1aq_u32_(a, b, c, IMM2 as i64)
}
3435
/// SM3TT1B
///
/// SM3 hash round helper, lowering to the `SM3TT1B` instruction via the
/// `llvm.aarch64.crypto.sm3tt1b` intrinsic. `IMM2` is a 2-bit immediate
/// operand of the instruction.
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub unsafe fn vsm3tt1bq_u32<const IMM2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(IMM2, 2);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt1b"
        )]
        fn vsm3tt1bq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t;
    }
    // The LLVM intrinsic takes the immediate as an i64 operand.
    vsm3tt1bq_u32_(a, b, c, IMM2 as i64)
}
3458
/// SM3TT2A
///
/// SM3 hash round helper, lowering to the `SM3TT2A` instruction via the
/// `llvm.aarch64.crypto.sm3tt2a` intrinsic. `IMM2` is a 2-bit immediate
/// operand of the instruction.
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub unsafe fn vsm3tt2aq_u32<const IMM2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(IMM2, 2);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2a"
        )]
        fn vsm3tt2aq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t;
    }
    // The LLVM intrinsic takes the immediate as an i64 operand.
    vsm3tt2aq_u32_(a, b, c, IMM2 as i64)
}
3481
/// SM3TT2B
///
/// SM3 hash TT2B round intrinsic; lowers to the `SM3TT2B` instruction via the
/// `llvm.aarch64.crypto.sm3tt2b` intrinsic. `IMM2` is a 2-bit immediate
/// (0..=3), enforced below and forwarded to LLVM as an `i64`.
#[inline]
#[target_feature(enable = "neon,sm4")]
#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
pub unsafe fn vsm3tt2bq_u32<const IMM2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint32x4_t {
    // Compile-time check that the immediate fits in 2 bits.
    static_assert_uimm_bits!(IMM2, 2);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sm3tt2b"
        )]
        fn vsm3tt2bq_u32_(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, imm2: i64) -> uint32x4_t;
    }
    vsm3tt2bq_u32_(a, b, c, IMM2 as i64)
}
3504
/// Exclusive OR and rotate
///
/// Lowers to the SHA3 `XAR` instruction via `llvm.aarch64.crypto.xar`.
/// `IMM6` is a 6-bit immediate (0..=63) — the rotation amount encoded in the
/// `XAR` instruction — enforced below and forwarded to LLVM as an `i64`.
#[inline]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub unsafe fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Compile-time check that the immediate fits in 6 bits.
    static_assert_uimm_bits!(IMM6, 6);
    #[allow(improper_ctypes)]
    extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn vxarq_u64_(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    vxarq_u64_(a, b, IMM6 as i64)
}
3523
3524#[cfg(test)]
3525mod tests {
3526    use crate::core_arch::aarch64::test_support::*;
3527    use crate::core_arch::arm_shared::test_support::*;
3528    use crate::core_arch::{aarch64::neon::*, aarch64::*, simd::*};
3529    use std::mem::transmute;
3530    use stdarch_test::simd_test;
3531
    // vuqadd_s8: signed saturating add of an unsigned vector; lanes 0 and 7
    // exercise clamping at i8::MAX.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqadd_s8() {
        let a = i8x8::new(i8::MIN, -3, -2, -1, 0, 1, 2, i8::MAX);
        let b = u8x8::new(u8::MAX, 1, 2, 3, 4, 5, 6, 7);
        let e = i8x8::new(i8::MAX, -2, 0, 2, 4, 6, 8, i8::MAX);
        let r: i8x8 = transmute(vuqadd_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vuqaddq_s8: 128-bit variant of vuqadd_s8; first and last lanes saturate
    // at i8::MAX, interior lanes add normally.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqaddq_s8() {
        let a = i8x16::new(
            i8::MIN,
            -7,
            -6,
            -5,
            -4,
            -3,
            -2,
            -1,
            0,
            1,
            2,
            3,
            4,
            5,
            6,
            i8::MAX,
        );
        let b = u8x16::new(u8::MAX, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let e = i8x16::new(
            i8::MAX,
            -6,
            -4,
            -2,
            0,
            2,
            4,
            6,
            8,
            10,
            12,
            14,
            16,
            18,
            20,
            i8::MAX,
        );
        let r: i8x16 = transmute(vuqaddq_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vuqadd_s16: signed saturating add of an unsigned vector, 16-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqadd_s16() {
        let a = i16x4::new(i16::MIN, -1, 0, i16::MAX);
        let b = u16x4::new(u16::MAX, 1, 2, 3);
        let e = i16x4::new(i16::MAX, 0, 2, i16::MAX);
        let r: i16x4 = transmute(vuqadd_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vuqaddq_s16: 128-bit variant; boundary lanes saturate at i16::MAX.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqaddq_s16() {
        let a = i16x8::new(i16::MIN, -3, -2, -1, 0, 1, 2, i16::MAX);
        let b = u16x8::new(u16::MAX, 1, 2, 3, 4, 5, 6, 7);
        let e = i16x8::new(i16::MAX, -2, 0, 2, 4, 6, 8, i16::MAX);
        let r: i16x8 = transmute(vuqaddq_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vuqadd_s32: signed saturating add of an unsigned vector, 32-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqadd_s32() {
        let a = i32x2::new(i32::MIN, i32::MAX);
        let b = u32x2::new(u32::MAX, 1);
        let e = i32x2::new(i32::MAX, i32::MAX);
        let r: i32x2 = transmute(vuqadd_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vuqaddq_s32: 128-bit variant; boundary lanes saturate at i32::MAX.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqaddq_s32() {
        let a = i32x4::new(i32::MIN, -1, 0, i32::MAX);
        let b = u32x4::new(u32::MAX, 1, 2, 3);
        let e = i32x4::new(i32::MAX, 0, 2, i32::MAX);
        let r: i32x4 = transmute(vuqaddq_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vuqadd_s64: single-lane case; i64::MIN + u64::MAX saturates to i64::MAX.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqadd_s64() {
        let a = i64x1::new(i64::MIN);
        let b = u64x1::new(u64::MAX);
        let e = i64x1::new(i64::MAX);
        let r: i64x1 = transmute(vuqadd_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vuqaddq_s64: both lanes saturate at i64::MAX.
    #[simd_test(enable = "neon")]
    unsafe fn test_vuqaddq_s64() {
        let a = i64x2::new(i64::MIN, i64::MAX);
        let b = u64x2::new(u64::MAX, 1);
        let e = i64x2::new(i64::MAX, i64::MAX);
        let r: i64x2 = transmute(vuqaddq_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
3630
    // vsqadd_u8: unsigned saturating add of a signed vector; negative sums
    // clamp to 0, lane 7 clamps to u8::MAX.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsqadd_u8() {
        let a = u8x8::new(0, 1, 2, 3, 4, 5, 6, u8::MAX);
        let b = i8x8::new(i8::MIN, -3, -2, -1, 0, 1, 2, 3);
        let e = u8x8::new(0, 0, 0, 2, 4, 6, 8, u8::MAX);
        let r: u8x8 = transmute(vsqadd_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vsqaddq_u8: 128-bit variant; negative sums clamp to 0, last lane to u8::MAX.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsqaddq_u8() {
        let a = u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, u8::MAX);
        let b = i8x16::new(i8::MIN, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7);
        let e = u8x16::new(0, 0, 0, 0, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, u8::MAX);
        let r: u8x16 = transmute(vsqaddq_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vsqadd_u16: unsigned saturating add of a signed vector, 16-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsqadd_u16() {
        let a = u16x4::new(0, 1, 2, u16::MAX);
        let b = i16x4::new(i16::MIN, -1, 0, 1);
        let e = u16x4::new(0, 0, 2, u16::MAX);
        let r: u16x4 = transmute(vsqadd_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vsqaddq_u16: 128-bit variant; clamps at 0 and u16::MAX.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsqaddq_u16() {
        let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, u16::MAX);
        let b = i16x8::new(i16::MIN, -3, -2, -1, 0, 1, 2, 3);
        let e = u16x8::new(0, 0, 0, 2, 4, 6, 8, u16::MAX);
        let r: u16x8 = transmute(vsqaddq_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vsqadd_u32: unsigned saturating add of a signed vector, 32-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsqadd_u32() {
        let a = u32x2::new(0, u32::MAX);
        let b = i32x2::new(i32::MIN, 1);
        let e = u32x2::new(0, u32::MAX);
        let r: u32x2 = transmute(vsqadd_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vsqaddq_u32: 128-bit variant; clamps at 0 and u32::MAX.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsqaddq_u32() {
        let a = u32x4::new(0, 1, 2, u32::MAX);
        let b = i32x4::new(i32::MIN, -1, 0, 1);
        let e = u32x4::new(0, 0, 2, u32::MAX);
        let r: u32x4 = transmute(vsqaddq_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vsqadd_u64: single-lane case; 0 + i64::MIN clamps to 0.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsqadd_u64() {
        let a = u64x1::new(0);
        let b = i64x1::new(i64::MIN);
        let e = u64x1::new(0);
        let r: u64x1 = transmute(vsqadd_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
    // vsqaddq_u64: lanes clamp at 0 and u64::MAX respectively.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsqaddq_u64() {
        let a = u64x2::new(0, u64::MAX);
        let b = i64x2::new(i64::MIN, 1);
        let e = u64x2::new(0, u64::MAX);
        let r: u64x2 = transmute(vsqaddq_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
3695
    // vpaddq_s16: pairwise add — low half of the result holds sums of adjacent
    // lanes of `a`, high half sums of adjacent lanes of `b`.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpaddq_s16() {
        let a = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
        let b = i16x8::new(0, -1, -2, -3, -4, -5, -6, -7);
        let r: i16x8 = transmute(vpaddq_s16(transmute(a), transmute(b)));
        let e = i16x8::new(3, 7, 11, 15, -1, -5, -9, -13);
        assert_eq!(r, e);
    }
    // vpaddq_s32: pairwise add, 32-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpaddq_s32() {
        let a = i32x4::new(1, 2, 3, 4);
        let b = i32x4::new(0, -1, -2, -3);
        let r: i32x4 = transmute(vpaddq_s32(transmute(a), transmute(b)));
        let e = i32x4::new(3, 7, -1, -5);
        assert_eq!(r, e);
    }
    // vpaddq_s64: pairwise add, 64-bit lanes (one pair per input vector).
    #[simd_test(enable = "neon")]
    unsafe fn test_vpaddq_s64() {
        let a = i64x2::new(1, 2);
        let b = i64x2::new(0, -1);
        let r: i64x2 = transmute(vpaddq_s64(transmute(a), transmute(b)));
        let e = i64x2::new(3, -1);
        assert_eq!(r, e);
    }
3720    #[simd_test(enable = "neon")]
3721    unsafe fn test_vpaddq_s8() {
3722        let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
3723        let b = i8x16::new(
3724            0, -1, -2, -3, -4, -5, -6, -7, -8, -8, -10, -11, -12, -13, -14, -15,
3725        );
3726        let r: i8x16 = transmute(vpaddq_s8(transmute(a), transmute(b)));
3727        let e = i8x16::new(
3728            3, 7, 11, 15, 19, 23, 27, 31, -1, -5, -9, -13, -16, -21, -25, -29,
3729        );
3730        assert_eq!(r, e);
3731    }
    // vpaddq_u16: pairwise add, unsigned 16-bit lanes.
    // NOTE(review): `b` contains a duplicate `20, 20`; the expected vector is
    // consistent with it, but it was presumably meant to be sequential — confirm.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpaddq_u16() {
        let a = u16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
        let b = u16x8::new(17, 18, 19, 20, 20, 21, 22, 23);
        let r: u16x8 = transmute(vpaddq_u16(transmute(a), transmute(b)));
        let e = u16x8::new(1, 5, 9, 13, 35, 39, 41, 45);
        assert_eq!(r, e);
    }
    // vpaddq_u32: pairwise add, unsigned 32-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpaddq_u32() {
        let a = u32x4::new(0, 1, 2, 3);
        let b = u32x4::new(17, 18, 19, 20);
        let r: u32x4 = transmute(vpaddq_u32(transmute(a), transmute(b)));
        let e = u32x4::new(1, 5, 35, 39);
        assert_eq!(r, e);
    }
    // vpaddq_u64: pairwise add, unsigned 64-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpaddq_u64() {
        let a = u64x2::new(0, 1);
        let b = u64x2::new(17, 18);
        let r: u64x2 = transmute(vpaddq_u64(transmute(a), transmute(b)));
        let e = u64x2::new(1, 35);
        assert_eq!(r, e);
    }
3756    #[simd_test(enable = "neon")]
3757    unsafe fn test_vpaddq_u8() {
3758        let a = i8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
3759        let b = i8x16::new(
3760            17, 18, 19, 20, 20, 21, 22, 23, 24, 25, 26, 27, 29, 29, 30, 31,
3761        );
3762        let r = i8x16::new(1, 5, 9, 13, 17, 21, 25, 29, 35, 39, 41, 45, 49, 53, 58, 61);
3763        let e: i8x16 = transmute(vpaddq_u8(transmute(a), transmute(b)));
3764        assert_eq!(r, e);
3765    }
    // vpaddd_s64: adds the two i64 lanes into a scalar.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpaddd_s64() {
        let a = i64x2::new(2, -3);
        let r: i64 = vpaddd_s64(transmute(a));
        let e = -1_i64;
        assert_eq!(r, e);
    }
    // vpaddd_u64: adds the two u64 lanes into a scalar.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpaddd_u64() {
        let a = i64x2::new(2, 3);
        let r: u64 = vpaddd_u64(transmute(a));
        let e = 5_u64;
        assert_eq!(r, e);
    }
3780
    // vadd_f64: single-lane f64 addition (scalars transmuted to/from float64x1_t).
    #[simd_test(enable = "neon")]
    unsafe fn test_vadd_f64() {
        let a = 1.;
        let b = 8.;
        let e = 9.;
        let r: f64 = transmute(vadd_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
3789
    // vaddq_f64: lane-wise f64 addition of two 2-lane vectors.
    #[simd_test(enable = "neon")]
    unsafe fn test_vaddq_f64() {
        let a = f64x2::new(1., 2.);
        let b = f64x2::new(8., 7.);
        let e = f64x2::new(9., 9.);
        let r: f64x2 = transmute(vaddq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
3798
    // vadd_s64: single-lane i64 addition (scalars transmuted to/from int64x1_t).
    #[simd_test(enable = "neon")]
    unsafe fn test_vadd_s64() {
        let a = 1_i64;
        let b = 8_i64;
        let e = 9_i64;
        let r: i64 = transmute(vadd_s64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
3807
    // vadd_u64: single-lane u64 addition (scalars transmuted to/from uint64x1_t).
    #[simd_test(enable = "neon")]
    unsafe fn test_vadd_u64() {
        let a = 1_u64;
        let b = 8_u64;
        let e = 9_u64;
        let r: u64 = transmute(vadd_u64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
3816
    // vaddd_s64: scalar i64 add intrinsic (no transmutes needed).
    #[simd_test(enable = "neon")]
    unsafe fn test_vaddd_s64() {
        let a = 1_i64;
        let b = 8_i64;
        let e = 9_i64;
        let r: i64 = vaddd_s64(a, b);
        assert_eq!(r, e);
    }
3825
    // vaddd_u64: scalar u64 add intrinsic (no transmutes needed).
    #[simd_test(enable = "neon")]
    unsafe fn test_vaddd_u64() {
        let a = 1_u64;
        let b = 8_u64;
        let e = 9_u64;
        let r: u64 = vaddd_u64(a, b);
        assert_eq!(r, e);
    }
3834
    // vmaxv_s8: horizontal maximum across all 8 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxv_s8() {
        let r = vmaxv_s8(transmute(i8x8::new(1, 2, 3, 4, -8, 6, 7, 5)));
        assert_eq!(r, 7_i8);
    }
3840
    // vmaxvq_s8: horizontal maximum across all 16 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxvq_s8() {
        #[rustfmt::skip]
        let r = vmaxvq_s8(transmute(i8x16::new(
            1, 2, 3, 4,
            -16, 6, 7, 5,
            8, 1, 1, 1,
            1, 1, 1, 1,
        )));
        assert_eq!(r, 8_i8);
    }
3852
    // vmaxv_s16: horizontal maximum across 4 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxv_s16() {
        let r = vmaxv_s16(transmute(i16x4::new(1, 2, -4, 3)));
        assert_eq!(r, 3_i16);
    }
3858
    // vmaxvq_s16: horizontal maximum across 8 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxvq_s16() {
        let r = vmaxvq_s16(transmute(i16x8::new(1, 2, 7, 4, -16, 6, 7, 5)));
        assert_eq!(r, 7_i16);
    }
3864
    // vmaxv_s32: horizontal maximum across 2 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxv_s32() {
        let r = vmaxv_s32(transmute(i32x2::new(1, -4)));
        assert_eq!(r, 1_i32);
    }
3870
    // vmaxvq_s32: horizontal maximum across 4 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxvq_s32() {
        let r = vmaxvq_s32(transmute(i32x4::new(1, 2, -32, 4)));
        assert_eq!(r, 4_i32);
    }
3876
    // vmaxv_u8: horizontal maximum across 8 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxv_u8() {
        let r = vmaxv_u8(transmute(u8x8::new(1, 2, 3, 4, 8, 6, 7, 5)));
        assert_eq!(r, 8_u8);
    }
3882
    // vmaxvq_u8: horizontal maximum across 16 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxvq_u8() {
        #[rustfmt::skip]
        let r = vmaxvq_u8(transmute(u8x16::new(
            1, 2, 3, 4,
            16, 6, 7, 5,
            8, 1, 1, 1,
            1, 1, 1, 1,
        )));
        assert_eq!(r, 16_u8);
    }
3894
    // vmaxv_u16: horizontal maximum across 4 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxv_u16() {
        let r = vmaxv_u16(transmute(u16x4::new(1, 2, 4, 3)));
        assert_eq!(r, 4_u16);
    }
3900
    // vmaxvq_u16: horizontal maximum across 8 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxvq_u16() {
        let r = vmaxvq_u16(transmute(u16x8::new(1, 2, 7, 4, 16, 6, 7, 5)));
        assert_eq!(r, 16_u16);
    }
3906
    // vmaxv_u32: horizontal maximum across 2 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxv_u32() {
        let r = vmaxv_u32(transmute(u32x2::new(1, 4)));
        assert_eq!(r, 4_u32);
    }
3912
    // vmaxvq_u32: horizontal maximum across 4 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxvq_u32() {
        let r = vmaxvq_u32(transmute(u32x4::new(1, 2, 32, 4)));
        assert_eq!(r, 32_u32);
    }
3918
    // vmaxv_f32: horizontal maximum across 2 float lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxv_f32() {
        let r = vmaxv_f32(transmute(f32x2::new(1., 4.)));
        assert_eq!(r, 4_f32);
    }
3924
    // vmaxvq_f32: horizontal maximum across 4 float lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxvq_f32() {
        let r = vmaxvq_f32(transmute(f32x4::new(1., 2., 32., 4.)));
        assert_eq!(r, 32_f32);
    }
3930
    // vmaxvq_f64: horizontal maximum across 2 double lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmaxvq_f64() {
        let r = vmaxvq_f64(transmute(f64x2::new(1., 4.)));
        assert_eq!(r, 4_f64);
    }
3936
    // vminv_s8: horizontal minimum across all 8 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminv_s8() {
        let r = vminv_s8(transmute(i8x8::new(1, 2, 3, 4, -8, 6, 7, 5)));
        assert_eq!(r, -8_i8);
    }
3942
    // vminvq_s8: horizontal minimum across all 16 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminvq_s8() {
        #[rustfmt::skip]
        let r = vminvq_s8(transmute(i8x16::new(
            1, 2, 3, 4,
            -16, 6, 7, 5,
            8, 1, 1, 1,
            1, 1, 1, 1,
        )));
        assert_eq!(r, -16_i8);
    }
3954
    // vminv_s16: horizontal minimum across 4 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminv_s16() {
        let r = vminv_s16(transmute(i16x4::new(1, 2, -4, 3)));
        assert_eq!(r, -4_i16);
    }
3960
    // vminvq_s16: horizontal minimum across 8 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminvq_s16() {
        let r = vminvq_s16(transmute(i16x8::new(1, 2, 7, 4, -16, 6, 7, 5)));
        assert_eq!(r, -16_i16);
    }
3966
    // vminv_s32: horizontal minimum across 2 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminv_s32() {
        let r = vminv_s32(transmute(i32x2::new(1, -4)));
        assert_eq!(r, -4_i32);
    }
3972
    // vminvq_s32: horizontal minimum across 4 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminvq_s32() {
        let r = vminvq_s32(transmute(i32x4::new(1, 2, -32, 4)));
        assert_eq!(r, -32_i32);
    }
3978
    // vminv_u8: horizontal minimum across 8 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminv_u8() {
        let r = vminv_u8(transmute(u8x8::new(1, 2, 3, 4, 8, 6, 7, 5)));
        assert_eq!(r, 1_u8);
    }
3984
    // vminvq_u8: horizontal minimum across 16 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminvq_u8() {
        #[rustfmt::skip]
        let r = vminvq_u8(transmute(u8x16::new(
            1, 2, 3, 4,
            16, 6, 7, 5,
            8, 1, 1, 1,
            1, 1, 1, 1,
        )));
        assert_eq!(r, 1_u8);
    }
3996
    // vminv_u16: horizontal minimum across 4 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminv_u16() {
        let r = vminv_u16(transmute(u16x4::new(1, 2, 4, 3)));
        assert_eq!(r, 1_u16);
    }
4002
    // vminvq_u16: horizontal minimum across 8 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminvq_u16() {
        let r = vminvq_u16(transmute(u16x8::new(1, 2, 7, 4, 16, 6, 7, 5)));
        assert_eq!(r, 1_u16);
    }
4008
    // vminv_u32: horizontal minimum across 2 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminv_u32() {
        let r = vminv_u32(transmute(u32x2::new(1, 4)));
        assert_eq!(r, 1_u32);
    }
4014
    // vminvq_u32: horizontal minimum across 4 unsigned lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminvq_u32() {
        let r = vminvq_u32(transmute(u32x4::new(1, 2, 32, 4)));
        assert_eq!(r, 1_u32);
    }
4020
    // vminv_f32: horizontal minimum across 2 float lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminv_f32() {
        let r = vminv_f32(transmute(f32x2::new(1., 4.)));
        assert_eq!(r, 1_f32);
    }
4026
    // vminvq_f32: horizontal minimum across 4 float lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminvq_f32() {
        let r = vminvq_f32(transmute(f32x4::new(1., 2., 32., 4.)));
        assert_eq!(r, 1_f32);
    }
4032
    // vminvq_f64: horizontal minimum across 2 double lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vminvq_f64() {
        let r = vminvq_f64(transmute(f64x2::new(1., 4.)));
        assert_eq!(r, 1_f64);
    }
4038
    // vpminq_s8: pairwise minimum — low half holds mins of adjacent lanes of
    // `a`, high half mins of adjacent lanes of `b`.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpminq_s8() {
        #[rustfmt::skip]
        let a = i8x16::new(1, -2, 3, -4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
        #[rustfmt::skip]
        let b = i8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
        #[rustfmt::skip]
        let e = i8x16::new(-2, -4, 5, 7, 1, 3, 5, 7, 0, 2, 4, 6, 0, 2, 4, 6);
        let r: i8x16 = transmute(vpminq_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4050
    // vpminq_s16: pairwise minimum, 16-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpminq_s16() {
        let a = i16x8::new(1, -2, 3, 4, 5, 6, 7, 8);
        let b = i16x8::new(0, 3, 2, 5, 4, 7, 6, 9);
        let e = i16x8::new(-2, 3, 5, 7, 0, 2, 4, 6);
        let r: i16x8 = transmute(vpminq_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4059
    // vpminq_s32: pairwise minimum, 32-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpminq_s32() {
        let a = i32x4::new(1, -2, 3, 4);
        let b = i32x4::new(0, 3, 2, 5);
        let e = i32x4::new(-2, 3, 0, 2);
        let r: i32x4 = transmute(vpminq_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4068
    // vpminq_u8: pairwise minimum, unsigned 8-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpminq_u8() {
        #[rustfmt::skip]
        let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
        #[rustfmt::skip]
        let b = u8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
        #[rustfmt::skip]
        let e = u8x16::new(1, 3, 5, 7, 1, 3, 5, 7, 0, 2, 4, 6, 0, 2, 4, 6);
        let r: u8x16 = transmute(vpminq_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4080
    // vpminq_u16: pairwise minimum, unsigned 16-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpminq_u16() {
        let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
        let b = u16x8::new(0, 3, 2, 5, 4, 7, 6, 9);
        let e = u16x8::new(1, 3, 5, 7, 0, 2, 4, 6);
        let r: u16x8 = transmute(vpminq_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4089
    // vpminq_u32: pairwise minimum, unsigned 32-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpminq_u32() {
        let a = u32x4::new(1, 2, 3, 4);
        let b = u32x4::new(0, 3, 2, 5);
        let e = u32x4::new(1, 3, 0, 2);
        let r: u32x4 = transmute(vpminq_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4098
    // Pairwise minimum, f32 lanes.
    // NOTE(review): the function name lacks the `q` — it actually tests
    // vpminq_f32; consider renaming for consistency.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpmin_f32() {
        let a = f32x4::new(1., -2., 3., 4.);
        let b = f32x4::new(0., 3., 2., 5.);
        let e = f32x4::new(-2., 3., 0., 2.);
        let r: f32x4 = transmute(vpminq_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4107
    // Pairwise minimum, f64 lanes.
    // NOTE(review): the function name lacks the `q` — it actually tests
    // vpminq_f64; consider renaming for consistency.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpmin_f64() {
        let a = f64x2::new(1., -2.);
        let b = f64x2::new(0., 3.);
        let e = f64x2::new(-2., 0.);
        let r: f64x2 = transmute(vpminq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4116
    // vpmaxq_s8: pairwise maximum — low half holds maxes of adjacent lanes of
    // `a`, high half maxes of adjacent lanes of `b`.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxq_s8() {
        #[rustfmt::skip]
        let a = i8x16::new(1, -2, 3, -4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
        #[rustfmt::skip]
        let b = i8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
        #[rustfmt::skip]
        let e = i8x16::new(1, 3, 6, 8, 2, 4, 6, 8, 3, 5, 7, 9, 3, 5, 7, 9);
        let r: i8x16 = transmute(vpmaxq_s8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4128
    // vpmaxq_s16: pairwise maximum, 16-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxq_s16() {
        let a = i16x8::new(1, -2, 3, 4, 5, 6, 7, 8);
        let b = i16x8::new(0, 3, 2, 5, 4, 7, 6, 9);
        let e = i16x8::new(1, 4, 6, 8, 3, 5, 7, 9);
        let r: i16x8 = transmute(vpmaxq_s16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4137
    // vpmaxq_s32: pairwise maximum, 32-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxq_s32() {
        let a = i32x4::new(1, -2, 3, 4);
        let b = i32x4::new(0, 3, 2, 5);
        let e = i32x4::new(1, 4, 3, 5);
        let r: i32x4 = transmute(vpmaxq_s32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4146
    // vpmaxq_u8: pairwise maximum, unsigned 8-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxq_u8() {
        #[rustfmt::skip]
        let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
        #[rustfmt::skip]
        let b = u8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
        #[rustfmt::skip]
        let e = u8x16::new(2, 4, 6, 8, 2, 4, 6, 8, 3, 5, 7, 9, 3, 5, 7, 9);
        let r: u8x16 = transmute(vpmaxq_u8(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4158
    // vpmaxq_u16: pairwise maximum, unsigned 16-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxq_u16() {
        let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
        let b = u16x8::new(0, 3, 2, 5, 4, 7, 6, 9);
        let e = u16x8::new(2, 4, 6, 8, 3, 5, 7, 9);
        let r: u16x8 = transmute(vpmaxq_u16(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4167
    // vpmaxq_u32: pairwise maximum, unsigned 32-bit lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpmaxq_u32() {
        let a = u32x4::new(1, 2, 3, 4);
        let b = u32x4::new(0, 3, 2, 5);
        let e = u32x4::new(2, 4, 3, 5);
        let r: u32x4 = transmute(vpmaxq_u32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4176
    // Pairwise maximum, f32 lanes.
    // NOTE(review): the function name lacks the `q` — it actually tests
    // vpmaxq_f32; consider renaming for consistency.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpmax_f32() {
        let a = f32x4::new(1., -2., 3., 4.);
        let b = f32x4::new(0., 3., 2., 5.);
        let e = f32x4::new(1., 4., 3., 5.);
        let r: f32x4 = transmute(vpmaxq_f32(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4185
    // Pairwise maximum, f64 lanes.
    // NOTE(review): the function name lacks the `q` — it actually tests
    // vpmaxq_f64; consider renaming for consistency.
    #[simd_test(enable = "neon")]
    unsafe fn test_vpmax_f64() {
        let a = f64x2::new(1., -2.);
        let b = f64x2::new(0., 3.);
        let e = f64x2::new(1., 3.);
        let r: f64x2 = transmute(vpmaxq_f64(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4194
    // vext_p64 with N = 0: extraction at offset 0 returns `a` unchanged.
    #[simd_test(enable = "neon")]
    unsafe fn test_vext_p64() {
        let a: i64x1 = i64x1::new(0);
        let b: i64x1 = i64x1::new(1);
        let e: i64x1 = i64x1::new(0);
        let r: i64x1 = transmute(vext_p64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4203
    // vext_f64 with N = 0: extraction at offset 0 returns `a` unchanged.
    #[simd_test(enable = "neon")]
    unsafe fn test_vext_f64() {
        let a: f64x1 = f64x1::new(0.);
        let b: f64x1 = f64x1::new(1.);
        let e: f64x1 = f64x1::new(0.);
        let r: f64x1 = transmute(vext_f64::<0>(transmute(a), transmute(b)));
        assert_eq!(r, e);
    }
4212
    // vshld_n_s64: scalar left shift by const N (1 << 2 == 4).
    #[simd_test(enable = "neon")]
    unsafe fn test_vshld_n_s64() {
        let a: i64 = 1;
        let e: i64 = 4;
        let r: i64 = vshld_n_s64::<2>(a);
        assert_eq!(r, e);
    }
4220
    // vshld_n_u64: unsigned scalar left shift by const N.
    #[simd_test(enable = "neon")]
    unsafe fn test_vshld_n_u64() {
        let a: u64 = 1;
        let e: u64 = 4;
        let r: u64 = vshld_n_u64::<2>(a);
        assert_eq!(r, e);
    }
4228
    // vshrd_n_s64: scalar right shift by const N (4 >> 2 == 1).
    #[simd_test(enable = "neon")]
    unsafe fn test_vshrd_n_s64() {
        let a: i64 = 4;
        let e: i64 = 1;
        let r: i64 = vshrd_n_s64::<2>(a);
        assert_eq!(r, e);
    }
4236
    // vshrd_n_u64: unsigned scalar right shift by const N.
    #[simd_test(enable = "neon")]
    unsafe fn test_vshrd_n_u64() {
        let a: u64 = 4;
        let e: u64 = 1;
        let r: u64 = vshrd_n_u64::<2>(a);
        assert_eq!(r, e);
    }
4244
    // vsrad_n_s64: shift-right-and-accumulate, a + (b >> 2) == 1 + 1 == 2.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsrad_n_s64() {
        let a: i64 = 1;
        let b: i64 = 4;
        let e: i64 = 2;
        let r: i64 = vsrad_n_s64::<2>(a, b);
        assert_eq!(r, e);
    }
4253
    // vsrad_n_u64: unsigned shift-right-and-accumulate.
    #[simd_test(enable = "neon")]
    unsafe fn test_vsrad_n_u64() {
        let a: u64 = 1;
        let b: u64 = 4;
        let e: u64 = 2;
        let r: u64 = vsrad_n_u64::<2>(a, b);
        assert_eq!(r, e);
    }
4262
    // vdup_n_f64: broadcast a scalar into a 1-lane f64 vector.
    #[simd_test(enable = "neon")]
    unsafe fn test_vdup_n_f64() {
        let a: f64 = 3.3;
        let e = f64x1::new(3.3);
        let r: f64x1 = transmute(vdup_n_f64(a));
        assert_eq!(r, e);
    }
4270
    // vdup_n_p64: broadcast a scalar into a 1-lane poly64 vector.
    #[simd_test(enable = "neon")]
    unsafe fn test_vdup_n_p64() {
        let a: u64 = 3;
        let e = u64x1::new(3);
        let r: u64x1 = transmute(vdup_n_p64(a));
        assert_eq!(r, e);
    }
4278
    // vdupq_n_f64: broadcast a scalar into both f64 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vdupq_n_f64() {
        let a: f64 = 3.3;
        let e = f64x2::new(3.3, 3.3);
        let r: f64x2 = transmute(vdupq_n_f64(a));
        assert_eq!(r, e);
    }
4286
    // vdupq_n_p64: broadcast a scalar into both poly64 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vdupq_n_p64() {
        let a: u64 = 3;
        let e = u64x2::new(3, 3);
        let r: u64x2 = transmute(vdupq_n_p64(a));
        assert_eq!(r, e);
    }
4294
    // vmov_n_p64: alias of vdup_n_p64 behavior — broadcast into one lane.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmov_n_p64() {
        let a: u64 = 3;
        let e = u64x1::new(3);
        let r: u64x1 = transmute(vmov_n_p64(a));
        assert_eq!(r, e);
    }
4302
    // vmov_n_f64: broadcast a scalar into a 1-lane f64 vector.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmov_n_f64() {
        let a: f64 = 3.3;
        let e = f64x1::new(3.3);
        let r: f64x1 = transmute(vmov_n_f64(a));
        assert_eq!(r, e);
    }
4310
    // vmovq_n_p64: broadcast a scalar into both poly64 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmovq_n_p64() {
        let a: u64 = 3;
        let e = u64x2::new(3, 3);
        let r: u64x2 = transmute(vmovq_n_p64(a));
        assert_eq!(r, e);
    }
4318
    // vmovq_n_f64: broadcast a scalar into both f64 lanes.
    #[simd_test(enable = "neon")]
    unsafe fn test_vmovq_n_f64() {
        let a: f64 = 3.3;
        let e = f64x2::new(3.3, 3.3);
        let r: f64x2 = transmute(vmovq_n_f64(a));
        assert_eq!(r, e);
    }
4326
    // vget_high_f64: extracts the upper lane of a 2-lane f64 vector.
    #[simd_test(enable = "neon")]
    unsafe fn test_vget_high_f64() {
        let a = f64x2::new(1.0, 2.0);
        let e = f64x1::new(2.0);
        let r: f64x1 = transmute(vget_high_f64(transmute(a)));
        assert_eq!(r, e);
    }
4334
    // vget_high_p64: extracts the upper lane of a 2-lane poly64 vector.
    #[simd_test(enable = "neon")]
    unsafe fn test_vget_high_p64() {
        let a = u64x2::new(1, 2);
        let e = u64x1::new(2);
        let r: u64x1 = transmute(vget_high_p64(transmute(a)));
        assert_eq!(r, e);
    }
4342
    // vget_low_f64: extracts the lower lane of a 2-lane f64 vector.
    #[simd_test(enable = "neon")]
    unsafe fn test_vget_low_f64() {
        let a = f64x2::new(1.0, 2.0);
        let e = f64x1::new(1.0);
        let r: f64x1 = transmute(vget_low_f64(transmute(a)));
        assert_eq!(r, e);
    }
4350
    // vget_low_p64: extracts the lower lane of a 2-lane poly64 vector.
    #[simd_test(enable = "neon")]
    unsafe fn test_vget_low_p64() {
        let a = u64x2::new(1, 2);
        let e = u64x1::new(1);
        let r: u64x1 = transmute(vget_low_p64(transmute(a)));
        assert_eq!(r, e);
    }
4358
    // vget_lane_f64: reads lane 0 of a 1-lane f64 vector as a scalar.
    #[simd_test(enable = "neon")]
    unsafe fn test_vget_lane_f64() {
        let v = f64x1::new(1.0);
        let r = vget_lane_f64::<0>(transmute(v));
        assert_eq!(r, 1.0);
    }
4365
4366    #[simd_test(enable = "neon")]
4367    unsafe fn test_vgetq_lane_f64() {
4368        let v = f64x2::new(0.0, 1.0);
4369        let r = vgetq_lane_f64::<1>(transmute(v));
4370        assert_eq!(r, 1.0);
4371        let r = vgetq_lane_f64::<0>(transmute(v));
4372        assert_eq!(r, 0.0);
4373    }
4374
4375    #[simd_test(enable = "neon")]
4376    unsafe fn test_vcopy_lane_s64() {
4377        let a: i64x1 = i64x1::new(1);
4378        let b: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
4379        let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
4380        let r: i64x1 = transmute(vcopy_lane_s64::<0, 0>(transmute(a), transmute(b)));
4381        assert_eq!(r, e);
4382    }
4383
4384    #[simd_test(enable = "neon")]
4385    unsafe fn test_vcopy_lane_u64() {
4386        let a: u64x1 = u64x1::new(1);
4387        let b: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
4388        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
4389        let r: u64x1 = transmute(vcopy_lane_u64::<0, 0>(transmute(a), transmute(b)));
4390        assert_eq!(r, e);
4391    }
4392
4393    #[simd_test(enable = "neon")]
4394    unsafe fn test_vcopy_lane_p64() {
4395        let a: i64x1 = i64x1::new(1);
4396        let b: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
4397        let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
4398        let r: i64x1 = transmute(vcopy_lane_p64::<0, 0>(transmute(a), transmute(b)));
4399        assert_eq!(r, e);
4400    }
4401
4402    #[simd_test(enable = "neon")]
4403    unsafe fn test_vcopy_lane_f64() {
4404        let a: f64 = 1.;
4405        let b: f64 = 0.;
4406        let e: f64 = 0.;
4407        let r: f64 = transmute(vcopy_lane_f64::<0, 0>(transmute(a), transmute(b)));
4408        assert_eq!(r, e);
4409    }
4410
4411    #[simd_test(enable = "neon")]
4412    unsafe fn test_vcopy_laneq_s64() {
4413        let a: i64x1 = i64x1::new(1);
4414        let b: i64x2 = i64x2::new(0, 0x7F_FF_FF_FF_FF_FF_FF_FF);
4415        let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
4416        let r: i64x1 = transmute(vcopy_laneq_s64::<0, 1>(transmute(a), transmute(b)));
4417        assert_eq!(r, e);
4418    }
4419
4420    #[simd_test(enable = "neon")]
4421    unsafe fn test_vcopy_laneq_u64() {
4422        let a: u64x1 = u64x1::new(1);
4423        let b: u64x2 = u64x2::new(0, 0xFF_FF_FF_FF_FF_FF_FF_FF);
4424        let e: u64x1 = u64x1::new(0xFF_FF_FF_FF_FF_FF_FF_FF);
4425        let r: u64x1 = transmute(vcopy_laneq_u64::<0, 1>(transmute(a), transmute(b)));
4426        assert_eq!(r, e);
4427    }
4428
4429    #[simd_test(enable = "neon")]
4430    unsafe fn test_vcopy_laneq_p64() {
4431        let a: i64x1 = i64x1::new(1);
4432        let b: i64x2 = i64x2::new(0, 0x7F_FF_FF_FF_FF_FF_FF_FF);
4433        let e: i64x1 = i64x1::new(0x7F_FF_FF_FF_FF_FF_FF_FF);
4434        let r: i64x1 = transmute(vcopy_laneq_p64::<0, 1>(transmute(a), transmute(b)));
4435        assert_eq!(r, e);
4436    }
4437
4438    #[simd_test(enable = "neon")]
4439    unsafe fn test_vcopy_laneq_f64() {
4440        let a: f64 = 1.;
4441        let b: f64x2 = f64x2::new(0., 0.5);
4442        let e: f64 = 0.5;
4443        let r: f64 = transmute(vcopy_laneq_f64::<0, 1>(transmute(a), transmute(b)));
4444        assert_eq!(r, e);
4445    }
4446
4447    #[simd_test(enable = "neon")]
4448    unsafe fn test_vceq_u64() {
4449        test_cmp_u64(
4450            |i, j| vceq_u64(i, j),
4451            |a: u64, b: u64| -> u64 {
4452                if a == b {
4453                    0xFFFFFFFFFFFFFFFF
4454                } else {
4455                    0
4456                }
4457            },
4458        );
4459    }
4460    #[simd_test(enable = "neon")]
4461    unsafe fn test_vceqq_u64() {
4462        testq_cmp_u64(
4463            |i, j| vceqq_u64(i, j),
4464            |a: u64, b: u64| -> u64 {
4465                if a == b {
4466                    0xFFFFFFFFFFFFFFFF
4467                } else {
4468                    0
4469                }
4470            },
4471        );
4472    }
4473
4474    #[simd_test(enable = "neon")]
4475    unsafe fn test_vceq_s64() {
4476        test_cmp_s64(
4477            |i, j| vceq_s64(i, j),
4478            |a: i64, b: i64| -> u64 {
4479                if a == b {
4480                    0xFFFFFFFFFFFFFFFF
4481                } else {
4482                    0
4483                }
4484            },
4485        );
4486    }
4487    #[simd_test(enable = "neon")]
4488    unsafe fn test_vceqq_s64() {
4489        testq_cmp_s64(
4490            |i, j| vceqq_s64(i, j),
4491            |a: i64, b: i64| -> u64 {
4492                if a == b {
4493                    0xFFFFFFFFFFFFFFFF
4494                } else {
4495                    0
4496                }
4497            },
4498        );
4499    }
4500
4501    #[simd_test(enable = "neon")]
4502    unsafe fn test_vceq_p64() {
4503        test_cmp_p64(
4504            |i, j| vceq_p64(i, j),
4505            |a: u64, b: u64| -> u64 {
4506                if a == b {
4507                    0xFFFFFFFFFFFFFFFF
4508                } else {
4509                    0
4510                }
4511            },
4512        );
4513    }
4514    #[simd_test(enable = "neon")]
4515    unsafe fn test_vceqq_p64() {
4516        testq_cmp_p64(
4517            |i, j| vceqq_p64(i, j),
4518            |a: u64, b: u64| -> u64 {
4519                if a == b {
4520                    0xFFFFFFFFFFFFFFFF
4521                } else {
4522                    0
4523                }
4524            },
4525        );
4526    }
4527
4528    #[simd_test(enable = "neon")]
4529    unsafe fn test_vceq_f64() {
4530        test_cmp_f64(
4531            |i, j| vceq_f64(i, j),
4532            |a: f64, b: f64| -> u64 {
4533                if a == b {
4534                    0xFFFFFFFFFFFFFFFF
4535                } else {
4536                    0
4537                }
4538            },
4539        );
4540    }
4541    #[simd_test(enable = "neon")]
4542    unsafe fn test_vceqq_f64() {
4543        testq_cmp_f64(
4544            |i, j| vceqq_f64(i, j),
4545            |a: f64, b: f64| -> u64 {
4546                if a == b {
4547                    0xFFFFFFFFFFFFFFFF
4548                } else {
4549                    0
4550                }
4551            },
4552        );
4553    }
4554
4555    #[simd_test(enable = "neon")]
4556    unsafe fn test_vcgt_s64() {
4557        test_cmp_s64(
4558            |i, j| vcgt_s64(i, j),
4559            |a: i64, b: i64| -> u64 {
4560                if a > b {
4561                    0xFFFFFFFFFFFFFFFF
4562                } else {
4563                    0
4564                }
4565            },
4566        );
4567    }
4568    #[simd_test(enable = "neon")]
4569    unsafe fn test_vcgtq_s64() {
4570        testq_cmp_s64(
4571            |i, j| vcgtq_s64(i, j),
4572            |a: i64, b: i64| -> u64 {
4573                if a > b {
4574                    0xFFFFFFFFFFFFFFFF
4575                } else {
4576                    0
4577                }
4578            },
4579        );
4580    }
4581
4582    #[simd_test(enable = "neon")]
4583    unsafe fn test_vcgt_u64() {
4584        test_cmp_u64(
4585            |i, j| vcgt_u64(i, j),
4586            |a: u64, b: u64| -> u64 {
4587                if a > b {
4588                    0xFFFFFFFFFFFFFFFF
4589                } else {
4590                    0
4591                }
4592            },
4593        );
4594    }
4595    #[simd_test(enable = "neon")]
4596    unsafe fn test_vcgtq_u64() {
4597        testq_cmp_u64(
4598            |i, j| vcgtq_u64(i, j),
4599            |a: u64, b: u64| -> u64 {
4600                if a > b {
4601                    0xFFFFFFFFFFFFFFFF
4602                } else {
4603                    0
4604                }
4605            },
4606        );
4607    }
4608
4609    #[simd_test(enable = "neon")]
4610    unsafe fn test_vcgt_f64() {
4611        test_cmp_f64(
4612            |i, j| vcgt_f64(i, j),
4613            |a: f64, b: f64| -> u64 {
4614                if a > b {
4615                    0xFFFFFFFFFFFFFFFF
4616                } else {
4617                    0
4618                }
4619            },
4620        );
4621    }
4622    #[simd_test(enable = "neon")]
4623    unsafe fn test_vcgtq_f64() {
4624        testq_cmp_f64(
4625            |i, j| vcgtq_f64(i, j),
4626            |a: f64, b: f64| -> u64 {
4627                if a > b {
4628                    0xFFFFFFFFFFFFFFFF
4629                } else {
4630                    0
4631                }
4632            },
4633        );
4634    }
4635
4636    #[simd_test(enable = "neon")]
4637    unsafe fn test_vclt_s64() {
4638        test_cmp_s64(
4639            |i, j| vclt_s64(i, j),
4640            |a: i64, b: i64| -> u64 {
4641                if a < b {
4642                    0xFFFFFFFFFFFFFFFF
4643                } else {
4644                    0
4645                }
4646            },
4647        );
4648    }
4649    #[simd_test(enable = "neon")]
4650    unsafe fn test_vcltq_s64() {
4651        testq_cmp_s64(
4652            |i, j| vcltq_s64(i, j),
4653            |a: i64, b: i64| -> u64 {
4654                if a < b {
4655                    0xFFFFFFFFFFFFFFFF
4656                } else {
4657                    0
4658                }
4659            },
4660        );
4661    }
4662
4663    #[simd_test(enable = "neon")]
4664    unsafe fn test_vclt_u64() {
4665        test_cmp_u64(
4666            |i, j| vclt_u64(i, j),
4667            |a: u64, b: u64| -> u64 {
4668                if a < b {
4669                    0xFFFFFFFFFFFFFFFF
4670                } else {
4671                    0
4672                }
4673            },
4674        );
4675    }
4676    #[simd_test(enable = "neon")]
4677    unsafe fn test_vcltq_u64() {
4678        testq_cmp_u64(
4679            |i, j| vcltq_u64(i, j),
4680            |a: u64, b: u64| -> u64 {
4681                if a < b {
4682                    0xFFFFFFFFFFFFFFFF
4683                } else {
4684                    0
4685                }
4686            },
4687        );
4688    }
4689
4690    #[simd_test(enable = "neon")]
4691    unsafe fn test_vltq_f64() {
4692        test_cmp_f64(
4693            |i, j| vclt_f64(i, j),
4694            |a: f64, b: f64| -> u64 {
4695                if a < b {
4696                    0xFFFFFFFFFFFFFFFF
4697                } else {
4698                    0
4699                }
4700            },
4701        );
4702    }
4703    #[simd_test(enable = "neon")]
4704    unsafe fn test_vcltq_f64() {
4705        testq_cmp_f64(
4706            |i, j| vcltq_f64(i, j),
4707            |a: f64, b: f64| -> u64 {
4708                if a < b {
4709                    0xFFFFFFFFFFFFFFFF
4710                } else {
4711                    0
4712                }
4713            },
4714        );
4715    }
4716
4717    #[simd_test(enable = "neon")]
4718    unsafe fn test_vcle_s64() {
4719        test_cmp_s64(
4720            |i, j| vcle_s64(i, j),
4721            |a: i64, b: i64| -> u64 {
4722                if a <= b {
4723                    0xFFFFFFFFFFFFFFFF
4724                } else {
4725                    0
4726                }
4727            },
4728        );
4729    }
4730    #[simd_test(enable = "neon")]
4731    unsafe fn test_vcleq_s64() {
4732        testq_cmp_s64(
4733            |i, j| vcleq_s64(i, j),
4734            |a: i64, b: i64| -> u64 {
4735                if a <= b {
4736                    0xFFFFFFFFFFFFFFFF
4737                } else {
4738                    0
4739                }
4740            },
4741        );
4742    }
4743
4744    #[simd_test(enable = "neon")]
4745    unsafe fn test_vcle_u64() {
4746        test_cmp_u64(
4747            |i, j| vcle_u64(i, j),
4748            |a: u64, b: u64| -> u64 {
4749                if a <= b {
4750                    0xFFFFFFFFFFFFFFFF
4751                } else {
4752                    0
4753                }
4754            },
4755        );
4756    }
4757    #[simd_test(enable = "neon")]
4758    unsafe fn test_vcleq_u64() {
4759        testq_cmp_u64(
4760            |i, j| vcleq_u64(i, j),
4761            |a: u64, b: u64| -> u64 {
4762                if a <= b {
4763                    0xFFFFFFFFFFFFFFFF
4764                } else {
4765                    0
4766                }
4767            },
4768        );
4769    }
4770
4771    #[simd_test(enable = "neon")]
4772    unsafe fn test_vleq_f64() {
4773        test_cmp_f64(
4774            |i, j| vcle_f64(i, j),
4775            |a: f64, b: f64| -> u64 {
4776                if a <= b {
4777                    0xFFFFFFFFFFFFFFFF
4778                } else {
4779                    0
4780                }
4781            },
4782        );
4783    }
4784    #[simd_test(enable = "neon")]
4785    unsafe fn test_vcleq_f64() {
4786        testq_cmp_f64(
4787            |i, j| vcleq_f64(i, j),
4788            |a: f64, b: f64| -> u64 {
4789                if a <= b {
4790                    0xFFFFFFFFFFFFFFFF
4791                } else {
4792                    0
4793                }
4794            },
4795        );
4796    }
4797
4798    #[simd_test(enable = "neon")]
4799    unsafe fn test_vcge_s64() {
4800        test_cmp_s64(
4801            |i, j| vcge_s64(i, j),
4802            |a: i64, b: i64| -> u64 {
4803                if a >= b {
4804                    0xFFFFFFFFFFFFFFFF
4805                } else {
4806                    0
4807                }
4808            },
4809        );
4810    }
4811    #[simd_test(enable = "neon")]
4812    unsafe fn test_vcgeq_s64() {
4813        testq_cmp_s64(
4814            |i, j| vcgeq_s64(i, j),
4815            |a: i64, b: i64| -> u64 {
4816                if a >= b {
4817                    0xFFFFFFFFFFFFFFFF
4818                } else {
4819                    0
4820                }
4821            },
4822        );
4823    }
4824
4825    #[simd_test(enable = "neon")]
4826    unsafe fn test_vcge_u64() {
4827        test_cmp_u64(
4828            |i, j| vcge_u64(i, j),
4829            |a: u64, b: u64| -> u64 {
4830                if a >= b {
4831                    0xFFFFFFFFFFFFFFFF
4832                } else {
4833                    0
4834                }
4835            },
4836        );
4837    }
4838    #[simd_test(enable = "neon")]
4839    unsafe fn test_vcgeq_u64() {
4840        testq_cmp_u64(
4841            |i, j| vcgeq_u64(i, j),
4842            |a: u64, b: u64| -> u64 {
4843                if a >= b {
4844                    0xFFFFFFFFFFFFFFFF
4845                } else {
4846                    0
4847                }
4848            },
4849        );
4850    }
4851
4852    #[simd_test(enable = "neon")]
4853    unsafe fn test_vgeq_f64() {
4854        test_cmp_f64(
4855            |i, j| vcge_f64(i, j),
4856            |a: f64, b: f64| -> u64 {
4857                if a >= b {
4858                    0xFFFFFFFFFFFFFFFF
4859                } else {
4860                    0
4861                }
4862            },
4863        );
4864    }
4865    #[simd_test(enable = "neon")]
4866    unsafe fn test_vcgeq_f64() {
4867        testq_cmp_f64(
4868            |i, j| vcgeq_f64(i, j),
4869            |a: f64, b: f64| -> u64 {
4870                if a >= b {
4871                    0xFFFFFFFFFFFFFFFF
4872                } else {
4873                    0
4874                }
4875            },
4876        );
4877    }
4878
4879    #[simd_test(enable = "neon")]
4880    unsafe fn test_vmul_f64() {
4881        test_ari_f64(|i, j| vmul_f64(i, j), |a: f64, b: f64| -> f64 { a * b });
4882    }
4883    #[simd_test(enable = "neon")]
4884    unsafe fn test_vmulq_f64() {
4885        testq_ari_f64(|i, j| vmulq_f64(i, j), |a: f64, b: f64| -> f64 { a * b });
4886    }
4887
4888    #[simd_test(enable = "neon")]
4889    unsafe fn test_vsub_f64() {
4890        test_ari_f64(|i, j| vsub_f64(i, j), |a: f64, b: f64| -> f64 { a - b });
4891    }
4892    #[simd_test(enable = "neon")]
4893    unsafe fn test_vsubq_f64() {
4894        testq_ari_f64(|i, j| vsubq_f64(i, j), |a: f64, b: f64| -> f64 { a - b });
4895    }
4896
4897    #[simd_test(enable = "neon")]
4898    unsafe fn test_vabsd_s64() {
4899        assert_eq!(vabsd_s64(-1), 1);
4900        assert_eq!(vabsd_s64(0), 0);
4901        assert_eq!(vabsd_s64(1), 1);
4902        assert_eq!(vabsd_s64(i64::MIN), i64::MIN);
4903        assert_eq!(vabsd_s64(i64::MIN + 1), i64::MAX);
4904    }
4905    #[simd_test(enable = "neon")]
4906    unsafe fn test_vabs_s64() {
4907        let a = i64x1::new(i64::MIN);
4908        let r: i64x1 = transmute(vabs_s64(transmute(a)));
4909        let e = i64x1::new(i64::MIN);
4910        assert_eq!(r, e);
4911        let a = i64x1::new(i64::MIN + 1);
4912        let r: i64x1 = transmute(vabs_s64(transmute(a)));
4913        let e = i64x1::new(i64::MAX);
4914        assert_eq!(r, e);
4915    }
4916    #[simd_test(enable = "neon")]
4917    unsafe fn test_vabsq_s64() {
4918        let a = i64x2::new(i64::MIN, i64::MIN + 1);
4919        let r: i64x2 = transmute(vabsq_s64(transmute(a)));
4920        let e = i64x2::new(i64::MIN, i64::MAX);
4921        assert_eq!(r, e);
4922    }
4923
4924    #[simd_test(enable = "neon")]
4925    unsafe fn test_vbsl_f64() {
4926        let a = u64x1::new(0x8000000000000000);
4927        let b = f64x1::new(-1.23f64);
4928        let c = f64x1::new(2.34f64);
4929        let e = f64x1::new(-2.34f64);
4930        let r: f64x1 = transmute(vbsl_f64(transmute(a), transmute(b), transmute(c)));
4931        assert_eq!(r, e);
4932    }
4933    #[simd_test(enable = "neon")]
4934    unsafe fn test_vbsl_p64() {
4935        let a = u64x1::new(1);
4936        let b = u64x1::new(u64::MAX);
4937        let c = u64x1::new(u64::MIN);
4938        let e = u64x1::new(1);
4939        let r: u64x1 = transmute(vbsl_p64(transmute(a), transmute(b), transmute(c)));
4940        assert_eq!(r, e);
4941    }
4942    #[simd_test(enable = "neon")]
4943    unsafe fn test_vbslq_f64() {
4944        let a = u64x2::new(1, 0x8000000000000000);
4945        let b = f64x2::new(f64::MAX, -1.23f64);
4946        let c = f64x2::new(f64::MIN, 2.34f64);
4947        let e = f64x2::new(f64::MIN, -2.34f64);
4948        let r: f64x2 = transmute(vbslq_f64(transmute(a), transmute(b), transmute(c)));
4949        assert_eq!(r, e);
4950    }
4951    #[simd_test(enable = "neon")]
4952    unsafe fn test_vbslq_p64() {
4953        let a = u64x2::new(u64::MAX, 1);
4954        let b = u64x2::new(u64::MAX, u64::MAX);
4955        let c = u64x2::new(u64::MIN, u64::MIN);
4956        let e = u64x2::new(u64::MAX, 1);
4957        let r: u64x2 = transmute(vbslq_p64(transmute(a), transmute(b), transmute(c)));
4958        assert_eq!(r, e);
4959    }
4960
4961    #[simd_test(enable = "neon")]
4962    unsafe fn test_vaddv_s16() {
4963        let a = i16x4::new(1, 2, 3, -4);
4964        let r: i16 = vaddv_s16(transmute(a));
4965        let e = 2_i16;
4966        assert_eq!(r, e);
4967    }
4968    #[simd_test(enable = "neon")]
4969    unsafe fn test_vaddv_u16() {
4970        let a = u16x4::new(1, 2, 3, 4);
4971        let r: u16 = vaddv_u16(transmute(a));
4972        let e = 10_u16;
4973        assert_eq!(r, e);
4974    }
4975    #[simd_test(enable = "neon")]
4976    unsafe fn test_vaddv_s32() {
4977        let a = i32x2::new(1, -2);
4978        let r: i32 = vaddv_s32(transmute(a));
4979        let e = -1_i32;
4980        assert_eq!(r, e);
4981    }
4982    #[simd_test(enable = "neon")]
4983    unsafe fn test_vaddv_u32() {
4984        let a = u32x2::new(1, 2);
4985        let r: u32 = vaddv_u32(transmute(a));
4986        let e = 3_u32;
4987        assert_eq!(r, e);
4988    }
4989    #[simd_test(enable = "neon")]
4990    unsafe fn test_vaddv_s8() {
4991        let a = i8x8::new(1, 2, 3, 4, 5, 6, 7, -8);
4992        let r: i8 = vaddv_s8(transmute(a));
4993        let e = 20_i8;
4994        assert_eq!(r, e);
4995    }
4996    #[simd_test(enable = "neon")]
4997    unsafe fn test_vaddv_u8() {
4998        let a = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
4999        let r: u8 = vaddv_u8(transmute(a));
5000        let e = 36_u8;
5001        assert_eq!(r, e);
5002    }
5003    #[simd_test(enable = "neon")]
5004    unsafe fn test_vaddvq_s16() {
5005        let a = i16x8::new(1, 2, 3, 4, 5, 6, 7, -8);
5006        let r: i16 = vaddvq_s16(transmute(a));
5007        let e = 20_i16;
5008        assert_eq!(r, e);
5009    }
5010    #[simd_test(enable = "neon")]
5011    unsafe fn test_vaddvq_u16() {
5012        let a = u16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
5013        let r: u16 = vaddvq_u16(transmute(a));
5014        let e = 36_u16;
5015        assert_eq!(r, e);
5016    }
5017    #[simd_test(enable = "neon")]
5018    unsafe fn test_vaddvq_s32() {
5019        let a = i32x4::new(1, 2, 3, -4);
5020        let r: i32 = vaddvq_s32(transmute(a));
5021        let e = 2_i32;
5022        assert_eq!(r, e);
5023    }
5024    #[simd_test(enable = "neon")]
5025    unsafe fn test_vaddvq_u32() {
5026        let a = u32x4::new(1, 2, 3, 4);
5027        let r: u32 = vaddvq_u32(transmute(a));
5028        let e = 10_u32;
5029        assert_eq!(r, e);
5030    }
5031    #[simd_test(enable = "neon")]
5032    unsafe fn test_vaddvq_s8() {
5033        let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16);
5034        let r: i8 = vaddvq_s8(transmute(a));
5035        let e = 104_i8;
5036        assert_eq!(r, e);
5037    }
5038    #[simd_test(enable = "neon")]
5039    unsafe fn test_vaddvq_u8() {
5040        let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
5041        let r: u8 = vaddvq_u8(transmute(a));
5042        let e = 136_u8;
5043        assert_eq!(r, e);
5044    }
5045    #[simd_test(enable = "neon")]
5046    unsafe fn test_vaddvq_s64() {
5047        let a = i64x2::new(1, -2);
5048        let r: i64 = vaddvq_s64(transmute(a));
5049        let e = -1_i64;
5050        assert_eq!(r, e);
5051    }
5052    #[simd_test(enable = "neon")]
5053    unsafe fn test_vaddvq_u64() {
5054        let a = u64x2::new(1, 2);
5055        let r: u64 = vaddvq_u64(transmute(a));
5056        let e = 3_u64;
5057        assert_eq!(r, e);
5058    }
5059
5060    #[simd_test(enable = "neon")]
5061    unsafe fn test_vaddlv_s8() {
5062        let a = i8x8::new(1, 2, 3, 4, 5, 6, 7, -8);
5063        let r: i16 = vaddlv_s8(transmute(a));
5064        let e = 20_i16;
5065        assert_eq!(r, e);
5066    }
5067    #[simd_test(enable = "neon")]
5068    unsafe fn test_vaddlv_u8() {
5069        let a = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
5070        let r: u16 = vaddlv_u8(transmute(a));
5071        let e = 36_u16;
5072        assert_eq!(r, e);
5073    }
5074    #[simd_test(enable = "neon")]
5075    unsafe fn test_vaddlvq_s8() {
5076        let a = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16);
5077        let r: i16 = vaddlvq_s8(transmute(a));
5078        let e = 104_i16;
5079        assert_eq!(r, e);
5080    }
5081    #[simd_test(enable = "neon")]
5082    unsafe fn test_vaddlvq_u8() {
5083        let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
5084        let r: u16 = vaddlvq_u8(transmute(a));
5085        let e = 136_u16;
5086        assert_eq!(r, e);
5087    }
5088
5089    #[simd_test(enable = "neon")]
5090    unsafe fn test_vld1_f64() {
5091        let a: [f64; 2] = [0., 1.];
5092        let e = f64x1::new(1.);
5093        let r: f64x1 = transmute(vld1_f64(a[1..].as_ptr()));
5094        assert_eq!(r, e)
5095    }
5096
5097    #[simd_test(enable = "neon")]
5098    unsafe fn test_vld1q_f64() {
5099        let a: [f64; 3] = [0., 1., 2.];
5100        let e = f64x2::new(1., 2.);
5101        let r: f64x2 = transmute(vld1q_f64(a[1..].as_ptr()));
5102        assert_eq!(r, e)
5103    }
5104
5105    #[simd_test(enable = "neon")]
5106    unsafe fn test_vld1_dup_f64() {
5107        let a: [f64; 2] = [1., 42.];
5108        let e = f64x1::new(42.);
5109        let r: f64x1 = transmute(vld1_dup_f64(a[1..].as_ptr()));
5110        assert_eq!(r, e)
5111    }
5112
5113    #[simd_test(enable = "neon")]
5114    unsafe fn test_vld1q_dup_f64() {
5115        let elem: f64 = 42.;
5116        let e = f64x2::new(42., 42.);
5117        let r: f64x2 = transmute(vld1q_dup_f64(&elem));
5118        assert_eq!(r, e)
5119    }
5120
5121    #[simd_test(enable = "neon")]
5122    unsafe fn test_vld1_lane_f64() {
5123        let a = f64x1::new(0.);
5124        let elem: f64 = 42.;
5125        let e = f64x1::new(42.);
5126        let r: f64x1 = transmute(vld1_lane_f64::<0>(&elem, transmute(a)));
5127        assert_eq!(r, e)
5128    }
5129
5130    #[simd_test(enable = "neon")]
5131    unsafe fn test_vld1q_lane_f64() {
5132        let a = f64x2::new(0., 1.);
5133        let elem: f64 = 42.;
5134        let e = f64x2::new(0., 42.);
5135        let r: f64x2 = transmute(vld1q_lane_f64::<1>(&elem, transmute(a)));
5136        assert_eq!(r, e)
5137    }
5138
5139    #[simd_test(enable = "neon")]
5140    unsafe fn test_vst1_f64() {
5141        let mut vals = [0_f64; 2];
5142        let a = f64x1::new(1.);
5143
5144        vst1_f64(vals[1..].as_mut_ptr(), transmute(a));
5145
5146        assert_eq!(vals[0], 0.);
5147        assert_eq!(vals[1], 1.);
5148    }
5149
5150    #[simd_test(enable = "neon")]
5151    unsafe fn test_vst1q_f64() {
5152        let mut vals = [0_f64; 3];
5153        let a = f64x2::new(1., 2.);
5154
5155        vst1q_f64(vals[1..].as_mut_ptr(), transmute(a));
5156
5157        assert_eq!(vals[0], 0.);
5158        assert_eq!(vals[1], 1.);
5159        assert_eq!(vals[2], 2.);
5160    }
5161
5162    #[simd_test(enable = "neon,sm4")]
5163    unsafe fn test_vsm3tt1aq_u32() {
5164        let a: u32x4 = u32x4::new(1, 2, 3, 4);
5165        let b: u32x4 = u32x4::new(1, 2, 3, 4);
5166        let c: u32x4 = u32x4::new(1, 2, 3, 4);
5167        let e: u32x4 = u32x4::new(2, 1536, 4, 16395);
5168        let r: u32x4 = transmute(vsm3tt1aq_u32::<0>(transmute(a), transmute(b), transmute(c)));
5169        assert_eq!(r, e);
5170    }
5171
5172    #[simd_test(enable = "neon,sm4")]
5173    unsafe fn test_vsm3tt1bq_u32() {
5174        let a: u32x4 = u32x4::new(1, 2, 3, 4);
5175        let b: u32x4 = u32x4::new(1, 2, 3, 4);
5176        let c: u32x4 = u32x4::new(1, 2, 3, 4);
5177        let e: u32x4 = u32x4::new(2, 1536, 4, 16392);
5178        let r: u32x4 = transmute(vsm3tt1bq_u32::<0>(transmute(a), transmute(b), transmute(c)));
5179        assert_eq!(r, e);
5180    }
5181
5182    #[simd_test(enable = "neon,sm4")]
5183    unsafe fn test_vsm3tt2aq_u32() {
5184        let a: u32x4 = u32x4::new(1, 2, 3, 4);
5185        let b: u32x4 = u32x4::new(1, 2, 3, 4);
5186        let c: u32x4 = u32x4::new(1, 2, 3, 4);
5187        let e: u32x4 = u32x4::new(2, 1572864, 4, 1447435);
5188        let r: u32x4 = transmute(vsm3tt2aq_u32::<0>(transmute(a), transmute(b), transmute(c)));
5189        assert_eq!(r, e);
5190    }
5191
5192    #[simd_test(enable = "neon,sm4")]
5193    unsafe fn test_vsm3tt2bq_u32() {
5194        let a: u32x4 = u32x4::new(1, 2, 3, 4);
5195        let b: u32x4 = u32x4::new(1, 2, 3, 4);
5196        let c: u32x4 = u32x4::new(1, 2, 3, 4);
5197        let e: u32x4 = u32x4::new(2, 1572864, 4, 1052680);
5198        let r: u32x4 = transmute(vsm3tt2bq_u32::<0>(transmute(a), transmute(b), transmute(c)));
5199        assert_eq!(r, e);
5200    }
5201
5202    #[simd_test(enable = "neon,sha3")]
5203    unsafe fn test_vxarq_u64() {
5204        let a: u64x2 = u64x2::new(1, 2);
5205        let b: u64x2 = u64x2::new(3, 4);
5206        let e: u64x2 = u64x2::new(2, 6);
5207        let r: u64x2 = transmute(vxarq_u64::<0>(transmute(a), transmute(b)));
5208        assert_eq!(r, e);
5209    }
5210}
5211
5212#[cfg(test)]
5213#[path = "../../arm_shared/neon/table_lookup_tests.rs"]
5214mod table_lookup_tests;
5215
5216#[cfg(test)]
5217#[path = "../../arm_shared/neon/shift_and_insert_tests.rs"]
5218mod shift_and_insert_tests;
5219
5220#[cfg(test)]
5221#[path = "../../arm_shared/neon/load_tests.rs"]
5222mod load_tests;
5223
5224#[cfg(test)]
5225#[path = "../../arm_shared/neon/store_tests.rs"]
5226mod store_tests;